/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#if defined(__DragonFly__)
#define CTLFLAG_RWTUN CTLFLAG_RW
#endif

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>

#if defined(__DragonFly__)
/* empty */
#else
#include <sys/smp.h>	/* for mp_ncpus */
#include <machine/bus.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#if defined(__DragonFly__)
#include <net/ifq_var.h>
#endif

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>
#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tsf.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_sysctl.h>
#include <dev/netif/ath/ath/if_ath_led.h>
#include <dev/netif/ath/ath/if_ath_keycache.h>
#include <dev/netif/ath/ath/if_ath_rx.h>
#include <dev/netif/ath/ath/if_ath_rx_edma.h>
#include <dev/netif/ath/ath/if_ath_tx_edma.h>
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_ath_btcoex.h>
#include <dev/netif/ath/ath/if_ath_spectral.h>
#include <dev/netif/ath/ath/if_ath_lna_div.h>
#include <dev/netif/ath/ath/if_athdfs.h>
#include <dev/netif/ath/ath/if_ath_ioctl.h>
#include <dev/netif/ath/ath/if_ath_descdma.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath/ath_tx99/ath_tx99.h>
#endif

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

/*
 * Only enable this if you're working on PS-POLL support.
 */
#define	ATH_SW_PSQ

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
CTASSERT(ATH_BCBUF <= 8);
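/*
 * Illustrative example (values assumed, not from this file): with a
 * base MAC of 00:03:7f:aa:bb:cc, clone i (1..7) is formed by setting
 * the locally-administered (U/L) bit and encoding the index in the
 * first octet, i.e. mac[0] |= (i << 2) | 0x2 - see assign_address()
 * below.  Clone 1 thus becomes 06:03:7f:aa:bb:cc, clone 2
 * 0a:03:7f:aa:bb:cc, and so on, all sharing the hardware BSSID mask.
 */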
static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static int	ath_init(struct ath_softc *);
static void	ath_stop(struct ath_softc *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
static int	ath_transmit(struct ieee80211com *, struct mbuf *);
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
static void	ath_parent(struct ieee80211com *);
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast_hw(struct ath_softc *);
static void	ath_update_mcast(struct ieee80211com *);
static void	ath_update_promisc(struct ieee80211com *);
static void	ath_updateslot(struct ieee80211com *);
static void	ath_bstuck_proc(void *, int);
static void	ath_reset_proc(void *, int);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_cleanup(struct ieee80211_node *);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc *, int qtype,
			int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static int	ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq,
			int dosched);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static void	ath_txq_sched_tasklet(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
#ifdef	ATH_ENABLE_11N
static void	ath_update_chw(struct ieee80211com *);
#endif	/* ATH_ENABLE_11N */
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);

static void	ath_dfs_tasklet(void *, int);
static void	ath_node_powersave(struct ieee80211_node *, int);
static int	ath_node_set_tim(struct ieee80211_node *, int);
static void	ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *);

#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/netif/ath/ath/if_ath_tdma.h>
#endif

#if defined(__DragonFly__)
extern const char* ath_hal_ether_sprintf(const u_int8_t *mac);
#endif

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &ath_rxbuf,
	    0, "rx buffers allocated");
#if defined(__DragonFly__)
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
#endif

int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RWTUN, &ath_txbuf,
	    0, "tx buffers allocated");
#if defined(__DragonFly__)
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
#endif

int ath_txbuf_mgmt = ATH_MGMT_TXBUF;	/* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RWTUN, &ath_txbuf_mgmt,
	    0, "tx (mgmt) buffers allocated");
#if defined(__DragonFly__)
TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);
#endif

int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
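/*
 * Usage example (illustrative, not from this file): the buffer counts
 * above are tunables, so they can be set from loader.conf(5) before
 * the module loads, e.g.
 *
 *	hw.ath.rxbuf=128
 *	hw.ath.txbuf=256
 *
 * while the CTLFLAG_RW knobs can also be changed at runtime, e.g.
 * "sysctl hw.ath.longcal=60".
 */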
void
ath_legacy_attach_comp_func(struct ath_softc *sc)
{

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}
}

/*
 * Set the target power mode.
 *
 * If this is called during a point in time where
 * the hardware is being programmed elsewhere, it will
 * simply store it away and update it when all current
 * uses of the hardware are completed.
 */
void
_ath_power_setpower(struct ath_softc *sc, int power_state, const char *file, int line)
{
	ATH_LOCK_ASSERT(sc);

	sc->sc_target_powerstate = power_state;

	DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
	    __func__,
	    file,
	    line,
	    power_state,
	    sc->sc_powersave_refcnt);

	if (sc->sc_powersave_refcnt == 0 &&
	    power_state != sc->sc_cur_powerstate) {
		sc->sc_cur_powerstate = power_state;
		ath_hal_setpower(sc->sc_ah, power_state);

		/*
		 * If the NIC is force-awake, then set the
		 * self-gen frame state appropriately.
		 *
		 * If the NIC is in network sleep or full-sleep,
		 * we let the above call leave the self-gen
		 * state as "sleep".
		 */
		if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
		    sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
			ath_hal_setselfgenpower(sc->sc_ah,
			    sc->sc_target_selfgen_state);
		}
	}
}

/*
 * Set the current self-generated frames state.
 *
 * This is separate from the target power mode.  The chip may be
 * awake but the desired state is "sleep", so frames sent to the
 * destination have PWRMGT=1 in the 802.11 header.  The NIC also
 * needs to know to set PWRMGT=1 in self-generated frames.
 */
void
_ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line)
{

	ATH_LOCK_ASSERT(sc);

	DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
	    __func__,
	    file,
	    line,
	    power_state,
	    sc->sc_target_selfgen_state);

	sc->sc_target_selfgen_state = power_state;

	/*
	 * If the NIC is force-awake, then set the power state.
	 * Network-sleep and full-sleep will already transition it to
	 * mark self-gen frames as sleeping - and we can't
	 * guarantee the NIC is awake to program the self-gen frame
	 * setting anyway.
	 */
	if (sc->sc_cur_powerstate == HAL_PM_AWAKE) {
		ath_hal_setselfgenpower(sc->sc_ah, power_state);
	}
}

/*
 * Set the hardware power mode and take a reference.
 *
 * This doesn't update the target power mode in the driver;
 * it just updates the hardware power state.
 *
 * XXX it should only ever force the hardware awake; it should
 * never be called to set it asleep.
 */
void
_ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line)
{
	ATH_LOCK_ASSERT(sc);

	DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
	    __func__,
	    file,
	    line,
	    power_state,
	    sc->sc_powersave_refcnt);

	sc->sc_powersave_refcnt++;

	if (power_state != sc->sc_cur_powerstate) {
		ath_hal_setpower(sc->sc_ah, power_state);
		sc->sc_cur_powerstate = power_state;

		/*
		 * Adjust the self-gen powerstate if appropriate.
		 */
		if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
		    sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
			ath_hal_setselfgenpower(sc->sc_ah,
			    sc->sc_target_selfgen_state);
		}
	}
}
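/*
 * Typical call pattern for these power-save helpers, as used
 * elsewhere in this file (e.g. ath_vap_delete(), ath_reset_keycache()):
 * bracket hardware access with
 *
 *	ATH_LOCK(sc);
 *	ath_power_set_power_state(sc, HAL_PM_AWAKE);
 *	... program registers ...
 *	ath_power_restore_power_state(sc);
 *	ATH_UNLOCK(sc);
 *
 * while ath_power_setpower() changes the long-term target state,
 * which takes effect once the reference count drops to zero.
 */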
/*
 * Restore the power save mode to what it once was.
 *
 * This will decrement the reference counter and once it hits
 * zero, it'll restore the powersave state.
 */
void
_ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line)
{

	ATH_LOCK_ASSERT(sc);

	DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) refcnt=%d, target state=%d\n",
	    __func__,
	    file,
	    line,
	    sc->sc_powersave_refcnt,
	    sc->sc_target_powerstate);

	if (sc->sc_powersave_refcnt == 0)
		device_printf(sc->sc_dev, "%s: refcnt=0?\n", __func__);
	else
		sc->sc_powersave_refcnt--;

	if (sc->sc_powersave_refcnt == 0 &&
	    sc->sc_target_powerstate != sc->sc_cur_powerstate) {
		sc->sc_cur_powerstate = sc->sc_target_powerstate;
		ath_hal_setpower(sc->sc_ah, sc->sc_target_powerstate);
	}

	/*
	 * Adjust the self-gen powerstate if appropriate.
	 */
	if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
	    sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
		ath_hal_setselfgenpower(sc->sc_ah,
		    sc->sc_target_selfgen_state);
	}

}

/*
 * Configure the initial HAL configuration values based on bus
 * specific parameters.
 *
 * Some PCI IDs and other information may need tweaking.
 *
 * XXX TODO: ath9k and the Atheros HAL only program comm2g_switch_enable
 * if BT antenna diversity isn't enabled.
 *
 * So, let's also figure out how to enable BT diversity for AR9485.
 */
static void
ath_setup_hal_config(struct ath_softc *sc, HAL_OPS_CONFIG *ah_config)
{
	/* XXX TODO: only for PCI devices? */

	if (sc->sc_pci_devinfo & (ATH_PCI_CUS198 | ATH_PCI_CUS230)) {
		ah_config->ath_hal_ext_lna_ctl_gpio = 0x200; /* bit 9 */
		ah_config->ath_hal_ext_atten_margin_cfg = AH_TRUE;
		ah_config->ath_hal_min_gainidx = AH_TRUE;
		ah_config->ath_hal_ant_ctrl_comm2g_switch_enable = 0x000bbb88;
		/* XXX low_rssi_thresh */
		/* XXX fast_div_bias */
		device_printf(sc->sc_dev, "configuring for %s\n",
		    (sc->sc_pci_devinfo & ATH_PCI_CUS198) ?
		    "CUS198" : "CUS230");
	}

	if (sc->sc_pci_devinfo & ATH_PCI_CUS217)
		device_printf(sc->sc_dev, "CUS217 card detected\n");

	if (sc->sc_pci_devinfo & ATH_PCI_CUS252)
		device_printf(sc->sc_dev, "CUS252 card detected\n");

	if (sc->sc_pci_devinfo & ATH_PCI_AR9565_1ANT)
		device_printf(sc->sc_dev, "WB335 1-ANT card detected\n");

	if (sc->sc_pci_devinfo & ATH_PCI_AR9565_2ANT)
		device_printf(sc->sc_dev, "WB335 2-ANT card detected\n");

	if (sc->sc_pci_devinfo & ATH_PCI_KILLER)
		device_printf(sc->sc_dev, "Killer Wireless card detected\n");

#if 0
	/*
	 * Some WB335 cards do not support antenna diversity.  Since
	 * we use a hardcoded value for AR9565 instead of using the
	 * EEPROM/OTP data, remove the combining feature from
	 * the HW capabilities bitmap.
	 */
	if (sc->sc_pci_devinfo & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
		if (!(sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV))
			pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
	}

	if (sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV) {
		pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
		device_printf(sc->sc_dev,
		    "Set BT/WLAN RX diversity capability\n");
	}
#endif

	if (sc->sc_pci_devinfo & ATH_PCI_D3_L1_WAR) {
		ah_config->ath_hal_pcie_waen = 0x0040473b;
		device_printf(sc->sc_dev, "Enable WAR for ASPM D3/L1\n");
	}

#if 0
	if (sc->sc_pci_devinfo & ATH9K_PCI_NO_PLL_PWRSAVE) {
		ah->config.no_pll_pwrsave = true;
		device_printf(sc->sc_dev, "Disable PLL PowerSave\n");
	}
#endif

}

/*
 * Attempt to fetch the MAC address from the kernel environment.
 *
 * Returns 0 (with the address in macaddr) if successful; -1 otherwise.
 */
static int
ath_fetch_mac_kenv(struct ath_softc *sc, uint8_t *macaddr)
{
	char devid_str[32];
	int local_mac = 0;
	char *local_macstr;

	/*
	 * Fetch from the kenv rather than using hints.
	 *
	 * Hints would be nice but the transition to dynamic
	 * hints/kenv doesn't happen early enough for this
	 * to work reliably (eg on anything embedded.)
	 */
	ksnprintf(devid_str, 32, "hint.%s.%d.macaddr",
	    device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));

#if defined(__DragonFly__)
	if ((local_macstr = kgetenv(devid_str)) != NULL) {
#else
	if ((local_macstr = kern_getenv(devid_str)) != NULL) {
#endif
		uint32_t tmpmac[ETHER_ADDR_LEN];
		int count;
		int i;

		/* Have a MAC address; should use it */
		device_printf(sc->sc_dev,
		    "Overriding MAC address from environment: '%s'\n",
		    local_macstr);

		/* Extract out the MAC address */
		count = ksscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
		    &tmpmac[0], &tmpmac[1],
		    &tmpmac[2], &tmpmac[3],
		    &tmpmac[4], &tmpmac[5]);
		if (count == 6) {
			/* Valid! */
			local_mac = 1;
			for (i = 0; i < ETHER_ADDR_LEN; i++)
				macaddr[i] = tmpmac[i];
		}
		/* Done! */
		kfreeenv(local_macstr);
		local_macstr = NULL;
	}

	if (local_mac)
		return (0);
	return (-1);
}
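/*
 * Example (illustrative): the override parsed above can be supplied
 * from loader.conf(5), e.g.
 *
 *	hint.ath.0.macaddr="00:11:22:33:44:55"
 *
 * Any single separator character between the hex octets is accepted
 * by the ksscanf() format string used above.
 */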
#define	HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	int rx_chainmask, tx_chainmask;
	HAL_OPS_CONFIG ah_config;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

#if defined(__DragonFly__)
	wlan_serialize_enter();
#endif
	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);

	/*
	 * Configure the initial configuration data.
	 *
	 * This is stuff that may be needed early during attach
	 * rather than done via configuration calls later.
	 */
	bzero(&ah_config, sizeof(ah_config));
	ath_setup_hal_config(sc, &ah_config);

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
	    sc->sc_eepromdata, &ah_config, &status);
	if (ah == NULL) {
		device_printf(sc->sc_dev,
		    "unable to attach hardware; HAL status %u\n", status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Setup the DMA/EDMA functions based on the current
	 * hardware support.
	 *
	 * This is required before the descriptors are allocated.
	 */
	if (ath_hal_hasedma(sc->sc_ah)) {
		sc->sc_isedma = 1;
		ath_recv_setup_edma(sc);
		ath_xmit_setup_edma(sc);
	} else {
		ath_recv_setup_legacy(sc);
		ath_xmit_setup_legacy(sc);
	}

	if (ath_hal_hasmybeacon(sc->sc_ah)) {
		sc->sc_do_mybeacon = 1;
	}

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		device_printf(sc->sc_dev,
		    "Warning, using only %u of %u key cache slots\n",
		    ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate TX descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate TX descriptors: %d\n", error);
		goto bad;
	}
	error = ath_txdma_setup(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate TX descriptors: %d\n", error);
		goto bad;
	}

	/*
	 * Allocate RX descriptors and populate the lists.
	 */
	error = ath_rxdma_setup(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate RX descriptors: %d\n", error);
		goto bad;
	}

#if defined(__DragonFly__)
	callout_init_lk(&sc->sc_cal_ch, &sc->sc_mtx);
	callout_init_lk(&sc->sc_wd_ch, &sc->sc_mtx);
#else
	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx);
#endif

	ATH_TXBUF_LOCK_INIT(sc);

#if defined(__DragonFly__)
	sc->sc_tq = taskqueue_create("ath_taskq", M_INTWAIT,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1,
	    "%s taskq", device_get_nameunit(sc->sc_dev));
#else
	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->sc_dev));
#endif

	TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask, 0, ath_bstuck_proc, sc);
	TASK_INIT(&sc->sc_resettask, 0, ath_reset_proc, sc);
	TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc);
	TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(sc);
	if (sc->sc_bhalq == (u_int) -1) {
		device_printf(sc->sc_dev,
		    "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		device_printf(sc->sc_dev, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		device_printf(sc->sc_dev,
		    "unable to setup xmit queue for %s traffic!\n",
		    ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Attach the TX completion function.
	 *
	 * The non-EDMA chips may have some special case optimisations;
	 * this method gives everyone a chance to attach cleanly.
	 */
	sc->sc_tx.xmit_attach_comp_func(sc);
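	/*
	 * Illustrative result of the queue setup above (assuming the
	 * typical four-queue hardware): each WME access category gets
	 * its own h/w queue via sc_ac2q[], with BK lowest priority; on
	 * hardware with fewer queues every AC shares sc_ac2q[WME_AC_BK],
	 * and ath_legacy_attach_comp_func() picks the matching TX
	 * completion handler from the resulting sc_txqsetup mask.
	 */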
	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach spectral module */
	if (ath_spectral_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach spectral\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach bluetooth coexistence module */
	if (ath_btcoex_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach bluetooth coexistence\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach LNA diversity module */
	if (ath_lna_div_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach LNA diversity\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* Configure LED state */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
#if defined(__DragonFly__)
	callout_init_mp(&sc->sc_ledtimer);
#else
	callout_init(&sc->sc_ledtimer, 1);
#endif

	/*
	 * Don't setup hardware-based blinking.
	 *
	 * Although some NICs may have this configured in the
	 * default reset register values, the user may wish
	 * to alter which pins have which function.
	 *
	 * The reference driver attaches the MAC network LED to GPIO1 and
	 * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
	 * NIC has these reversed.
	 */
	sc->sc_hardled = (1 == 0);
	sc->sc_led_net_pin = -1;
	sc->sc_led_pwr_pin = -1;
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
#ifndef	ATH_ENABLE_11N
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
#endif
		| IEEE80211_C_TXFRAG		/* handle tx frags */
#ifdef	ATH_ENABLE_DFS
		| IEEE80211_C_DFS		/* Enable radar detection */
#endif
		| IEEE80211_C_PMGT		/* Station side power mgmt */
		| IEEE80211_C_SWSLEEP
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
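	/*
	 * Illustrative slot layout for the reservation above (split-mic
	 * case, with IEEE80211_WEP_NKID == 4 assumed): global key i
	 * occupies slot i for the key itself and slot i+64 for one MIC
	 * key; with sc_splitmic set, slots i+32 and i+32+64 hold the
	 * second MIC key, so slots 0-3, 32-35, 64-67 and 96-99 end up
	 * marked in sc_keymap.
	 */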
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah);
	sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah);
	sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah);

	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * TODO: enforce that at least this many frames are available
	 * in the txbuf list before allowing data frames (raw or
	 * otherwise) to be transmitted.
	 */
	sc->sc_txq_data_minfree = 10;
	/*
	 * Leave this as default to maintain legacy behaviour.
	 * Shortening the cabq/mcastq may end up causing some
	 * undesirable behaviour.
	 */
	sc->sc_txq_mcastq_maxdepth = ath_txbuf;

	/*
	 * How deep can the node software TX queue get whilst it's asleep.
	 */
	sc->sc_txq_node_psq_maxdepth = 16;

	/*
	 * Default the maximum queue depth for a given node
	 * to 1/4'th the TX buffers, or 64, whichever
	 * is larger.
	 */
	sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4);

	/* Enable CABQ by default */
	sc->sc_cabq_enable = 1;

	/*
	 * Allow the TX and RX chainmasks to be overridden by
	 * environment variables and/or device.hints.
	 *
	 * This must be done early - before the hardware is
	 * calibrated or before the 802.11n stream calculation
	 * is done.
	 */
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "rx_chainmask",
	    &rx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
		    rx_chainmask);
		(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
	}
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "tx_chainmask",
	    &tx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
		    tx_chainmask);
		(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
	}

	/*
	 * Query the TX/RX chainmask configuration.
	 *
	 * This is only relevant for 11n devices.
	 */
	ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
	ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

	/*
	 * Disable MRR with protected frames by default.
	 * Only 802.11n series NICs can handle this.
	 */
	sc->sc_mrrprot = 0;	/* XXX should be a capability */
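	/*
	 * Example (illustrative): the chainmask overrides above are
	 * read via device hints, so something like
	 *
	 *	hint.ath.0.rx_chainmask=1
	 *	hint.ath.0.tx_chainmask=1
	 *
	 * in loader.conf(5) would restrict a 2x2 NIC to a single chain;
	 * the HAL-derived stream counts below then follow the chainmask.
	 */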
	/*
	 * Query the enterprise mode information from the HAL.
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0,
	    &sc->sc_ent_cfg) == HAL_OK)
		sc->sc_use_ent = 1;

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		uint32_t rxs, txs;
		uint32_t ldpc;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");

		sc->sc_mrrprot = 1;	/* XXX should be a capability */

		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			    | IEEE80211_HTC_AMPDU	/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU	/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839
							/* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    | IEEE80211_HTCAP_SHORTGI40;

		/*
		 * TX/RX streams need to be taken into account when
		 * negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);
		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		/*
		 * Setup TX and RX STBC based on what the HAL allows and
		 * the currently configured chainmask set.
		 * Ie - don't enable STBC TX if only one chain is enabled.
		 * STBC RX is fine on a single RX chain; it just won't
		 * provide any real benefit.
		 */
		if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0,
		    NULL) == HAL_OK) {
			sc->sc_rx_stbc = 1;
			device_printf(sc->sc_dev,
			    "[HT] 1 stream STBC receive enabled\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM;
		}
		if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0,
		    NULL) == HAL_OK) {
			sc->sc_tx_stbc = 1;
			device_printf(sc->sc_dev,
			    "[HT] 1 stream STBC transmit enabled\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC;
		}

		(void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
		    &sc->sc_rts_aggr_limit);
		if (sc->sc_rts_aggr_limit != (64 * 1024))
			device_printf(sc->sc_dev,
			    "[HT] RTS aggregates limited to %d KiB\n",
			    sc->sc_rts_aggr_limit / 1024);

		/*
		 * LDPC
		 */
		if ((ath_hal_getcapability(ah, HAL_CAP_LDPC, 0, &ldpc))
		    == HAL_OK && (ldpc == 1)) {
			sc->sc_has_ldpc = 1;
			device_printf(sc->sc_dev,
			    "[HT] LDPC transmit/receive enabled\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_LDPC;
		}

		device_printf(sc->sc_dev,
		    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Initial aggregation settings.
	 */
	sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH;
	sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH;
	sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
	sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
	sc->sc_aggr_limit = ATH_AGGR_MAXSIZE;
	sc->sc_delim_min_pad = 0;
	/*
	 * Check if the hardware requires PCI register serialisation.
	 * Some of the Owl based MACs require this.
	 */
#if defined(__DragonFly__)
	if (ncpus > 1 &&
#else
	if (mp_ncpus > 1 &&
#endif
	    ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
	     0, NULL) == HAL_OK) {
		sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
		device_printf(sc->sc_dev,
		    "Enabling register serialisation\n");
	}

	/*
	 * Initialise the deferred completed RX buffer list.
	 */
	TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
	TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from kenv first, then hardware */
	if (ath_fetch_mac_kenv(sc, ic->ic_macaddr) == 0) {
		/* Tell the HAL now about the new MAC */
		ath_hal_setmac(ah, ic->ic_macaddr);
	} else {
		ath_hal_getmac(ah, ic->ic_macaddr);
	}

	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_ioctl = ath_ioctl;
	ic->ic_parent = ath_parent;
	ic->ic_transmit = ath_transmit;
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = ath_node_cleanup;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;
#ifdef	ATH_ENABLE_11N
	/* 802.11n specific - but just override anyway */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_bar_response = ic->ic_bar_response;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_bar_response = ath_bar_response;

	ic->ic_update_chw = ath_update_chw;
#endif	/* ATH_ENABLE_11N */
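	/*
	 * NB: the save-then-override pattern above (e.g. sc_node_free /
	 * ic_node_free, or the sc_addba_* hooks) keeps a pointer to the
	 * original net80211 method so the ath replacement can do its
	 * own bookkeeping and then chain to the stock implementation.
	 */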
#ifdef	ATH_ENABLE_RADIOTAP_VENDOR_EXT
	/*
	 * There's one vendor bitmap entry in the RX radiotap
	 * header; make sure that's taken into account.
	 */
	ieee80211_radiotap_attachv(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0,
	    ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1,
	    ATH_RX_RADIOTAP_PRESENT);
#else
	/*
	 * No vendor bitmap/extensions are present.
	 */
	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
	    ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
	    ATH_RX_RADIOTAP_PRESENT);
#endif	/* ATH_ENABLE_RADIOTAP_VENDOR_EXT */

	/*
	 * Setup the ALQ logging if required
	 */
#ifdef	ATH_DEBUG_ALQ
	if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev));
	if_ath_alq_setcfg(&sc->sc_alq,
	    sc->sc_ah->ah_macVersion,
	    sc->sc_ah->ah_macRev,
	    sc->sc_ah->ah_phyRev,
	    sc->sc_ah->ah_magic);
#endif

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);

	/*
	 * Put it to sleep for now.
	 */
	ATH_LOCK(sc);
	ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
	ATH_UNLOCK(sc);

#if defined(__DragonFly__)
	wlan_serialize_exit();
#endif

	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
	ath_txdma_teardown(sc);
	ath_rxdma_teardown(sc);
bad:
	if (ah)
		ath_hal_detach(ah);

#if defined(__DragonFly__)
	/*
	 * To work around scoping issues with CURVNET_SET/CURVNET_RESTORE..
	 */
	sc->sc_invalid = 1;
	wlan_serialize_exit();
#else
	sc->sc_invalid = 1;
#endif

	return error;
}

int
ath_detach(struct ath_softc *sc)
{

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */

	/*
	 * XXX Wake the hardware up first.  ath_stop() will still
	 * wake it up first, but I'd rather do it here just to
	 * ensure it's awake.
	 */
	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ath_power_setpower(sc, HAL_PM_AWAKE);

	/*
	 * Stop things cleanly.
	 */
	ath_stop(sc);
	ATH_UNLOCK(sc);

	ieee80211_ifdetach(&sc->sc_ic);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);
#ifdef	ATH_DEBUG_ALQ
	if_ath_alq_tidyup(&sc->sc_alq);
#endif
	ath_lna_div_detach(sc);
	ath_btcoex_detach(sc);
	ath_spectral_detach(sc);
	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_txdma_teardown(sc);
	ath_rxdma_teardown(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	sc->sc_hwbssidmask[0] &= ~mac[0];
	if (i == 0)
		sc->sc_nbssid0++;
}

static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot, free;

	free = 0;
	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
				return slot;
			free = slot;
			/* NB: keep looking for a double slot */
		}
	return free;
}

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	avp = kmalloc(sizeof(struct ath_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ATH_LOCK(sc);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons is implemented w/ AP mode.
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 */
	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	/* STA, AHDEMO? */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
	}

	vap = &avp->av_vap;
	/* XXX can't hold mutex across if_alloc */
	ATH_UNLOCK(sc);
	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	ATH_LOCK(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
		    __func__, error);
		goto bad2;
	}

	/* h/w crypto support */
	vap->iv_key_alloc = ath_key_alloc;
	vap->iv_key_delete = ath_key_delete;
	vap->iv_key_set = ath_key_set;
	vap->iv_key_update_begin = ath_key_update_begin;
	vap->iv_key_update_end = ath_key_update_end;

	/* override various methods */
	avp->av_recv_mgmt = vap->iv_recv_mgmt;
	vap->iv_recv_mgmt = ath_recv_mgmt;
	vap->iv_reset = ath_reset_vap;
	vap->iv_update_beacon = ath_beacon_update;
	avp->av_newstate = vap->iv_newstate;
	vap->iv_newstate = ath_newstate;
	avp->av_bmiss = vap->iv_bmiss;
	vap->iv_bmiss = ath_bmiss_vap;

	avp->av_node_ps = vap->iv_node_ps;
	vap->iv_node_ps = ath_node_powersave;

	avp->av_set_tim = vap->iv_set_tim;
	vap->iv_set_tim = ath_node_set_tim;

	avp->av_recv_pspoll = vap->iv_recv_pspoll;
	vap->iv_recv_pspoll = ath_node_recv_pspoll;

	/* Set default parameters */

	/*
	 * Anything earlier than some AR9300 series MACs don't
	 * support a smaller MPDU density.
	 */
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
	/*
	 * All NICs can handle the maximum size, however
	 * AR5416 based MACs can only TX aggregates w/ RTS
	 * protection when the total aggregate size is <= 8k.
	 * However, for now that's enforced by the TX path.
	 */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;

	avp->av_bslot = -1;
	if (needbeacon) {
		/*
		 * Allocate beacon state and setup the q for buffered
		 * multicast frames.  We know a beacon buffer is
		 * available because we checked above.
		 */
		avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
		TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
			/*
			 * Assign the vap to a beacon xmit slot.  As above
			 * this cannot fail to find a free one.
			 */
			avp->av_bslot = assign_bslot(sc);
			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
			    ("beacon slot %u not empty", avp->av_bslot));
			sc->sc_bslot[avp->av_bslot] = vap;
			sc->sc_nbcnvaps++;
		}
		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
			/*
			 * Multiple vaps are to transmit beacons and we
			 * have h/w support for TSF adjusting; enable
			 * use of staggered beacons.
			 */
			sc->sc_stagbeacons = 1;
		}
		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
	}

	ic->ic_opmode = ic_opmode;
	if (opmode != IEEE80211_M_WDS) {
		sc->sc_nvaps++;
		if (opmode == IEEE80211_M_STA)
			sc->sc_nstavaps++;
		if (opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps++;
	}
	switch (ic_opmode) {
	case IEEE80211_M_IBSS:
		sc->sc_opmode = HAL_M_IBSS;
		break;
	case IEEE80211_M_STA:
		sc->sc_opmode = HAL_M_STA;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (vap->iv_caps & IEEE80211_C_TDMA) {
			sc->sc_tdma = 1;
			/* NB: disable tsf adjust */
			sc->sc_stagbeacons = 0;
		}
		/*
		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
		 * just ap mode.
		 */
		/* fall thru... */
#endif
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		sc->sc_opmode = HAL_M_HOSTAP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_opmode = HAL_M_MONITOR;
		break;
	default:
		/* XXX should not happen */
		break;
	}
	if (sc->sc_hastsfadd) {
		/*
		 * Configure whether or not TSF adjust should be done.
		 */
		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
	}
	if (flags & IEEE80211_CLONE_NOBEACONS) {
		/*
		 * Enable s/w beacon miss handling.
		 */
		sc->sc_swbmiss = 1;
	}
	ATH_UNLOCK(sc);

	/* complete setup */
	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status,
	    mac);
	return vap;
bad2:
	reclaim_address(sc, mac);
	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
	kfree(avp, M_80211_VAP);
	ATH_UNLOCK(sc);
	return NULL;
}

static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
	if (sc->sc_running) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		/* XXX Do all frames from all vaps/nodes need draining here? */
		ath_stoprecv(sc, 1);		/* stop recv side */
		ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */
	}

	/* .. leave the hardware awake for now. */

	ieee80211_vap_detach(vap);

	/*
	 * XXX Danger Will Robinson! Danger!
	 *
	 * Because ieee80211_vap_detach() can queue a frame (the station
	 * disassociate message?) after we've drained the TXQ and
	 * flushed the software TXQ, we will end up with a frame queued
	 * to a node whose vap is about to be freed.
	 *
	 * To work around this, flush the hardware/software again.
	 * This may be racy - the ath task may be running and the packet
	 * may be being scheduled between sw->hw txq.  Tsk.
	 *
	 * TODO: figure out why a new node gets allocated somewhere around
	 * here (after the ath_tx_swq() call; and after an ath_stop()
	 * call!)
	 */

	ath_draintxq(sc, ATH_RESET_DEFAULT);

	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_MBSS) {
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
		if (vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps--;
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	kfree(avp, M_80211_VAP);

	if (sc->sc_running) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			device_printf(sc->sc_dev,
			    "%s: unable to restart recv logic\n", __func__);
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}
		ath_hal_intrset(ah, sc->sc_imask);
	}

	/* OK, let the hardware go to sleep. */
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);
}

void
ath_suspend(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	sc->sc_resume_up = ic->ic_nrunning != 0;

	ieee80211_suspend_all(ic);
	/*
	 * NB: don't worry about putting the chip in low power
	 * mode; pci will power off our socket on suspend and
	 * CardBus detaches the device.
	 *
	 * XXX TODO: well, that's great, except for non-cardbus
	 * devices!
	 */

	/*
	 * XXX This doesn't wait until all pending taskqueue
	 * items and parallel transmit/receive/other threads
	 * have finished running!
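	 *
	 * (A fuller quiesce would presumably need taskqueue_drain()
	 * on the TX/RX tasks plus a wait for sc_txstart_cnt,
	 * sc_rxproc_cnt and sc_intr_cnt to reach zero, much as
	 * ath_txrx_stop_locked() does later in this file; a sketch
	 * of what's missing, not implemented here.)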
	 */
	ath_hal_intrset(sc->sc_ah, 0);
	taskqueue_block(sc->sc_tq);

	ATH_LOCK(sc);
#if defined(__DragonFly__)
	callout_stop_sync(&sc->sc_cal_ch);
#else
	callout_stop(&sc->sc_cal_ch);
#endif
	ATH_UNLOCK(sc);

	/*
	 * XXX ensure sc_invalid is 1
	 */

	/* Disable the PCIe PHY, complete with workarounds */
	ath_hal_enablepcie(sc->sc_ah, 1, 1);
}

/*
 * Reset the key cache since some parts do not reset the
 * contents on resume.  First we clear all entries, then
 * re-load keys that the 802.11 layer assumes are setup
 * in h/w.
 */
static void
ath_reset_keycache(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	int i;

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);
	ieee80211_crypto_reload_keys(ic);
}

/*
 * Fetch the current chainmask configuration based on the current
 * operating channel and options.
 */
static void
ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan)
{

	/*
	 * Set TX chainmask to the currently configured chainmask;
	 * the TX chainmask depends upon the current operating mode.
	 */
	sc->sc_cur_rxchainmask = sc->sc_rxchainmask;
	if (IEEE80211_IS_CHAN_HT(chan)) {
		sc->sc_cur_txchainmask = sc->sc_txchainmask;
	} else {
		sc->sc_cur_txchainmask = 1;
	}

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: TX chainmask is now 0x%x, RX is now 0x%x\n",
	    __func__,
	    sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);
}

void
ath_resume(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	ath_hal_enablepcie(ah, 0, 0);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	ath_update_chainmasks(sc,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan);
	ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);

	/* Ensure we set the current power state to on */
	ATH_LOCK(sc);
	ath_power_setselfgen(sc, HAL_PM_AWAKE);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ath_power_setpower(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_hal_reset(ah, sc->sc_opmode,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, HAL_RESET_NORMAL, &status);
	ath_reset_keycache(sc);

	ATH_RX_LOCK(sc);
	sc->sc_rx_stopped = 1;
	sc->sc_rx_resetted = 1;
	ATH_RX_UNLOCK(sc);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Let spectral at it in case spectral is enabled */
	ath_spectral_enable(sc, ic->ic_curchan);

	/*
	 * Let bluetooth coexistence at it in case it's needed for this channel
	 */
	ath_btcoex_enable(sc, ic->ic_curchan);

	/*
	 * If we're doing TDMA, enforce the TXOP limitation for chips that
	 * support it.
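	 *
	 * (Enforcing TXOP bounds how long a single burst may hold the
	 * air, which keeps a transmission from spilling past a TDMA
	 * slot boundary; an explanatory note - the hal does the actual
	 * work.)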
	 */
	if (sc->sc_hasenforcetxop && sc->sc_tdma)
		ath_hal_setenforcetxop(sc->sc_ah, 1);
	else
		ath_hal_setenforcetxop(sc->sc_ah, 0);

	/* Restore the LED configuration */
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	if (sc->sc_resume_up)
		ieee80211_resume_all(ic);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	/* XXX beacons ? */
}

void
ath_shutdown(struct ath_softc *sc)
{

	ATH_LOCK(sc);
	ath_stop(sc);
	ATH_UNLOCK(sc);
	/* NB: no point powering down chip as we're about to reboot */
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status = 0;
	uint32_t txqs;

	/*
	 * If we're inside a reset path, just print a warning and
	 * clear the ISR.  The reset routine will finish it for us.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		HAL_INT status;
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		DPRINTF(sc, ATH_DEBUG_ANY,
		    "%s: in reset, ignoring: status=0x%x\n",
		    __func__, status);
		ATH_PCU_UNLOCK(sc);
		return;
	}

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	if (!ath_hal_intrpend(ah)) {	/* shared irq, not for us */
		ATH_PCU_UNLOCK(sc);
		return;
	}

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	if (sc->sc_ic.ic_nrunning == 0 && sc->sc_running == 0) {
		HAL_INT status;

		DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_nrunning %d sc_running %d\n",
		    __func__, sc->sc_ic.ic_nrunning, sc->sc_running);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		ATH_PCU_UNLOCK(sc);

		ATH_LOCK(sc);
		ath_power_restore_power_state(sc);
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to ensure we only process bits we requested.
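	 *
	 * (For example: if sc_imask is HAL_INT_RX | HAL_INT_TX and the
	 * pseudo-ISR also reports HAL_INT_MIB, the "status &= sc->sc_imask"
	 * below discards the MIB bit; an illustrative case only.)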
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status);
#ifdef	ATH_DEBUG_ALQ
	if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate,
	    ah->ah_syncstate);
#endif	/* ATH_DEBUG_ALQ */
#ifdef	ATH_KTR_INTR_DEBUG
	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5,
	    "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
	    ah->ah_intrstate[0],
	    ah->ah_intrstate[1],
	    ah->ah_intrstate[2],
	    ah->ah_intrstate[3],
	    ah->ah_intrstate[6]);
#endif

	/* Squirrel away SYNC interrupt debugging */
	if (ah->ah_syncstate != 0) {
		int i;
		for (i = 0; i < 32; i++)
			if (ah->ah_syncstate & (1 << i))
				sc->sc_intr_stats.sync_intr[i]++;
	}

	status &= sc->sc_imask;			/* discard unasked for bits */

	/* Short-circuit un-handled interrupts */
	if (status == 0x0) {
		ATH_PCU_UNLOCK(sc);

		ATH_LOCK(sc);
		ath_power_restore_power_state(sc);
		ATH_UNLOCK(sc);

		return;
	}

	/*
	 * Take a note that we're inside the interrupt handler, so
	 * the reset routines know to wait.
	 */
	sc->sc_intr_cnt++;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Handle the interrupt.  We won't run concurrent with the reset
	 * or channel change routines as they'll wait for sc_intr_cnt
	 * to be 0 before continuing.
	 */
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma) {
				if (sc->sc_tdmaswba == 0) {
					struct ieee80211com *ic = &sc->sc_ic;
					struct ieee80211vap *vap =
					    TAILQ_FIRST(&ic->ic_vaps);
					ath_tdma_beacon_send(sc, vap);
					sc->sc_tdmaswba =
					    vap->iv_tdma->tdma_bintval;
				} else
					sc->sc_tdmaswba--;
			} else
#endif
			{
				ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
				/*
				 * Schedule the rx taskq in case there's no
				 * traffic so any frames held on the staging
				 * queue are aged and potentially flushed.
				 */
				sc->sc_rx.recv_sched(sc, 1);
#endif
			}
		}
		if (status & HAL_INT_RXEOL) {
			int imask;
			ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL");
			if (! sc->sc_isedma) {
				ATH_PCU_LOCK(sc);
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work at
				 *     least on older hardware revs.
				 */
				sc->sc_stats.ast_rxeol++;
				/*
				 * Disable RXEOL/RXORN - prevent an interrupt
				 * storm until the PCU logic can be reset.
				 * In case the interface is reset some other
				 * way before "sc_kickpcu" is called, don't
				 * modify sc_imask - that way if it is reset
				 * by a call to ath_reset() somehow, the
				 * interrupt mask will be correctly reprogrammed.
				 */
				imask = sc->sc_imask;
				imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
				ath_hal_intrset(ah, imask);
				/*
				 * Only blank sc_rxlink if we've not yet kicked
				 * the PCU.
				 *
				 * This isn't entirely correct - the correct solution
				 * would be to have a PCU lock and engage that for
				 * the duration of the PCU fiddling, which would include
				 * running the RX process.  Otherwise we could end up
				 * messing up the RX descriptor chain and making the
				 * RX desc list much shorter.
				 */
				if (! sc->sc_kickpcu)
					sc->sc_rxlink = NULL;
				sc->sc_kickpcu = 1;
				ATH_PCU_UNLOCK(sc);
			}
			/*
			 * Enqueue an RX proc to handle whatever
			 * is in the RX queue.
			 * This will then kick the PCU if required.
			 */
			sc->sc_rx.recv_sched(sc, 1);
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		/*
		 * Handle both the legacy and RX EDMA interrupt bits.
		 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC.
		 */
		if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) {
			sc->sc_stats.ast_rx_intr++;
			sc->sc_rx.recv_sched(sc, 1);
		}
		if (status & HAL_INT_TX) {
			sc->sc_stats.ast_tx_intr++;
			/*
			 * Grab all the currently set bits in the HAL txq bitmap
			 * and blank them.  This is the only place we should be
			 * doing this.
			 */
			if (! sc->sc_isedma) {
				ATH_PCU_LOCK(sc);
				txqs = 0xffffffff;
				ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
				ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3,
				    "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x",
				    txqs,
				    sc->sc_txq_active,
				    sc->sc_txq_active | txqs);
				sc->sc_txq_active |= txqs;
				ATH_PCU_UNLOCK(sc);
			}
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
		}
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_GTT)
			sc->sc_stats.ast_tx_timeout++;
		if (status & HAL_INT_CST)
			sc->sc_stats.ast_tx_cst++;
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			ATH_PCU_LOCK(sc);
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			/*
			 * Don't reset the interrupt if we've just
			 * kicked the PCU, or we may get a nested
			 * RXEOL before the rxproc has had a chance
			 * to run.
			 */
			if (sc->sc_kickpcu == 0)
				ath_hal_intrset(ah, sc->sc_imask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_RXORN) {
			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
			ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN");
			sc->sc_stats.ast_rxorn++;
		}
		if (status & HAL_INT_TSFOOR) {
			device_printf(sc->sc_dev, "%s: TSFOOR\n", __func__);
			sc->sc_syncbeacon = 1;
		}
	}
	ATH_PCU_LOCK(sc);
	sc->sc_intr_cnt--;
	ATH_PCU_UNLOCK(sc);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);
}

static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	u_int32_t *state;
	u_int32_t len;
	void *sp;

	if (sc->sc_invalid)
		return;

	device_printf(sc->sc_dev, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
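	 *
	 * (The hal is expected to hand back at least six 32-bit words
	 * of debug state here; the KASSERT below encodes that
	 * assumption.)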
	 */
#if defined(__DragonFly__)
	wlan_serialize_enter();
#endif
	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		state = sp;
		device_printf(sc->sc_dev,
		    "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
		    state[0], state[1], state[2], state[3],
		    state[4], state[5]);
	}
	ath_reset(sc, ATH_RESET_NOLOSS);
#if defined(__DragonFly__)
	wlan_serialize_exit();
#endif
}

static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	struct ath_softc *sc = vap->iv_ic->ic_softc;

	/*
	 * Work around phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */

	/*
	 * XXX TODO: Just read the TSF during the interrupt path;
	 * that way we don't have to wake up again just to read it
	 * again.
	 */
	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* XXX should take a locked ref to iv_bss */
		u_int bmisstimeout =
		    vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);

		if (tsf - lastrx <= bmisstimeout) {
			sc->sc_stats.ast_bmiss_phantom++;

			ATH_LOCK(sc);
			ath_power_restore_power_state(sc);
			ATH_UNLOCK(sc);

			return;
		}
	}

	/*
	 * There's no need to keep the hardware awake during the call
	 * to av_bmiss().
	 */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	/*
	 * Attempt to force a beacon resync.
	 */
	sc->sc_syncbeacon = 1;

	ATH_VAP(vap)->av_bmiss(vap);
}

/* XXX this needs a force wakeup! */
int
ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
{
	uint32_t rsize;
	void *sp;

	if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
		return 0;
	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
	*hangs = *(uint32_t *)sp;
	return 1;
}

static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	uint32_t hangs;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_beacon_miss(sc);

	/*
	 * Do a reset upon any beacon miss event.
	 *
	 * It may be a non-recognised RX clear hang which needs a reset
	 * to clear.
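	 *
	 * (For scale: with iv_bmissthreshold = 10 and a 100 TU beacon
	 * interval, the phantom-bmiss window tested in ath_bmiss_vap()
	 * above is 10 * 100 * 1024 = 1024000 usec of TSF time, i.e.
	 * roughly one second; a worked example only.)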
	 */
	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
		ath_reset(sc, ATH_RESET_NOLOSS);
		device_printf(sc->sc_dev,
		    "bb hang detected (0x%x), resetting\n", hangs);
	} else {
		ath_reset(sc, ATH_RESET_NOLOSS);
		ieee80211_beacon_miss(&sc->sc_ic);
	}

	/* Force a beacon resync, in case they've drifted */
	sc->sc_syncbeacon = 1;

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);
}

/*
 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
 * calcs together with WME.  If necessary disable the crypto
 * hardware and mark the 802.11 state so keys will be setup
 * with the MIC work done in software.
 */
static void
ath_settkipmic(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
		if (ic->ic_flags & IEEE80211_F_WME) {
			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
		} else {
			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		}
	}
}

static int
ath_init(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	ATH_LOCK_ASSERT(sc);

	/*
	 * Force the sleep state awake.
	 */
	ath_power_setselfgen(sc, HAL_PM_AWAKE);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ath_power_setpower(sc, HAL_PM_AWAKE);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop(sc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_settkipmic(sc);
	ath_update_chainmasks(sc, ic->ic_curchan);
	ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);

	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE,
	    HAL_RESET_NORMAL, &status)) {
		device_printf(sc->sc_dev,
		    "unable to reset hardware; hal status %u\n", status);
		return (ENODEV);
	}

	ATH_RX_LOCK(sc);
	sc->sc_rx_stopped = 1;
	sc->sc_rx_resetted = 1;
	ATH_RX_UNLOCK(sc);

	ath_chan_change(sc, ic->ic_curchan);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Let spectral at it in case spectral is enabled */
	ath_spectral_enable(sc, ic->ic_curchan);

	/*
	 * Let bluetooth coexistence at it in case it's needed for this channel
	 */
	ath_btcoex_enable(sc, ic->ic_curchan);

	/*
	 * If we're doing TDMA, enforce the TXOP limitation for chips that
	 * support it.
	 */
	if (sc->sc_hasenforcetxop && sc->sc_tdma)
		ath_hal_setenforcetxop(sc->sc_ah, 1);
	else
		ath_hal_setenforcetxop(sc->sc_ah, 0);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
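	 *
	 * (The calibration bookkeeping below is also reset: the long and
	 * short calibration and ANI timestamps restart from the current
	 * 'ticks', presumably so the first periodic calibration fires on
	 * schedule rather than immediately.)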
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	sc->sc_lastlongcal = ticks;
	sc->sc_resetcal = 1;
	sc->sc_lastcalreset = 0;
	sc->sc_lastani = ticks;
	sc->sc_lastshortcal = ticks;
	sc->sc_doresetcal = AH_FALSE;
	/*
	 * Beacon timers were cleared here; give ath_newstate()
	 * a hint that the beacon timers should be poked when
	 * things transition to the RUN state.
	 */
	sc->sc_beacons = 0;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		device_printf(sc->sc_dev, "unable to start recv logic\n");
		ath_power_restore_power_state(sc);
		return (ENODEV);
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		| HAL_INT_RXORN | HAL_INT_TXURN
		| HAL_INT_FATAL | HAL_INT_GLOBAL;

	/*
	 * Enable RX EDMA bits.  Note these overlap with
	 * HAL_INT_RX and HAL_INT_RXDESC respectively.
	 */
	if (sc->sc_isedma)
		sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP);

	/*
	 * If we're an EDMA NIC, we don't care about RXEOL.
	 * Writing a new descriptor in will simply restart
	 * RX DMA.
	 */
	if (! sc->sc_isedma)
		sc->sc_imask |= HAL_INT_RXEOL;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	/*
	 * XXX add capability for this.
	 *
	 * If we're in STA mode (and maybe IBSS?) then register for
	 * TSFOOR interrupts.
	 */
	if (ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_TSFOOR;

	/* Enable global TX timeout and carrier sense timeout if available */
	if (ath_hal_gtxto_supported(ah))
		sc->sc_imask |= HAL_INT_GTT;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
	    __func__, sc->sc_imask);

	sc->sc_running = 1;
	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
	ath_hal_intrset(ah, sc->sc_imask);

	ath_power_restore_power_state(sc);

	return (0);
}

static void
ath_stop(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_LOCK_ASSERT(sc);

	/*
	 * Wake the hardware up before fiddling with it.
	 */
	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	if (sc->sc_running) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
#if defined(__DragonFly__)
		callout_stop_sync(&sc->sc_wd_ch);
#else
		callout_stop(&sc->sc_wd_ch);
#endif
		sc->sc_wd_timer = 0;
		/* ifp->if_flags &= ~IFF_RUNNING; */
		sc->sc_running = 0;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
#if defined(__DragonFly__)
				callout_stop_sync(&sc->sc_ledtimer);
#else
				callout_stop(&sc->sc_ledtimer);
#endif
				ath_hal_gpioset(ah, sc->sc_ledpin,
				    !sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		/* XXX we should stop RX regardless of whether it's valid */
		if (!sc->sc_invalid) {
			ath_stoprecv(sc, 1);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		ath_draintxq(sc, ATH_RESET_DEFAULT);
		ath_beacon_free(sc);	/* XXX not needed */
	}

	/* And now, restore the current power state */
	ath_power_restore_power_state(sc);
}

/*
 * Wait until all pending TX/RX has completed.
 *
 * This waits until all existing transmit, receive and interrupts
 * have completed.  It's assumed that the caller has first
 * grabbed the reset lock so it doesn't try to do overlapping
 * chip resets.
 */
#define	MAX_TXRX_ITERATIONS	100
static void
ath_txrx_stop_locked(struct ath_softc *sc)
{
	int i = MAX_TXRX_ITERATIONS;

	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_LOCK_ASSERT(sc);

	/*
	 * Sleep until all the pending operations have completed.
	 *
	 * The caller must ensure that the reset count has been
	 * incremented or the pending operations may continue being queued.
	 */
	while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
	    sc->sc_txstart_cnt || sc->sc_intr_cnt) {
		if (i <= 0)
			break;
#if defined(__DragonFly__)
		if (wlan_is_serialized()) {
			wlan_serialize_exit();
			lksleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
				msecs_to_ticks(10));
			wlan_serialize_enter();
		} else {
			lksleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
				msecs_to_ticks(10));
		}
#else
		msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
		    msecs_to_ticks(10));
#endif
		i--;
	}

	if (i <= 0)
		device_printf(sc->sc_dev,
		    "%s: didn't finish after %d iterations\n",
		    __func__, MAX_TXRX_ITERATIONS);
}
#undef	MAX_TXRX_ITERATIONS

#if 0
static void
ath_txrx_stop(struct ath_softc *sc)
{
	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_UNLOCK_ASSERT(sc);

	ATH_PCU_LOCK(sc);
	ath_txrx_stop_locked(sc);
	ATH_PCU_UNLOCK(sc);
}
#endif

static void
ath_txrx_start(struct ath_softc *sc)
{

	taskqueue_unblock(sc->sc_tq);
}

/*
 * Grab the reset lock, and wait around until no one else
 * is trying to do anything with it.
 *
 * This is totally horrible but we can't hold this lock for
 * long enough to do TX/RX or we end up with net80211/ip stack
 * LORs and eventual deadlock.
 *
 * "dowait" signals whether to spin, waiting for the reset
 * lock count to reach 0. This should (for now) only be used
 * during the reset path, as the rest of the code may not
 * be locking-reentrant enough to behave correctly.
 *
 * Another, cleaner way should be found to serialise all of
 * these operations.
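 *
 * (Worst case the retry loop below waits MAX_RESET_ITERATIONS times
 * at roughly 100 msec per pause on FreeBSD - the DragonFly branch
 * sleeps closer to 10 msec - so the bound is on the order of 0.25
 * to 2.5 seconds before it gives up and warns; a back-of-envelope
 * figure, not a guarantee.)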
 */
#define MAX_RESET_ITERATIONS	25
static int
ath_reset_grablock(struct ath_softc *sc, int dowait)
{
	int w = 0;
	int i = MAX_RESET_ITERATIONS;

	ATH_PCU_LOCK_ASSERT(sc);
	do {
		if (sc->sc_inreset_cnt == 0) {
			w = 1;
			break;
		}
		if (dowait == 0) {
			w = 0;
			break;
		}
		ATH_PCU_UNLOCK(sc);
		/*
		 * 1 tick is likely not enough time for long calibrations
		 * to complete.  So we should wait quite a while.
		 */
#if defined(__DragonFly__)
		tsleep(&sc->sc_inreset_cnt, 0,
		       "ath_reset_grablock", (hz + 99) / 100);
#else
		pause("ath_reset_grablock", msecs_to_ticks(100));
#endif
		i--;
		ATH_PCU_LOCK(sc);
	} while (i > 0);

	/*
	 * We always increment the refcounter, regardless
	 * of whether we succeeded in getting it in an exclusive
	 * way.
	 */
	sc->sc_inreset_cnt++;

	if (i <= 0)
		device_printf(sc->sc_dev,
		    "%s: didn't finish after %d iterations\n",
		    __func__, MAX_RESET_ITERATIONS);

	if (w == 0)
		device_printf(sc->sc_dev,
		    "%s: warning, recursive reset path!\n",
		    __func__);

	return w;
}
#undef MAX_RESET_ITERATIONS

/*
 * Reset the hardware w/o losing operational state.  This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state.  Used to recover from various errors and
 * to reset or reload hardware state.
 */
int
ath_reset(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;
	int i;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);

	/* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
	ATH_PCU_UNLOCK_ASSERT(sc);
	ATH_UNLOCK_ASSERT(sc);

	/* Try to stop any further TX/RX from occurring */
	taskqueue_block(sc->sc_tq);

	/*
	 * Wake the hardware up.
	 */
	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);

	/*
	 * Grab the reset lock before TX/RX is stopped.
	 *
	 * This is needed to ensure that when the TX/RX actually does finish,
	 * no further TX/RX/reset runs in parallel with this.
	 */
	if (ath_reset_grablock(sc, 1) == 0) {
		device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
		    __func__);
	}

	/* disable interrupts */
	ath_hal_intrset(ah, 0);

	/*
	 * Now, ensure that any in progress TX/RX completes before we
	 * continue.
	 */
	ath_txrx_stop_locked(sc);

	ATH_PCU_UNLOCK(sc);

	/*
	 * Regardless of whether we're doing a no-loss flush or
	 * not, stop the PCU and handle what's in the RX queue.
	 * That way frames aren't dropped which shouldn't be.
	 */
	ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
	ath_rx_flush(sc);

	/*
	 * Should now wait for pending TX/RX to complete
	 * and block future ones from occurring. This needs to be
	 * done before the TX queue is drained.
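	 *
	 * (ATH_RESET_NOLOSS is the variant used by the error recovery
	 * paths - fatal errors, beacon misses - where queued frames
	 * should survive the reset; ATH_RESET_FULL, as used by the
	 * ioctl-driven path in ath_reset_vap(), may drop them.)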
	 */
	ath_draintxq(sc, reset_type);	/* stop xmit side */

	ath_settkipmic(sc);		/* configure TKIP MIC handling */
	/* NB: indicate channel change so we do a full reset */
	ath_update_chainmasks(sc, ic->ic_curchan);
	ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE,
	    HAL_RESET_NORMAL, &status))
		device_printf(sc->sc_dev,
		    "%s: unable to reset hardware; hal status %u\n",
		    __func__, status);
	sc->sc_diversity = ath_hal_getdiversity(ah);

	ATH_RX_LOCK(sc);
	sc->sc_rx_stopped = 1;
	sc->sc_rx_resetted = 1;
	ATH_RX_UNLOCK(sc);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Let spectral at it in case spectral is enabled */
	ath_spectral_enable(sc, ic->ic_curchan);

	/*
	 * Let bluetooth coexistence at it in case it's needed for this channel
	 */
	ath_btcoex_enable(sc, ic->ic_curchan);

	/*
	 * If we're doing TDMA, enforce the TXOP limitation for chips that
	 * support it.
	 */
	if (sc->sc_hasenforcetxop && sc->sc_tdma)
		ath_hal_setenforcetxop(sc->sc_ah, 1);
	else
		ath_hal_setenforcetxop(sc->sc_ah, 0);

	if (ath_startrecv(sc) != 0)	/* restart recv */
		device_printf(sc->sc_dev,
		    "%s: unable to start recv logic\n", __func__);
	/*
	 * We may be doing a reset in response to an ioctl
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_chan_change(sc, ic->ic_curchan);
	if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
		if (sc->sc_tdma)
			ath_tdma_config(sc, NULL);
		else
#endif
			ath_beacon_config(sc, NULL);
	}

	/*
	 * Release the reset lock and re-enable interrupts here.
	 * If an interrupt was being processed in ath_intr(),
	 * it would disable interrupts at this point. So we have
	 * to atomically enable interrupts and decrement the
	 * reset counter - this way ath_intr() doesn't end up
	 * disabling interrupts without a corresponding enable
	 * in the reset or channel change path.
	 *
	 * Grab the TX reference in case we need to transmit.
	 * That way a parallel transmit doesn't race with the
	 * restart work below.
	 */
	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt--;
	sc->sc_txstart_cnt++;
	/* XXX only do this if sc_inreset_cnt == 0? */
	ath_hal_intrset(ah, sc->sc_imask);
	ATH_PCU_UNLOCK(sc);

	/*
	 * TX and RX can be started here. If it were started with
	 * sc_inreset_cnt > 0, the TX and RX path would abort.
	 * Thus if this is a nested call through the reset or
	 * channel change code, TX completion will occur but
	 * RX completion and ath_start / ath_tx_start will not
	 * run.
	 */

	/* Restart TX/RX as needed */
	ath_txrx_start(sc);

	/* XXX TODO: we need to hold the tx refcount here! */

	/* Restart TX completion and pending TX */
	if (reset_type == ATH_RESET_NOLOSS) {
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ATH_TXQ_LOCK(&sc->sc_txq[i]);
				ath_txq_restart_dma(sc, &sc->sc_txq[i]);
				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);

				ATH_TX_LOCK(sc);
				ath_txq_sched(sc, &sc->sc_txq[i]);
				ATH_TX_UNLOCK(sc);
			}
		}
	}

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_txstart_cnt--;
	ATH_PCU_UNLOCK(sc);

	/* Handle any frames in the TX queue */
	/*
	 * XXX should this be done by the caller, rather than
	 * ath_reset() ?
	 */
	ath_tx_kick(sc);		/* restart xmit */
	return 0;
}

static int
ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_softc;
	struct ath_hal *ah = sc->sc_ah;

	switch (cmd) {
	case IEEE80211_IOC_TXPOWER:
		/*
		 * If per-packet TPC is enabled, then we have nothing
		 * to do; otherwise we need to force the global limit.
		 * All this can happen directly; no need to reset.
		 */
		if (!ath_hal_gettpc(ah))
			ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
		return 0;
	}
	/* XXX? Full or NOLOSS? */
	return ath_reset(sc, ATH_RESET_FULL);
}

struct ath_buf *
_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
{
	struct ath_buf *bf;

	ATH_TXBUF_LOCK_ASSERT(sc);

	if (btype == ATH_BUFTYPE_MGMT)
		bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
	else
		bf = TAILQ_FIRST(&sc->sc_txbuf);

	if (bf == NULL) {
		sc->sc_stats.ast_tx_getnobuf++;
	} else {
		if (bf->bf_flags & ATH_BUF_BUSY) {
			sc->sc_stats.ast_tx_getbusybuf++;
			bf = NULL;
		}
	}

	if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
		if (btype == ATH_BUFTYPE_MGMT)
			TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
		else {
			TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
			sc->sc_txbuf_cnt--;

			/*
			 * This shouldn't happen; however just to be
			 * safe print a warning and fudge the txbuf
			 * count.
			 */
			if (sc->sc_txbuf_cnt < 0) {
				device_printf(sc->sc_dev,
				    "%s: sc_txbuf_cnt < 0?\n",
				    __func__);
				sc->sc_txbuf_cnt = 0;
			}
		}
	} else
		bf = NULL;

	if (bf == NULL) {
		/* XXX should check which list, mgmt or otherwise */
		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
		    TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
3127 "out of xmit buffers" : "xmit buffer busy"); 3128 return NULL; 3129 } 3130 3131 /* XXX TODO: should do this at buffer list initialisation */ 3132 /* XXX (then, ensure the buffer has the right flag set) */ 3133 bf->bf_flags = 0; 3134 if (btype == ATH_BUFTYPE_MGMT) 3135 bf->bf_flags |= ATH_BUF_MGMT; 3136 else 3137 bf->bf_flags &= (~ATH_BUF_MGMT); 3138 3139 /* Valid bf here; clear some basic fields */ 3140 bf->bf_next = NULL; /* XXX just to be sure */ 3141 bf->bf_last = NULL; /* XXX again, just to be sure */ 3142 bf->bf_comp = NULL; /* XXX again, just to be sure */ 3143 bzero(&bf->bf_state, sizeof(bf->bf_state)); 3144 3145 /* 3146 * Track the descriptor ID only if doing EDMA 3147 */ 3148 if (sc->sc_isedma) { 3149 bf->bf_descid = sc->sc_txbuf_descid; 3150 sc->sc_txbuf_descid++; 3151 } 3152 3153 return bf; 3154 } 3155 3156 /* 3157 * When retrying a software frame, buffers marked ATH_BUF_BUSY 3158 * can't be thrown back on the queue as they could still be 3159 * in use by the hardware. 3160 * 3161 * This duplicates the buffer, or returns NULL. 3162 * 3163 * The descriptor is also copied but the link pointers and 3164 * the DMA segments aren't copied; this frame should thus 3165 * be again passed through the descriptor setup/chain routines 3166 * so the link is correct. 3167 * 3168 * The caller must free the buffer using ath_freebuf(). 3169 */ 3170 struct ath_buf * 3171 ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf) 3172 { 3173 struct ath_buf *tbf; 3174 3175 tbf = ath_getbuf(sc, 3176 (bf->bf_flags & ATH_BUF_MGMT) ? 3177 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); 3178 if (tbf == NULL) 3179 return NULL; /* XXX failure? Why? */ 3180 3181 /* Copy basics */ 3182 tbf->bf_next = NULL; 3183 tbf->bf_nseg = bf->bf_nseg; 3184 tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE; 3185 tbf->bf_status = bf->bf_status; 3186 tbf->bf_m = bf->bf_m; 3187 tbf->bf_node = bf->bf_node; 3188 KASSERT((bf->bf_node != NULL), ("%s: bf_node=NULL!", __func__)); 3189 /* will be setup by the chain/setup function */ 3190 tbf->bf_lastds = NULL; 3191 /* for now, last == self */ 3192 tbf->bf_last = tbf; 3193 tbf->bf_comp = bf->bf_comp; 3194 3195 /* NOTE: DMA segments will be setup by the setup/chain functions */ 3196 3197 /* The caller has to re-init the descriptor + links */ 3198 3199 /* 3200 * Free the DMA mapping here, before we NULL the mbuf. 3201 * We must only call bus_dmamap_unload() once per mbuf chain 3202 * or behaviour is undefined. 3203 */ 3204 if (bf->bf_m != NULL) { 3205 /* 3206 * XXX is this POSTWRITE call required? 3207 */ 3208 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3209 BUS_DMASYNC_POSTWRITE); 3210 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3211 } 3212 3213 bf->bf_m = NULL; 3214 bf->bf_node = NULL; 3215 3216 /* Copy state */ 3217 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 3218 3219 return tbf; 3220 } 3221 3222 struct ath_buf * 3223 ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) 3224 { 3225 struct ath_buf *bf; 3226 3227 ATH_TXBUF_LOCK(sc); 3228 bf = _ath_getbuf_locked(sc, btype); 3229 /* 3230 * If a mgmt buffer was requested but we're out of those, 3231 * try requesting a normal one. 3232 */ 3233 if (bf == NULL && btype == ATH_BUFTYPE_MGMT) 3234 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 3235 ATH_TXBUF_UNLOCK(sc); 3236 if (bf == NULL) { 3237 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 3238 sc->sc_stats.ast_tx_qstop++; 3239 } 3240 return bf; 3241 } 3242 3243 /* 3244 * Transmit a single frame. 
 *
 * net80211 will free the node reference if the transmit
 * fails, so don't free the node reference here.
 */
static int
ath_transmit(struct ieee80211com *ic, struct mbuf *m)
{
	struct ath_softc *sc = ic->ic_softc;
	struct ieee80211_node *ni;
	struct mbuf *next;
	struct ath_buf *bf;
	ath_bufhead frags;
	int retval = 0;

	/*
	 * Tell the reset path that we're currently transmitting.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: sc_inreset_cnt > 0; bailing\n", __func__);
		ATH_PCU_UNLOCK(sc);
		sc->sc_stats.ast_tx_qstop++;
		ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish");
		/* mbuf left intact on error */
		return (ENOBUFS);	/* XXX should be EINVAL or? */
	}
	sc->sc_txstart_cnt++;
	ATH_PCU_UNLOCK(sc);

	/* Wake the hardware up already */
	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start");
	/*
	 * Grab the TX lock - it's ok to do this here; we haven't
	 * yet started transmitting.
	 */
	ATH_TX_LOCK(sc);

	/*
	 * Node reference, if there's one.
	 */
	ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;

	/*
	 * Enforce how deep a node queue can get.
	 *
	 * XXX it would be nicer if we kept an mbuf queue per
	 * node and only whacked them into ath_bufs when we
	 * are ready to schedule some traffic from them.
	 * .. that may come later.
	 *
	 * XXX we should also track the per-node hardware queue
	 * depth so it is easy to limit the _SUM_ of the swq and
	 * hwq frames.  Since we only schedule two HWQ frames
	 * at a time, this should be OK for now.
	 */
	if ((!(m->m_flags & M_EAPOL)) &&
	    (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) {
		sc->sc_stats.ast_tx_nodeq_overflow++;
		retval = ENOBUFS;
		goto finish;
	}

	/*
	 * Check how many TX buffers are available.
	 *
	 * If this is for non-EAPOL traffic, just leave some
	 * space free in order for buffer cloning and raw
	 * frame transmission to occur.
	 *
	 * If it's for EAPOL traffic, ignore this for now.
	 * Management traffic will be sent via the raw transmit
	 * method which bypasses this check.
	 *
	 * This is needed to ensure that EAPOL frames during
	 * (re) keying have a chance to go out.
	 *
	 * See kern/138379 for more information.
	 */
	if ((!(m->m_flags & M_EAPOL)) &&
	    (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) {
		sc->sc_stats.ast_tx_nobuf++;
		retval = ENOBUFS;
		goto finish;
	}

	/*
	 * Grab a TX buffer and associated resources.
	 *
	 * If it's an EAPOL frame, allocate a MGMT ath_buf.
	 * That way even temporary buffer exhaustion in the data path
	 * doesn't leave us without the ability to transmit
	 * management frames.
	 *
	 * Otherwise allocate a normal buffer.
	 */
	if (m->m_flags & M_EAPOL)
		bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
	else
		bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);

	if (bf == NULL) {
		/*
		 * If we failed to allocate a buffer, fail.
		 *
		 * We shouldn't fail normally, due to the check
		 * above.
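		 *
		 * (EAPOL frames can still land here if even the MGMT pool
		 * and its normal-pool fallback in ath_getbuf() are
		 * exhausted, since they bypass the data-buffer headroom
		 * check above; an observation, not a new code path.)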
		 */
		sc->sc_stats.ast_tx_nobuf++;
		retval = ENOBUFS;
		goto finish;
	}

	/*
	 * At this point we have a buffer; so we need to free it
	 * if we hit any error conditions.
	 */

	/*
	 * Check for fragmentation.  If this frame
	 * has been broken up verify we have enough
	 * buffers to send all the fragments so all
	 * go out or none...
	 */
	TAILQ_INIT(&frags);
	if ((m->m_flags & M_FRAG) &&
	    !ath_txfrag_setup(sc, &frags, m, ni)) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: out of txfrag buffers\n", __func__);
		sc->sc_stats.ast_tx_nofrag++;
		if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
		/*
		 * XXXGL: is mbuf valid after ath_txfrag_setup? If yes,
		 * we shouldn't free it but return back.
		 */
		ieee80211_free_mbuf(m);
		m = NULL;
		goto bad;
	}

	/*
	 * At this point if we have any TX fragments, then we will
	 * have bumped the node reference once for each of those.
	 */

	/*
	 * XXX Is there anything actually _enforcing_ that the
	 * fragments are being transmitted in one hit, rather than
	 * being interleaved with other transmissions on that
	 * hardware queue?
	 *
	 * The ATH TX output lock is the only thing serialising this
	 * right now.
	 */

	/*
	 * Calculate the "next fragment" length field in ath_buf
	 * in order to let the transmit path know enough about
	 * what to next write to the hardware.
	 */
	if (m->m_flags & M_FRAG) {
		struct ath_buf *fbf = bf;
		struct ath_buf *n_fbf = NULL;
		struct mbuf *fm = m->m_nextpkt;

		/*
		 * We need to walk the list of fragments and set
		 * the next size to the following buffer.
		 * However, the first buffer isn't in the frag
		 * list, so we have to do some gymnastics here.
		 */
		TAILQ_FOREACH(n_fbf, &frags, bf_list) {
			fbf->bf_nextfraglen = fm->m_pkthdr.len;
			fbf = n_fbf;
			fm = fm->m_nextpkt;
		}
	}

nextfrag:
	/*
	 * Pass the frame to the h/w for transmission.
	 * Fragmented frames have each frag chained together
	 * with m_nextpkt.  We know there are sufficient ath_buf's
	 * to send all the frags because of work done by
	 * ath_txfrag_setup.  We leave m_nextpkt set while
	 * calling ath_tx_start so it can use it to extend the
	 * tx duration to cover the subsequent frag and
	 * so it can reclaim all the mbufs in case of an error;
	 * ath_tx_start clears m_nextpkt once it commits to
	 * handing the frame to the hardware.
	 *
	 * Note: if this fails, then the mbufs are freed but
	 * not the node reference.
	 *
	 * So, we now have to free the node reference ourselves here
	 * and return OK up to the stack.
	 */
	next = m->m_nextpkt;
	if (ath_tx_start(sc, ni, bf, m)) {
bad:
		if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
reclaim:
		bf->bf_m = NULL;
		bf->bf_node = NULL;
		ATH_TXBUF_LOCK(sc);
		ath_returnbuf_head(sc, bf);
		/*
		 * Free the rest of the node references and
		 * buffers for the fragment list.
		 */
		ath_txfrag_cleanup(sc, &frags, ni);
		ATH_TXBUF_UNLOCK(sc);

		/*
		 * XXX: And free the node/return OK; ath_tx_start() may have
		 * modified the buffer.  We currently have no way to
		 * signify that the mbuf was freed but there was an error.
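		 *
		 * (Returning 0 keeps the accounting consistent: by this
		 * point the mbuf chain and the node reference have both
		 * been disposed of locally, so the caller must neither
		 * retry nor free; a note on the convention, not a
		 * behaviour change.)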
		 */
		ieee80211_free_node(ni);
		retval = 0;
		goto finish;
	}

	/*
	 * Check here if the node is in power save state.
	 */
	ath_tx_update_tim(sc, ni, 1);

	if (next != NULL) {
		/*
		 * Beware of state changing between frags.
		 * XXX check sta power-save state?
		 */
		if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: flush fragmented packet, state %s\n",
			    __func__,
			    ieee80211_state_name[ni->ni_vap->iv_state]);
			/* XXX dmamap */
			ieee80211_free_mbuf(next);
			goto reclaim;
		}
		m = next;
		bf = TAILQ_FIRST(&frags);
		KASSERT(bf != NULL, ("no buf for txfrag"));
		TAILQ_REMOVE(&frags, bf, bf_list);
		goto nextfrag;
	}

	/*
	 * Bump watchdog timer.
	 */
	sc->sc_wd_timer = 5;

finish:
	ATH_TX_UNLOCK(sc);

	/*
	 * Finished transmitting!
	 */
	ATH_PCU_LOCK(sc);
	sc->sc_txstart_cnt--;
	ATH_PCU_UNLOCK(sc);

	/* Sleep the hardware if required */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished");

	return (retval);
}

static int
ath_media_change(struct ifnet *ifp)
{
	int error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	return (error == ENETRESET ? 0 : error);
}

/*
 * Block/unblock tx+rx processing while a key change is done.
 * We assume the caller serializes key management operations
 * so we only need to worry about synchronization with other
 * uses that originate in the driver.
 */
static void
ath_key_update_begin(struct ieee80211vap *vap)
{
	struct ath_softc *sc = vap->iv_ic->ic_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	taskqueue_block(sc->sc_tq);
}

static void
ath_key_update_end(struct ieee80211vap *vap)
{
	struct ath_softc *sc = vap->iv_ic->ic_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	taskqueue_unblock(sc->sc_tq);
}

static void
ath_update_promisc(struct ieee80211com *ic)
{
	struct ath_softc *sc = ic->ic_softc;
	u_int32_t rfilt;

	/* configure rx filter */
	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(sc->sc_ah, rfilt);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
}

/*
 * Driver-internal mcast update call.
 *
 * Assumes the hardware is already awake.
 */
static void
ath_update_mcast_hw(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	u_int32_t mfilt[2];

	/* calculate and install multicast filter */
	if (ic->ic_allmulti == 0) {
		struct ieee80211vap *vap;
		struct ifnet *ifp;
		struct ifmultiaddr *ifma;

		/*
		 * Merge multicast addresses to form the hardware filter.
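		 *
		 * (The filter is a 64-bit hash bitmap: each 48-bit group
		 * address is folded by XORing eight 6-bit fields into
		 * 'pos', which selects bit pos % 32 of word mfilt[pos / 32]
		 * below.  E.g. pos = 33 sets bit 1 of mfilt[1]; a worked
		 * example of the indexing only.)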
		 */
		mfilt[0] = mfilt[1] = 0;
		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
			ifp = vap->iv_ifp;
#if defined(__DragonFly__)
			/* nothing */
#else
			if_maddr_rlock(ifp);
#endif
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				caddr_t dl;
				u_int32_t val;
				u_int8_t pos;

				/* calculate XOR of eight 6-bit values */
				dl = LLADDR((struct sockaddr_dl *)
				    ifma->ifma_addr);
				val = le32dec(dl + 0);
				pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^
				    val;
				val = le32dec(dl + 3);
				pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^
				    val;
				pos &= 0x3f;
				mfilt[pos / 32] |= (1 << (pos % 32));
			}
#if defined(__DragonFly__)
			/* nothing */
#else
			if_maddr_runlock(ifp);
#endif
		}
	} else
		mfilt[0] = mfilt[1] = ~0;

	ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);

	DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
	    __func__, mfilt[0], mfilt[1]);
}

/*
 * Called from the net80211 layer - force the hardware
 * awake before operating.
 */
static void
ath_update_mcast(struct ieee80211com *ic)
{
	struct ath_softc *sc = ic->ic_softc;

	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	ath_update_mcast_hw(sc);

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);
}

void
ath_mode_init(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(ah, rfilt);

	/* configure operational mode */
	ath_hal_setopmode(ah);

	/* handle any link-level address change */
	ath_hal_setmac(ah, ic->ic_macaddr);

	/* calculate and install multicast filter */
	ath_update_mcast_hw(sc);
}

/*
 * Set the slot time based on the current setting.
 */
void
ath_setslottime(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	u_int usec;

	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
		usec = 13;
	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
		usec = 21;
	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		/* honor short/long slot time only in 11g */
		/* XXX shouldn't honor on pure g or turbo g channel */
		if (ic->ic_flags & IEEE80211_F_SHSLOT)
			usec = HAL_SLOT_TIME_9;
		else
			usec = HAL_SLOT_TIME_20;
	} else
		usec = HAL_SLOT_TIME_9;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);

	/* Wake up the hardware first before updating the slot time */
	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ath_hal_setslottime(ah, usec);
	ath_power_restore_power_state(sc);
	sc->sc_updateslot = OK;
	ATH_UNLOCK(sc);
}

/*
 * Callback from the 802.11 layer to update the
 * slot time based on the current setting.
 */
static void
ath_updateslot(struct ieee80211com *ic)
{
	struct ath_softc *sc = ic->ic_softc;

	/*
	 * When not coordinating the BSS, change the hardware
	 * immediately.  For other operations we defer the change
	 * until beacon updates have propagated to the stations.
	 *
	 * XXX sc_updateslot isn't changed behind a lock?
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
	    ic->ic_opmode == IEEE80211_M_MBSS)
		sc->sc_updateslot = UPDATE;
	else
		ath_setslottime(sc);
}

/*
 * Append the contents of src to dst; both queues
 * are assumed to be locked.
 */
void
ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
{

	ATH_TXQ_LOCK_ASSERT(src);
	ATH_TXQ_LOCK_ASSERT(dst);

	TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
	dst->axq_link = src->axq_link;
	src->axq_link = NULL;
	dst->axq_depth += src->axq_depth;
	dst->axq_aggr_depth += src->axq_aggr_depth;
	src->axq_depth = 0;
	src->axq_aggr_depth = 0;
}

/*
 * Reset the hardware, with no loss.
 *
 * This can't be used for a general case reset.
 */
static void
ath_reset_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;

#if 0
	device_printf(sc->sc_dev, "%s: resetting\n", __func__);
#endif
#if defined(__DragonFly__)
	wlan_serialize_enter();
#endif
	ath_reset(sc, ATH_RESET_NOLOSS);
#if defined(__DragonFly__)
	wlan_serialize_exit();
#endif
}

/*
 * Reset the hardware after detecting beacons have stopped.
 */
static void
ath_bstuck_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	uint32_t hangs = 0;

#if defined(__DragonFly__)
	wlan_serialize_enter();
#endif
	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
		device_printf(sc->sc_dev, "bb hang detected (0x%x)\n", hangs);

#ifdef ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON))
		if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL);
#endif

	device_printf(sc->sc_dev, "stuck beacon; resetting (bmiss count %u)\n",
	    sc->sc_bmisscount);
	sc->sc_stats.ast_bstuck++;
	/*
	 * This assumes that there's no simultaneous channel mode change.
	 */
	ath_reset(sc, ATH_RESET_NOLOSS);
#if defined(__DragonFly__)
	wlan_serialize_exit();
#endif
}

static int
ath_desc_alloc(struct ath_softc *sc)
{
	int error;

	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
		    "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER);
	if (error != 0) {
		return error;
	}
	sc->sc_txbuf_cnt = ath_txbuf;

	error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
		    "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt,
		    ATH_TXDESC);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
		return error;
	}

	/*
	 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the
	 * flag doesn't have to be set in ath_getbuf_locked().
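	 *
	 * (Three descriptor pools come out of this function: ath_txbuf
	 * data buffers with up to ATH_MAX_SCATTER DMA segments each,
	 * ath_txbuf_mgmt management buffers, and - below - ATH_BCBUF
	 * single-segment beacon buffers; a summary, the setup calls are
	 * authoritative.)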
3831 */ 3832 3833 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3834 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1); 3835 if (error != 0) { 3836 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3837 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3838 &sc->sc_txbuf_mgmt); 3839 return error; 3840 } 3841 return 0; 3842 } 3843 3844 static void 3845 ath_desc_free(struct ath_softc *sc) 3846 { 3847 3848 if (sc->sc_bdma.dd_desc_len != 0) 3849 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3850 if (sc->sc_txdma.dd_desc_len != 0) 3851 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3852 if (sc->sc_txdma_mgmt.dd_desc_len != 0) 3853 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3854 &sc->sc_txbuf_mgmt); 3855 } 3856 3857 static struct ieee80211_node * 3858 ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 3859 { 3860 struct ieee80211com *ic = vap->iv_ic; 3861 struct ath_softc *sc = ic->ic_softc; 3862 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3863 struct ath_node *an; 3864 3865 an = kmalloc(space, M_80211_NODE, M_INTWAIT | M_ZERO); 3866 if (an == NULL) { 3867 /* XXX stat+msg */ 3868 return NULL; 3869 } 3870 ath_rate_node_init(sc, an); 3871 3872 /* Setup the mutex - there's no associd yet so use the node pointer in the name */ 3873 ksnprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 3874 device_get_nameunit(sc->sc_dev), an); 3875 #if defined(__DragonFly__) 3876 lockinit(&an->an_mtx, an->an_name, 0, 0); 3877 #else 3878 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 3879 #endif 3880 3881 /* XXX setup ath_tid */ 3882 ath_tx_tid_init(sc, an); 3883 3884 #if defined(__DragonFly__) 3885 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__, 3886 ath_hal_ether_sprintf(mac), an); 3887 #else 3888 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, mac, ":", an); 3889 #endif 3890 return &an->an_node; 3891 } 3892 3893 static void 3894 ath_node_cleanup(struct ieee80211_node *ni) 3895 { 3896 struct ieee80211com *ic = ni->ni_ic; 3897 struct ath_softc *sc = ic->ic_softc; 3898 3899 #if defined(__DragonFly__) 3900 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__, 3901 ath_hal_ether_sprintf(ni->ni_macaddr), ATH_NODE(ni)); 3902 #else 3903 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, 3904 ni->ni_macaddr, ":", ATH_NODE(ni)); 3905 #endif 3906 3907 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3908 ath_tx_node_flush(sc, ATH_NODE(ni)); 3909 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 3910 sc->sc_node_cleanup(ni); 3911 } 3912 3913 static void 3914 ath_node_free(struct ieee80211_node *ni) 3915 { 3916 struct ieee80211com *ic = ni->ni_ic; 3917 struct ath_softc *sc = ic->ic_softc; 3918 3919 #if defined(__DragonFly__) 3920 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__, 3921 ath_hal_ether_sprintf(ni->ni_macaddr), ATH_NODE(ni)); 3922 #else 3923 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, 3924 ni->ni_macaddr, ":", ATH_NODE(ni)); 3925 #endif 3926 #if defined(__DragonFly__) 3927 lockuninit(&ATH_NODE(ni)->an_mtx); 3928 #else 3929 mtx_destroy(&ATH_NODE(ni)->an_mtx); 3930 #endif 3931 sc->sc_node_free(ni); 3932 } 3933 3934 static void 3935 ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 3936 { 3937 struct ieee80211com *ic = ni->ni_ic; 3938 struct ath_softc *sc = ic->ic_softc; 3939 struct ath_hal *ah = sc->sc_ah; 3940 3941 *rssi = ic->ic_node_getrssi(ni); 3942 if (ni->ni_chan != IEEE80211_CHAN_ANYC) 3943 *noise = ath_hal_getchannoise(ah, ni->ni_chan); 3944 else 3945 *noise
= -95; /* nominally correct */ 3946 } 3947 3948 /* 3949 * Set the default antenna. 3950 */ 3951 void 3952 ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3953 { 3954 struct ath_hal *ah = sc->sc_ah; 3955 3956 /* XXX block beacon interrupts */ 3957 ath_hal_setdefantenna(ah, antenna); 3958 if (sc->sc_defant != antenna) 3959 sc->sc_stats.ast_ant_defswitch++; 3960 sc->sc_defant = antenna; 3961 sc->sc_rxotherant = 0; 3962 } 3963 3964 static void 3965 ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 3966 { 3967 txq->axq_qnum = qnum; 3968 txq->axq_ac = 0; 3969 txq->axq_depth = 0; 3970 txq->axq_aggr_depth = 0; 3971 txq->axq_intrcnt = 0; 3972 txq->axq_link = NULL; 3973 txq->axq_softc = sc; 3974 TAILQ_INIT(&txq->axq_q); 3975 TAILQ_INIT(&txq->axq_tidq); 3976 TAILQ_INIT(&txq->fifo.axq_q); 3977 ATH_TXQ_LOCK_INIT(sc, txq); 3978 } 3979 3980 /* 3981 * Setup a h/w transmit queue. 3982 */ 3983 static struct ath_txq * 3984 ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 3985 { 3986 struct ath_hal *ah = sc->sc_ah; 3987 HAL_TXQ_INFO qi; 3988 int qnum; 3989 3990 memset(&qi, 0, sizeof(qi)); 3991 qi.tqi_subtype = subtype; 3992 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 3993 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 3994 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 3995 /* 3996 * Enable interrupts only for EOL and DESC conditions. 3997 * We mark tx descriptors to receive a DESC interrupt 3998 * when a tx queue gets deep; otherwise we wait for the 3999 * EOL to reap descriptors. Note that this is done to 4000 * reduce interrupt load and this only defers reaping 4001 * descriptors, never transmitting frames. Aside from 4002 * reducing interrupts this also permits more concurrency. 4003 * The only potential downside is if the tx queue backs 4004 * up in which case the top half of the kernel may back up 4005 * due to a lack of tx descriptors. 4006 */ 4007 if (sc->sc_isedma) 4008 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | 4009 HAL_TXQ_TXOKINT_ENABLE; 4010 else 4011 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | 4012 HAL_TXQ_TXDESCINT_ENABLE; 4013 4014 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 4015 if (qnum == -1) { 4016 /* 4017 * NB: don't print a message, this happens 4018 * normally on parts with too few tx queues 4019 */ 4020 return NULL; 4021 } 4022 if (qnum >= nitems(sc->sc_txq)) { 4023 device_printf(sc->sc_dev, 4024 "hal qnum %u out of range, max %zu!\n", 4025 qnum, nitems(sc->sc_txq)); 4026 ath_hal_releasetxqueue(ah, qnum); 4027 return NULL; 4028 } 4029 if (!ATH_TXQ_SETUP(sc, qnum)) { 4030 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 4031 sc->sc_txqsetup |= 1<<qnum; 4032 } 4033 return &sc->sc_txq[qnum]; 4034 } 4035 4036 /* 4037 * Setup a hardware data transmit queue for the specified 4038 * access category. The hal may not support all requested 4039 * queues in which case it will return a reference to a 4040 * previously setup queue. We record the mapping from ac's 4041 * to h/w queues for use by ath_tx_start and also track 4042 * the set of h/w queues being used to optimize work in the 4043 * transmit interrupt handler and related routines.
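 *
 * For reference, the consumer side of the mapping is assumed to look
 * roughly like this (a sketch of what ath_tx_start() does, not a
 * verbatim quote):
 *
 *	pri = M_WME_GETAC(m0);		-- WME access category of the mbuf
 *	txq = sc->sc_ac2q[pri];		-- AC -> h/w transmit queue
 *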
4044 */ 4045 static int 4046 ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 4047 { 4048 struct ath_txq *txq; 4049 4050 if (ac >= nitems(sc->sc_ac2q)) { 4051 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 4052 ac, nitems(sc->sc_ac2q)); 4053 return 0; 4054 } 4055 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 4056 if (txq != NULL) { 4057 txq->axq_ac = ac; 4058 sc->sc_ac2q[ac] = txq; 4059 return 1; 4060 } else 4061 return 0; 4062 } 4063 4064 /* 4065 * Update WME parameters for a transmit queue. 4066 */ 4067 static int 4068 ath_txq_update(struct ath_softc *sc, int ac) 4069 { 4070 #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) /* 2^v - 1; eg logcw 4 -> cw 15 */ 4071 struct ieee80211com *ic = &sc->sc_ic; 4072 struct ath_txq *txq = sc->sc_ac2q[ac]; 4073 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 4074 struct ath_hal *ah = sc->sc_ah; 4075 HAL_TXQ_INFO qi; 4076 4077 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 4078 #ifdef IEEE80211_SUPPORT_TDMA 4079 if (sc->sc_tdma) { 4080 /* 4081 * AIFS is zero so there's no pre-transmit wait. The 4082 * burst time defines the slot duration and is configured 4083 * through net80211. The QCU is setup to not do post-xmit 4084 * back off, to lock out all lower-priority QCUs, and to fire 4085 * off the DMA beacon alert timer which is setup based 4086 * on the slot configuration. 4087 */ 4088 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 4089 | HAL_TXQ_TXERRINT_ENABLE 4090 | HAL_TXQ_TXURNINT_ENABLE 4091 | HAL_TXQ_TXEOLINT_ENABLE 4092 | HAL_TXQ_DBA_GATED 4093 | HAL_TXQ_BACKOFF_DISABLE 4094 | HAL_TXQ_ARB_LOCKOUT_GLOBAL 4095 ; 4096 qi.tqi_aifs = 0; 4097 /* XXX +dbaprep? */ 4098 qi.tqi_readyTime = sc->sc_tdmaslotlen; 4099 qi.tqi_burstTime = qi.tqi_readyTime; 4100 } else { 4101 #endif 4102 /* 4103 * XXX shouldn't this just use the default flags 4104 * used in the previous queue setup? 4105 */ 4106 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 4107 | HAL_TXQ_TXERRINT_ENABLE 4108 | HAL_TXQ_TXDESCINT_ENABLE 4109 | HAL_TXQ_TXURNINT_ENABLE 4110 | HAL_TXQ_TXEOLINT_ENABLE 4111 ; 4112 qi.tqi_aifs = wmep->wmep_aifsn; 4113 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 4114 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 4115 qi.tqi_readyTime = 0; 4116 qi.tqi_burstTime = IEEE80211_TXOP_TO_US(wmep->wmep_txopLimit); 4117 #ifdef IEEE80211_SUPPORT_TDMA 4118 } 4119 #endif 4120 4121 DPRINTF(sc, ATH_DEBUG_RESET, 4122 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 4123 __func__, txq->axq_qnum, qi.tqi_qflags, 4124 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 4125 4126 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 4127 device_printf(sc->sc_dev, "unable to update hardware queue " 4128 "parameters for %s traffic!\n", ieee80211_wme_acnames[ac]); 4129 return 0; 4130 } else { 4131 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 4132 return 1; 4133 } 4134 #undef ATH_EXPONENT_TO_VALUE 4135 } 4136 4137 /* 4138 * Callback from the 802.11 layer to update WME parameters. 4139 */ 4140 int 4141 ath_wme_update(struct ieee80211com *ic) 4142 { 4143 struct ath_softc *sc = ic->ic_softc; 4144 4145 return !ath_txq_update(sc, WME_AC_BE) || 4146 !ath_txq_update(sc, WME_AC_BK) || 4147 !ath_txq_update(sc, WME_AC_VI) || 4148 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 4149 } 4150 4151 /* 4152 * Reclaim resources for a setup queue.
4153 */ 4154 static void 4155 ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 4156 { 4157 4158 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 4159 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 4160 ATH_TXQ_LOCK_DESTROY(txq); 4161 } 4162 4163 /* 4164 * Reclaim all tx queue resources. 4165 */ 4166 static void 4167 ath_tx_cleanup(struct ath_softc *sc) 4168 { 4169 int i; 4170 4171 ATH_TXBUF_LOCK_DESTROY(sc); 4172 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4173 if (ATH_TXQ_SETUP(sc, i)) 4174 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 4175 } 4176 4177 /* 4178 * Return h/w rate index for an IEEE rate (w/o basic rate bit) 4179 * using the current rates in sc_rixmap. 4180 */ 4181 int 4182 ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 4183 { 4184 int rix = sc->sc_rixmap[rate]; 4185 /* NB: return lowest rix for invalid rate */ 4186 return (rix == 0xff ? 0 : rix); 4187 } 4188 4189 static void 4190 ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 4191 struct ath_buf *bf) 4192 { 4193 struct ieee80211_node *ni = bf->bf_node; 4194 struct ieee80211com *ic = &sc->sc_ic; 4195 int sr, lr, pri; 4196 4197 if (ts->ts_status == 0) { 4198 u_int8_t txant = ts->ts_antenna; 4199 sc->sc_stats.ast_ant_tx[txant]++; 4200 sc->sc_ant_tx[txant]++; 4201 if (ts->ts_finaltsi != 0) 4202 sc->sc_stats.ast_tx_altrate++; 4203 pri = M_WME_GETAC(bf->bf_m); 4204 if (pri >= WME_AC_VO) 4205 ic->ic_wme.wme_hipri_traffic++; 4206 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) 4207 ni->ni_inact = ni->ni_inact_reload; 4208 } else { 4209 if (ts->ts_status & HAL_TXERR_XRETRY) 4210 sc->sc_stats.ast_tx_xretries++; 4211 if (ts->ts_status & HAL_TXERR_FIFO) 4212 sc->sc_stats.ast_tx_fifoerr++; 4213 if (ts->ts_status & HAL_TXERR_FILT) 4214 sc->sc_stats.ast_tx_filtered++; 4215 if (ts->ts_status & HAL_TXERR_XTXOP) 4216 sc->sc_stats.ast_tx_xtxop++; 4217 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 4218 sc->sc_stats.ast_tx_timerexpired++; 4219 4220 if (bf->bf_m->m_flags & M_FF) 4221 sc->sc_stats.ast_ff_txerr++; 4222 } 4223 /* XXX when is this valid? */ 4224 if (ts->ts_flags & HAL_TX_DESC_CFG_ERR) 4225 sc->sc_stats.ast_tx_desccfgerr++; 4226 /* 4227 * This can be valid for successful frame transmission! 4228 * If there's a TX FIFO underrun during aggregate transmission, 4229 * the MAC will pad the rest of the aggregate with delimiters. 4230 * If a BA is returned, the frame is marked as "OK" and it's up 4231 * to the TX completion code to notice which frames weren't 4232 * successfully transmitted. 4233 */ 4234 if (ts->ts_flags & HAL_TX_DATA_UNDERRUN) 4235 sc->sc_stats.ast_tx_data_underrun++; 4236 if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN) 4237 sc->sc_stats.ast_tx_delim_underrun++; 4238 4239 sr = ts->ts_shortretry; 4240 lr = ts->ts_longretry; 4241 sc->sc_stats.ast_tx_shortretry += sr; 4242 sc->sc_stats.ast_tx_longretry += lr; 4243 4244 } 4245 4246 /* 4247 * The default completion. If fail is 1, this means 4248 * "please don't retry the frame, and just return -1 status 4249 * to the net80211 stack". 4250 */ 4251 void 4252 ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4253 { 4254 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4255 int st; 4256 4257 if (fail == 1) 4258 st = -1; 4259 else 4260 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
4261 ts->ts_status : HAL_TXERR_XRETRY; 4262 4263 #if 0 4264 if (bf->bf_state.bfs_dobaw) 4265 device_printf(sc->sc_dev, 4266 "%s: bf %p: seqno %d: dobaw should've been cleared!\n", 4267 __func__, 4268 bf, 4269 SEQNO(bf->bf_state.bfs_seqno)); 4270 #endif 4271 if (bf->bf_next != NULL) 4272 device_printf(sc->sc_dev, 4273 "%s: bf %p: seqno %d: bf_next not NULL!\n", 4274 __func__, 4275 bf, 4276 SEQNO(bf->bf_state.bfs_seqno)); 4277 4278 /* 4279 * Check if the node software queue is empty; if so 4280 * then clear the TIM. 4281 * 4282 * This needs to be done before the buffer is freed as 4283 * otherwise the node reference will have been released 4284 * and the node may not actually exist any longer. 4285 * 4286 * XXX I don't like this belonging here, but it's cleaner 4287 * to do it here right now than in all the other places 4288 * where ath_tx_default_comp() is called. 4289 * 4290 * XXX TODO: during drain, ensure that the callback is 4291 * being called so we get a chance to update the TIM. 4292 */ 4293 if (bf->bf_node) { 4294 ATH_TX_LOCK(sc); 4295 ath_tx_update_tim(sc, bf->bf_node, 0); 4296 ATH_TX_UNLOCK(sc); 4297 } 4298 4299 /* 4300 * Do any tx complete callback. Note this must 4301 * be done before releasing the node reference. 4302 * This will free the mbuf, release the net80211 4303 * node and recycle the ath_buf. 4304 */ 4305 ath_tx_freebuf(sc, bf, st); 4306 } 4307 4308 /* 4309 * Update rate control with the given completion status. 4310 */ 4311 void 4312 ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 4313 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 4314 int nframes, int nbad) 4315 { 4316 struct ath_node *an; 4317 4318 /* Only for unicast frames */ 4319 if (ni == NULL) 4320 return; 4321 4322 an = ATH_NODE(ni); 4323 ATH_NODE_UNLOCK_ASSERT(an); 4324 4325 if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 4326 ATH_NODE_LOCK(an); 4327 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 4328 ATH_NODE_UNLOCK(an); 4329 } 4330 } 4331 4332 /* 4333 * Process the completion of the given buffer. 4334 * 4335 * This calls the rate control update and then the buffer completion. 4336 * This will either free the buffer or requeue it. In any case, the 4337 * bf pointer should be treated as invalid after this function is called. 4338 */ 4339 void 4340 ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq, 4341 struct ath_tx_status *ts, struct ath_buf *bf) 4342 { 4343 struct ieee80211_node *ni = bf->bf_node; 4344 4345 ATH_TX_UNLOCK_ASSERT(sc); 4346 ATH_TXQ_UNLOCK_ASSERT(txq); 4347 4348 /* If unicast frame, update general statistics */ 4349 if (ni != NULL) { 4350 /* update statistics */ 4351 ath_tx_update_stats(sc, ts, bf); 4352 } 4353 4354 /* 4355 * Call the completion handler. 4356 * The completion handler is responsible for 4357 * calling the rate control code. 4358 * 4359 * Frames with no completion handler get the 4360 * rate control code called here. 4361 */ 4362 if (bf->bf_comp == NULL) { 4363 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 4364 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 4365 /* 4366 * XXX assume this isn't an aggregate 4367 * frame. 4368 */ 4369 ath_tx_update_ratectrl(sc, ni, 4370 bf->bf_state.bfs_rc, ts, 4371 bf->bf_state.bfs_pktlen, 1, 4372 (ts->ts_status == 0 ? 0 : 1)); 4373 } 4374 ath_tx_default_comp(sc, bf, 0); 4375 } else 4376 bf->bf_comp(sc, bf, 0); 4377 } 4378 4379 4380 4381 /* 4382 * Process completed xmit descriptors from the specified queue. 4383 * Kick the packet scheduler if needed.
This can occur from this 4384 * particular task. 4385 */ 4386 static int 4387 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 4388 { 4389 struct ath_hal *ah = sc->sc_ah; 4390 struct ath_buf *bf; 4391 struct ath_desc *ds; 4392 struct ath_tx_status *ts; 4393 struct ieee80211_node *ni; 4394 #ifdef IEEE80211_SUPPORT_SUPERG 4395 struct ieee80211com *ic = &sc->sc_ic; 4396 #endif /* IEEE80211_SUPPORT_SUPERG */ 4397 int nacked; 4398 HAL_STATUS status; 4399 4400 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 4401 __func__, txq->axq_qnum, 4402 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4403 txq->axq_link); 4404 4405 ATH_KTR(sc, ATH_KTR_TXCOMP, 4, 4406 "ath_tx_processq: txq=%u head %p link %p depth %d", 4407 txq->axq_qnum, 4408 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4409 txq->axq_link, 4410 txq->axq_depth); 4411 4412 nacked = 0; 4413 for (;;) { 4414 ATH_TXQ_LOCK(txq); 4415 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 4416 bf = TAILQ_FIRST(&txq->axq_q); 4417 if (bf == NULL) { 4418 ATH_TXQ_UNLOCK(txq); 4419 break; 4420 } 4421 ds = bf->bf_lastds; /* XXX must be setup correctly! */ 4422 ts = &bf->bf_status.ds_txstat; 4423 4424 status = ath_hal_txprocdesc(ah, ds, ts); 4425 #ifdef ATH_DEBUG 4426 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 4427 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 4428 status == HAL_OK); 4429 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) 4430 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 4431 status == HAL_OK); 4432 #endif 4433 #ifdef ATH_DEBUG_ALQ 4434 if (if_ath_alq_checkdebug(&sc->sc_alq, 4435 ATH_ALQ_EDMA_TXSTATUS)) { 4436 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS, 4437 sc->sc_tx_statuslen, 4438 (char *) ds); 4439 } 4440 #endif 4441 4442 if (status == HAL_EINPROGRESS) { 4443 ATH_KTR(sc, ATH_KTR_TXCOMP, 3, 4444 "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS", 4445 txq->axq_qnum, bf, ds); 4446 ATH_TXQ_UNLOCK(txq); 4447 break; 4448 } 4449 ATH_TXQ_REMOVE(txq, bf, bf_list); 4450 4451 /* 4452 * Sanity check. 4453 */ 4454 if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) { 4455 device_printf(sc->sc_dev, 4456 "%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n", 4457 __func__, 4458 txq->axq_qnum, 4459 bf, 4460 bf->bf_state.bfs_tx_queue); 4461 } 4462 if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) { 4463 device_printf(sc->sc_dev, 4464 "%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n", 4465 __func__, 4466 txq->axq_qnum, 4467 bf->bf_last, 4468 bf->bf_last->bf_state.bfs_tx_queue); 4469 } 4470 4471 #if 0 4472 if (txq->axq_depth > 0) { 4473 /* 4474 * More frames follow. Mark the buffer busy 4475 * so it's not re-used while the hardware may 4476 * still re-read the link field in the descriptor. 4477 * 4478 * Use the last buffer in an aggregate as that 4479 * is where the hardware may be - intermediate 4480 * descriptors won't be "busy". 4481 */ 4482 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 4483 } else 4484 txq->axq_link = NULL; 4485 #else 4486 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 4487 #endif 4488 if (bf->bf_state.bfs_aggr) 4489 txq->axq_aggr_depth--; 4490 4491 ni = bf->bf_node; 4492 4493 ATH_KTR(sc, ATH_KTR_TXCOMP, 5, 4494 "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x", 4495 txq->axq_qnum, bf, ds, ni, ts->ts_status); 4496 /* 4497 * If unicast frame was ack'd update RSSI, 4498 * including the last rx time used to 4499 * work around phantom bmiss interrupts.
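 *
 * The averaging itself is a simple IIR low-pass filter; assuming
 * ATH_RSSI_LPF() keeps its usual definition from if_athvar.h, each
 * sample folds in roughly as:
 *
 *	avg = (avg * (ATH_RSSI_LPF_LEN - 1) + sample) / ATH_RSSI_LPF_LEN
 *
 * so one strong or weak ACK nudges the average rather than replacing it.
 *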
4500 */ 4501 if (ni != NULL && ts->ts_status == 0 && 4502 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 4503 nacked++; 4504 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 4505 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 4506 ts->ts_rssi); 4507 } 4508 ATH_TXQ_UNLOCK(txq); 4509 4510 /* 4511 * Update statistics and call completion 4512 */ 4513 ath_tx_process_buf_completion(sc, txq, ts, bf); 4514 4515 /* XXX at this point, bf and ni may be totally invalid */ 4516 } 4517 #ifdef IEEE80211_SUPPORT_SUPERG 4518 /* 4519 * Flush fast-frame staging queue when traffic slows. 4520 */ 4521 if (txq->axq_depth <= 1) 4522 ieee80211_ff_flush(ic, txq->axq_ac); 4523 #endif 4524 4525 /* Kick the software TXQ scheduler */ 4526 if (dosched) { 4527 ATH_TX_LOCK(sc); 4528 ath_txq_sched(sc, txq); 4529 ATH_TX_UNLOCK(sc); 4530 } 4531 4532 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4533 "ath_tx_processq: txq=%u: done", 4534 txq->axq_qnum); 4535 4536 return nacked; 4537 } 4538 4539 #define TXQACTIVE(t, q) ( (t) & (1 << (q))) 4540 4541 /* 4542 * Deferred processing of transmit interrupt; special-cased 4543 * for a single hardware transmit queue (e.g. 5210 and 5211). 4544 */ 4545 static void 4546 ath_tx_proc_q0(void *arg, int npending) 4547 { 4548 struct ath_softc *sc = arg; 4549 uint32_t txqs; 4550 4551 ATH_PCU_LOCK(sc); 4552 sc->sc_txproc_cnt++; 4553 txqs = sc->sc_txq_active; 4554 sc->sc_txq_active &= ~txqs; 4555 ATH_PCU_UNLOCK(sc); 4556 4557 ATH_LOCK(sc); 4558 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4559 ATH_UNLOCK(sc); 4560 4561 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4562 "ath_tx_proc_q0: txqs=0x%08x", txqs); 4563 4564 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 4565 /* XXX why is lastrx updated in tx code? */ 4566 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4567 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 4568 ath_tx_processq(sc, sc->sc_cabq, 1); 4569 sc->sc_wd_timer = 0; 4570 4571 if (sc->sc_softled) 4572 ath_led_event(sc, sc->sc_txrix); 4573 4574 ATH_PCU_LOCK(sc); 4575 sc->sc_txproc_cnt--; 4576 ATH_PCU_UNLOCK(sc); 4577 4578 ATH_LOCK(sc); 4579 ath_power_restore_power_state(sc); 4580 ATH_UNLOCK(sc); 4581 4582 ath_tx_kick(sc); 4583 } 4584 4585 /* 4586 * Deferred processing of transmit interrupt; special-cased 4587 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 4588 */ 4589 static void 4590 ath_tx_proc_q0123(void *arg, int npending) 4591 { 4592 struct ath_softc *sc = arg; 4593 int nacked; 4594 uint32_t txqs; 4595 4596 ATH_PCU_LOCK(sc); 4597 sc->sc_txproc_cnt++; 4598 txqs = sc->sc_txq_active; 4599 sc->sc_txq_active &= ~txqs; 4600 ATH_PCU_UNLOCK(sc); 4601 4602 ATH_LOCK(sc); 4603 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4604 ATH_UNLOCK(sc); 4605 4606 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4607 "ath_tx_proc_q0123: txqs=0x%08x", txqs); 4608 4609 /* 4610 * Process each active queue. 
4611 */ 4612 nacked = 0; 4613 if (TXQACTIVE(txqs, 0)) 4614 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 4615 if (TXQACTIVE(txqs, 1)) 4616 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 4617 if (TXQACTIVE(txqs, 2)) 4618 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 4619 if (TXQACTIVE(txqs, 3)) 4620 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 4621 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 4622 ath_tx_processq(sc, sc->sc_cabq, 1); 4623 if (nacked) 4624 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4625 4626 sc->sc_wd_timer = 0; 4627 4628 if (sc->sc_softled) 4629 ath_led_event(sc, sc->sc_txrix); 4630 4631 ATH_PCU_LOCK(sc); 4632 sc->sc_txproc_cnt--; 4633 ATH_PCU_UNLOCK(sc); 4634 4635 ATH_LOCK(sc); 4636 ath_power_restore_power_state(sc); 4637 ATH_UNLOCK(sc); 4638 4639 ath_tx_kick(sc); 4640 } 4641 4642 /* 4643 * Deferred processing of transmit interrupt. 4644 */ 4645 static void 4646 ath_tx_proc(void *arg, int npending) 4647 { 4648 struct ath_softc *sc = arg; 4649 int i, nacked; 4650 uint32_t txqs; 4651 4652 ATH_PCU_LOCK(sc); 4653 sc->sc_txproc_cnt++; 4654 txqs = sc->sc_txq_active; 4655 sc->sc_txq_active &= ~txqs; 4656 ATH_PCU_UNLOCK(sc); 4657 4658 ATH_LOCK(sc); 4659 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4660 ATH_UNLOCK(sc); 4661 4662 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs); 4663 4664 /* 4665 * Process each active queue. 4666 */ 4667 nacked = 0; 4668 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4669 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 4670 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 4671 if (nacked) 4672 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4673 4674 sc->sc_wd_timer = 0; 4675 4676 if (sc->sc_softled) 4677 ath_led_event(sc, sc->sc_txrix); 4678 4679 ATH_PCU_LOCK(sc); 4680 sc->sc_txproc_cnt--; 4681 ATH_PCU_UNLOCK(sc); 4682 4683 ATH_LOCK(sc); 4684 ath_power_restore_power_state(sc); 4685 ATH_UNLOCK(sc); 4686 4687 ath_tx_kick(sc); 4688 } 4689 #undef TXQACTIVE 4690 4691 /* 4692 * Deferred processing of TXQ rescheduling. 4693 */ 4694 static void 4695 ath_txq_sched_tasklet(void *arg, int npending) 4696 { 4697 struct ath_softc *sc = arg; 4698 int i; 4699 4700 /* XXX is skipping ok? 
*/ 4701 ATH_PCU_LOCK(sc); 4702 #if 0 4703 if (sc->sc_inreset_cnt > 0) { 4704 device_printf(sc->sc_dev, 4705 "%s: sc_inreset_cnt > 0; skipping\n", __func__); 4706 ATH_PCU_UNLOCK(sc); 4707 return; 4708 } 4709 #endif 4710 sc->sc_txproc_cnt++; 4711 ATH_PCU_UNLOCK(sc); 4712 4713 ATH_LOCK(sc); 4714 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4715 ATH_UNLOCK(sc); 4716 4717 ATH_TX_LOCK(sc); 4718 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 4719 if (ATH_TXQ_SETUP(sc, i)) { 4720 ath_txq_sched(sc, &sc->sc_txq[i]); 4721 } 4722 } 4723 ATH_TX_UNLOCK(sc); 4724 4725 ATH_LOCK(sc); 4726 ath_power_restore_power_state(sc); 4727 ATH_UNLOCK(sc); 4728 4729 ATH_PCU_LOCK(sc); 4730 sc->sc_txproc_cnt--; 4731 ATH_PCU_UNLOCK(sc); 4732 } 4733 4734 void 4735 ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) 4736 { 4737 4738 ATH_TXBUF_LOCK_ASSERT(sc); 4739 4740 if (bf->bf_flags & ATH_BUF_MGMT) 4741 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); 4742 else { 4743 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 4744 sc->sc_txbuf_cnt++; 4745 if (sc->sc_txbuf_cnt > ath_txbuf) { 4746 device_printf(sc->sc_dev, 4747 "%s: sc_txbuf_cnt > %d?\n", 4748 __func__, 4749 ath_txbuf); 4750 sc->sc_txbuf_cnt = ath_txbuf; 4751 } 4752 } 4753 } 4754 4755 void 4756 ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) 4757 { 4758 4759 ATH_TXBUF_LOCK_ASSERT(sc); 4760 4761 if (bf->bf_flags & ATH_BUF_MGMT) 4762 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); 4763 else { 4764 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 4765 sc->sc_txbuf_cnt++; 4766 if (sc->sc_txbuf_cnt > ATH_TXBUF) { 4767 device_printf(sc->sc_dev, 4768 "%s: sc_txbuf_cnt > %d?\n", 4769 __func__, 4770 ATH_TXBUF); 4771 sc->sc_txbuf_cnt = ATH_TXBUF; 4772 } 4773 } 4774 } 4775 4776 /* 4777 * Free the holding buffer if it exists 4778 */ 4779 void 4780 ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq) 4781 { 4782 ATH_TXBUF_UNLOCK_ASSERT(sc); 4783 ATH_TXQ_LOCK_ASSERT(txq); 4784 4785 if (txq->axq_holdingbf == NULL) 4786 return; 4787 4788 txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY; 4789 4790 ATH_TXBUF_LOCK(sc); 4791 ath_returnbuf_tail(sc, txq->axq_holdingbf); 4792 ATH_TXBUF_UNLOCK(sc); 4793 4794 txq->axq_holdingbf = NULL; 4795 } 4796 4797 /* 4798 * Add this buffer to the holding queue, freeing the previous 4799 * one if it exists. 4800 */ 4801 static void 4802 ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf) 4803 { 4804 struct ath_txq *txq; 4805 4806 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; 4807 4808 ATH_TXBUF_UNLOCK_ASSERT(sc); 4809 ATH_TXQ_LOCK_ASSERT(txq); 4810 4811 /* XXX assert ATH_BUF_BUSY is set */ 4812 4813 /* Reject out of range tx queues; valid indices are 0..HAL_NUM_TX_QUEUES-1 */ 4814 if (bf->bf_state.bfs_tx_queue >= HAL_NUM_TX_QUEUES) { 4815 device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n", 4816 __func__, 4817 bf, 4818 bf->bf_state.bfs_tx_queue); 4819 bf->bf_flags &= ~ATH_BUF_BUSY; 4820 ath_returnbuf_tail(sc, bf); 4821 return; 4822 } 4823 ath_txq_freeholdingbuf(sc, txq); 4824 txq->axq_holdingbf = bf; 4825 } 4826 4827 /* 4828 * Return a buffer to the pool and update the 'busy' flag on the 4829 * previous 'tail' entry. 4830 * 4831 * This _must_ only be called when the buffer is involved in a completed 4832 * TX. The logic is that if it was part of an active TX, the previous 4833 * buffer on the list is now not involved in a halted TX DMA queue, waiting 4834 * for restart (eg for TDMA.) 4835 * 4836 * The caller must free the mbuf and recycle the node reference.
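 * The canonical caller sequence is ath_tx_freebuf() below, roughly:
 *
 *	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
 *	bf->bf_node = NULL;
 *	bf->bf_m = NULL;
 *	ath_freebuf(sc, bf);			-- this routine
 *	ieee80211_tx_complete(ni, m0, status);	-- mbuf + node reference
 *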
4837 * 4838 * XXX This method of handling busy / holding buffers is insanely stupid. 4839 * It requires bf_state.bfs_tx_queue to be correctly assigned. It would 4840 * be much nicer if buffers in the processq() methods would instead be 4841 * always completed there (pushed onto a txq or ath_bufhead) so we knew 4842 * exactly what hardware queue they came from in the first place. 4843 */ 4844 void 4845 ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 4846 { 4847 struct ath_txq *txq; 4848 4849 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; 4850 4851 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 4852 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 4853 4854 /* 4855 * If this buffer is busy, push it onto the holding queue. 4856 */ 4857 if (bf->bf_flags & ATH_BUF_BUSY) { 4858 ATH_TXQ_LOCK(txq); 4859 ath_txq_addholdingbuf(sc, bf); 4860 ATH_TXQ_UNLOCK(txq); 4861 return; 4862 } 4863 4864 /* 4865 * Not a busy buffer, so free normally 4866 */ 4867 ATH_TXBUF_LOCK(sc); 4868 ath_returnbuf_tail(sc, bf); 4869 ATH_TXBUF_UNLOCK(sc); 4870 } 4871 4872 /* 4873 * This is currently used by ath_tx_draintxq() and 4874 * ath_tx_tid_free_pkts(). 4875 * 4876 * It recycles a single ath_buf. 4877 */ 4878 void 4879 ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 4880 { 4881 struct ieee80211_node *ni = bf->bf_node; 4882 struct mbuf *m0 = bf->bf_m; 4883 4884 /* 4885 * Make sure that we only sync/unload if there's an mbuf. 4886 * If not (eg we cloned a buffer), the unload will have already 4887 * occurred. 4888 */ 4889 if (bf->bf_m != NULL) { 4890 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4891 BUS_DMASYNC_POSTWRITE); 4892 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 4893 } 4894 4895 bf->bf_node = NULL; 4896 bf->bf_m = NULL; 4897 4898 /* Free the buffer, it's not needed any longer */ 4899 ath_freebuf(sc, bf); 4900 4901 /* Pass the buffer back to net80211 - completing it */ 4902 ieee80211_tx_complete(ni, m0, status); 4903 } 4904 4905 static struct ath_buf * 4906 ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq) 4907 { 4908 struct ath_buf *bf; 4909 4910 ATH_TXQ_LOCK_ASSERT(txq); 4911 4912 /* 4913 * Drain the FIFO queue first, then if it's 4914 * empty, move to the normal frame queue. 4915 */ 4916 bf = TAILQ_FIRST(&txq->fifo.axq_q); 4917 if (bf != NULL) { 4918 /* 4919 * Is it the last buffer in this set? 4920 * Decrement the FIFO counter. 4921 */ 4922 if (bf->bf_flags & ATH_BUF_FIFOEND) { 4923 if (txq->axq_fifo_depth == 0) { 4924 device_printf(sc->sc_dev, 4925 "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n", 4926 __func__, 4927 txq->axq_qnum, 4928 txq->fifo.axq_depth); 4929 } else 4930 txq->axq_fifo_depth--; 4931 } 4932 ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list); 4933 return (bf); 4934 } 4935 4936 /* 4937 * Debugging! 4938 */ 4939 if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) { 4940 device_printf(sc->sc_dev, 4941 "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n", 4942 __func__, 4943 txq->axq_qnum, 4944 txq->axq_fifo_depth, 4945 txq->fifo.axq_depth); 4946 } 4947 4948 /* 4949 * Now drain the pending queue. 
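 *
 * (For EDMA chips, fifo.axq_q tracks frames already pushed into the
 * hardware TX FIFO while axq_q holds frames queued to the driver but
 * not yet handed to the FIFO - hence FIFO entries are reclaimed first,
 * then the software-pending list below.)
 *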
4950 */ 4951 bf = TAILQ_FIRST(&txq->axq_q); 4952 if (bf == NULL) { 4953 txq->axq_link = NULL; 4954 return (NULL); 4955 } 4956 ATH_TXQ_REMOVE(txq, bf, bf_list); 4957 return (bf); 4958 } 4959 4960 void 4961 ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 4962 { 4963 #ifdef ATH_DEBUG 4964 struct ath_hal *ah = sc->sc_ah; 4965 #endif 4966 struct ath_buf *bf; 4967 u_int ix; 4968 4969 /* 4970 * NB: this assumes output has been stopped and 4971 * we do not need to block ath_tx_proc 4972 */ 4973 for (ix = 0;; ix++) { 4974 ATH_TXQ_LOCK(txq); 4975 bf = ath_tx_draintxq_get_one(sc, txq); 4976 if (bf == NULL) { 4977 ATH_TXQ_UNLOCK(txq); 4978 break; 4979 } 4980 if (bf->bf_state.bfs_aggr) 4981 txq->axq_aggr_depth--; 4982 #ifdef ATH_DEBUG 4983 if (sc->sc_debug & ATH_DEBUG_RESET) { 4984 struct ieee80211com *ic = &sc->sc_ic; 4985 int status = 0; 4986 4987 /* 4988 * EDMA operation has a TX completion FIFO 4989 * separate from the TX descriptor, so this 4990 * method of checking the "completion" status 4991 * is wrong. 4992 */ 4993 if (! sc->sc_isedma) { 4994 status = (ath_hal_txprocdesc(ah, 4995 bf->bf_lastds, 4996 &bf->bf_status.ds_txstat) == HAL_OK); 4997 } 4998 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status); 4999 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 5000 bf->bf_m->m_len, 0, -1); 5001 } 5002 #endif /* ATH_DEBUG */ 5003 /* 5004 * Since we're now doing magic in the completion 5005 * functions, we -must- call it for aggregation 5006 * destinations or BAW tracking will get upset. 5007 */ 5008 /* 5009 * Clear ATH_BUF_BUSY; the completion handler 5010 * will free the buffer. 5011 */ 5012 ATH_TXQ_UNLOCK(txq); 5013 bf->bf_flags &= ~ATH_BUF_BUSY; 5014 if (bf->bf_comp) 5015 bf->bf_comp(sc, bf, 1); 5016 else 5017 ath_tx_default_comp(sc, bf, 1); 5018 } 5019 5020 /* 5021 * Free the holding buffer if it exists 5022 */ 5023 ATH_TXQ_LOCK(txq); 5024 ath_txq_freeholdingbuf(sc, txq); 5025 ATH_TXQ_UNLOCK(txq); 5026 5027 /* 5028 * Drain software queued frames which are on 5029 * active TIDs. 5030 */ 5031 ath_tx_txq_drain(sc, txq); 5032 } 5033 5034 static void 5035 ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 5036 { 5037 struct ath_hal *ah = sc->sc_ah; 5038 5039 ATH_TXQ_LOCK_ASSERT(txq); 5040 5041 DPRINTF(sc, ATH_DEBUG_RESET, 5042 "%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, " 5043 "link %p, holdingbf=%p\n", 5044 __func__, 5045 txq->axq_qnum, 5046 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 5047 (int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)), 5048 (int) ath_hal_numtxpending(ah, txq->axq_qnum), 5049 txq->axq_flags, 5050 txq->axq_link, 5051 txq->axq_holdingbf); 5052 5053 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 5054 /* We've stopped TX DMA, so mark this as stopped. 
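 * ATH_TXQ_PUTRUNNING is assumed here to mean "the hardware has been
 * handed a descriptor chain via ath_hal_puttxbuf()"; clearing it tells
 * the TX path that the next frame must re-prime TXDP rather than just
 * chain onto the old list, roughly (a sketch of the handoff logic):
 *
 *	if ((txq->axq_flags & ATH_TXQ_PUTRUNNING) == 0) {
 *		ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
 *		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
 *	}
 *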
*/ 5055 txq->axq_flags &= ~ATH_TXQ_PUTRUNNING; 5056 5057 #ifdef ATH_DEBUG 5058 if ((sc->sc_debug & ATH_DEBUG_RESET) 5059 && (txq->axq_holdingbf != NULL)) { 5060 ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0); 5061 } 5062 #endif 5063 } 5064 5065 int 5066 ath_stoptxdma(struct ath_softc *sc) 5067 { 5068 struct ath_hal *ah = sc->sc_ah; 5069 int i; 5070 5071 /* XXX return value */ 5072 if (sc->sc_invalid) 5073 return 0; 5074 5075 if (!sc->sc_invalid) { 5076 /* don't touch the hardware if marked invalid */ 5077 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 5078 __func__, sc->sc_bhalq, 5079 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 5080 NULL); 5081 5082 /* stop the beacon queue */ 5083 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 5084 5085 /* Stop the data queues */ 5086 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5087 if (ATH_TXQ_SETUP(sc, i)) { 5088 ATH_TXQ_LOCK(&sc->sc_txq[i]); 5089 ath_tx_stopdma(sc, &sc->sc_txq[i]); 5090 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 5091 } 5092 } 5093 } 5094 5095 return 1; 5096 } 5097 5098 #ifdef ATH_DEBUG 5099 void 5100 ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq) 5101 { 5102 struct ath_hal *ah = sc->sc_ah; 5103 struct ath_buf *bf; 5104 int i = 0; 5105 5106 if (! (sc->sc_debug & ATH_DEBUG_RESET)) 5107 return; 5108 5109 device_printf(sc->sc_dev, "%s: Q%d: begin\n", 5110 __func__, txq->axq_qnum); 5111 TAILQ_FOREACH(bf, &txq->axq_q, bf_list) { 5112 ath_printtxbuf(sc, bf, txq->axq_qnum, i, 5113 ath_hal_txprocdesc(ah, bf->bf_lastds, 5114 &bf->bf_status.ds_txstat) == HAL_OK); 5115 i++; 5116 } 5117 device_printf(sc->sc_dev, "%s: Q%d: end\n", 5118 __func__, txq->axq_qnum); 5119 } 5120 #endif /* ATH_DEBUG */ 5121 5122 /* 5123 * Drain the transmit queues and reclaim resources. 5124 */ 5125 void 5126 ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 5127 { 5128 struct ath_hal *ah = sc->sc_ah; 5129 struct ath_buf *bf_last; 5130 int i; 5131 5132 (void) ath_stoptxdma(sc); 5133 5134 /* 5135 * Dump the queue contents 5136 */ 5137 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5138 /* 5139 * XXX TODO: should we just handle the completed TX frames 5140 * here, whether or not the reset is a full one or not? 5141 */ 5142 if (ATH_TXQ_SETUP(sc, i)) { 5143 #ifdef ATH_DEBUG 5144 if (sc->sc_debug & ATH_DEBUG_RESET) 5145 ath_tx_dump(sc, &sc->sc_txq[i]); 5146 #endif /* ATH_DEBUG */ 5147 if (reset_type == ATH_RESET_NOLOSS) { 5148 ath_tx_processq(sc, &sc->sc_txq[i], 0); 5149 ATH_TXQ_LOCK(&sc->sc_txq[i]); 5150 /* 5151 * Free the holding buffer; DMA is now 5152 * stopped. 5153 */ 5154 ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]); 5155 /* 5156 * Setup the link pointer to be the 5157 * _last_ buffer/descriptor in the list. 5158 * If there's nothing in the list, set it 5159 * to NULL. 
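 *
 * axq_link caches the address of the link word in that last
 * descriptor, via:
 *
 *	ath_hal_gettxdesclinkptr(ah, bf_last->bf_lastds, &txq->axq_link);
 *
 * so a later handoff can patch the next frame's address into that
 * word instead of re-priming TXDP (see the restart path in the TX
 * handoff code).
 *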
5160 */ 5161 bf_last = ATH_TXQ_LAST(&sc->sc_txq[i], 5162 axq_q_s); 5163 if (bf_last != NULL) { 5164 ath_hal_gettxdesclinkptr(ah, 5165 bf_last->bf_lastds, 5166 &sc->sc_txq[i].axq_link); 5167 } else { 5168 sc->sc_txq[i].axq_link = NULL; 5169 } 5170 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 5171 } else 5172 ath_tx_draintxq(sc, &sc->sc_txq[i]); 5173 } 5174 } 5175 #ifdef ATH_DEBUG 5176 if (sc->sc_debug & ATH_DEBUG_RESET) { 5177 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); 5178 if (bf != NULL && bf->bf_m != NULL) { 5179 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, 5180 ath_hal_txprocdesc(ah, bf->bf_lastds, 5181 &bf->bf_status.ds_txstat) == HAL_OK); 5182 ieee80211_dump_pkt(&sc->sc_ic, 5183 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 5184 0, -1); 5185 } 5186 } 5187 #endif /* ATH_DEBUG */ 5188 sc->sc_wd_timer = 0; 5189 } 5190 5191 /* 5192 * Update internal state after a channel change. 5193 */ 5194 static void 5195 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 5196 { 5197 enum ieee80211_phymode mode; 5198 5199 /* 5200 * Change channels and update the h/w rate map 5201 * if we're switching; e.g. 11a to 11b/g. 5202 */ 5203 mode = ieee80211_chan2mode(chan); 5204 if (mode != sc->sc_curmode) 5205 ath_setcurmode(sc, mode); 5206 sc->sc_curchan = chan; 5207 } 5208 5209 /* 5210 * Set/change channels. If the channel is really being changed, 5211 * it's done by resetting the chip. To accomplish this we must 5212 * first cleanup any pending DMA, then restart things a la 5213 * ath_init. 5214 */ 5215 static int 5216 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 5217 { 5218 struct ieee80211com *ic = &sc->sc_ic; 5219 struct ath_hal *ah = sc->sc_ah; 5220 int ret = 0; 5221 5222 /* Treat this as an interface reset */ 5223 ATH_PCU_UNLOCK_ASSERT(sc); 5224 ATH_UNLOCK_ASSERT(sc); 5225 5226 /* (Try to) stop TX/RX from occurring */ 5227 taskqueue_block(sc->sc_tq); 5228 5229 ATH_PCU_LOCK(sc); 5230 5231 /* Disable interrupts */ 5232 ath_hal_intrset(ah, 0); 5233 5234 /* Stop new RX/TX/interrupt completion */ 5235 if (ath_reset_grablock(sc, 1) == 0) { 5236 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 5237 __func__); 5238 } 5239 5240 /* Stop pending RX/TX completion */ 5241 ath_txrx_stop_locked(sc); 5242 5243 ATH_PCU_UNLOCK(sc); 5244 5245 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", 5246 __func__, ieee80211_chan2ieee(ic, chan), 5247 chan->ic_freq, chan->ic_flags); 5248 if (chan != sc->sc_curchan) { 5249 HAL_STATUS status; 5250 /* 5251 * To switch channels clear any pending DMA operations; 5252 * wait long enough for the RX fifo to drain, reset the 5253 * hardware at the new frequency, and then re-enable 5254 * the relevant bits of the h/w. 5255 */ 5256 #if 0 5257 ath_hal_intrset(ah, 0); /* disable interrupts */ 5258 #endif 5259 ath_stoprecv(sc, 1); /* turn off frame recv */ 5260 /* 5261 * First, handle completed TX/RX frames. 5262 */ 5263 ath_rx_flush(sc); 5264 ath_draintxq(sc, ATH_RESET_NOLOSS); 5265 /* 5266 * Next, flush the non-scheduled frames.
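 *
 * (The ATH_RESET_NOLOSS pass above only reaps frames the hardware has
 * already completed; the ATH_RESET_FULL pass below is the one that
 * discards anything still pending, since it can't survive the switch.)
 *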
5267 */ 5268 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ 5269 5270 ath_update_chainmasks(sc, chan); 5271 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 5272 sc->sc_cur_rxchainmask); 5273 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, 5274 HAL_RESET_NORMAL, &status)) { 5275 device_printf(sc->sc_dev, "%s: unable to reset " 5276 "channel %u (%u MHz, flags 0x%x), hal status %u\n", 5277 __func__, ieee80211_chan2ieee(ic, chan), 5278 chan->ic_freq, chan->ic_flags, status); 5279 ret = EIO; 5280 goto finish; 5281 } 5282 sc->sc_diversity = ath_hal_getdiversity(ah); 5283 5284 ATH_RX_LOCK(sc); 5285 sc->sc_rx_stopped = 1; 5286 sc->sc_rx_resetted = 1; 5287 ATH_RX_UNLOCK(sc); 5288 5289 /* Let DFS at it in case it's a DFS channel */ 5290 ath_dfs_radar_enable(sc, chan); 5291 5292 /* Let spectral at it in case spectral is enabled */ 5293 ath_spectral_enable(sc, chan); 5294 5295 /* 5296 * Let bluetooth coexistence at it in case it's needed for this 5297 * channel 5298 */ 5299 ath_btcoex_enable(sc, ic->ic_curchan); 5300 5301 /* 5302 * If we're doing TDMA, enforce the TXOP limitation for chips 5303 * that support it. 5304 */ 5305 if (sc->sc_hasenforcetxop && sc->sc_tdma) 5306 ath_hal_setenforcetxop(sc->sc_ah, 1); 5307 else 5308 ath_hal_setenforcetxop(sc->sc_ah, 0); 5309 5310 /* 5311 * Re-enable rx framework. 5312 */ 5313 if (ath_startrecv(sc) != 0) { 5314 device_printf(sc->sc_dev, 5315 "%s: unable to restart recv logic\n", __func__); 5316 ret = EIO; 5317 goto finish; 5318 } 5319 5320 /* 5321 * Change channels and update the h/w rate map 5322 * if we're switching; e.g. 11a to 11b/g. 5323 */ 5324 ath_chan_change(sc, chan); 5325 5326 /* 5327 * Reset clears the beacon timers; reset them 5328 * here if needed. 5329 */ 5330 if (sc->sc_beacons) { /* restart beacons */ 5331 #ifdef IEEE80211_SUPPORT_TDMA 5332 if (sc->sc_tdma) 5333 ath_tdma_config(sc, NULL); 5334 else 5335 #endif 5336 ath_beacon_config(sc, NULL); 5337 } 5338 5339 /* 5340 * Re-enable interrupts. 5341 */ 5342 #if 0 5343 ath_hal_intrset(ah, sc->sc_imask); 5344 #endif 5345 } 5346 5347 finish: 5348 ATH_PCU_LOCK(sc); 5349 sc->sc_inreset_cnt--; 5350 /* XXX only do this if sc_inreset_cnt == 0? */ 5351 ath_hal_intrset(ah, sc->sc_imask); 5352 ATH_PCU_UNLOCK(sc); 5353 5354 ath_txrx_start(sc); 5355 /* XXX ath_start? */ 5356 5357 return ret; 5358 } 5359 5360 /* 5361 * Periodically recalibrate the PHY to account 5362 * for temperature/environment changes. 5363 */ 5364 static void 5365 ath_calibrate(void *arg) 5366 { 5367 struct ath_softc *sc = arg; 5368 struct ath_hal *ah = sc->sc_ah; 5369 struct ieee80211com *ic = &sc->sc_ic; 5370 HAL_BOOL longCal, isCalDone = AH_TRUE; 5371 HAL_BOOL aniCal, shortCal = AH_FALSE; 5372 int nextcal; 5373 5374 ATH_LOCK_ASSERT(sc); 5375 5376 /* 5377 * Force the hardware awake for ANI work.
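 *
 * This is the same wake/restore bracket used throughout the driver;
 * the power calls are assumed to refcount (sc_powersave_refcnt), so
 * nested brackets are safe:
 *
 *	ath_power_set_power_state(sc, HAL_PM_AWAKE);
 *	... touch the hardware ...
 *	ath_power_restore_power_state(sc);
 *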
5378 */ 5379 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5380 5381 /* Skip trying to do this if we're in reset */ 5382 if (sc->sc_inreset_cnt) 5383 goto restart; 5384 5385 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 5386 goto restart; 5387 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 5388 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 5389 if (sc->sc_doresetcal) 5390 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 5391 5392 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 5393 if (aniCal) { 5394 sc->sc_stats.ast_ani_cal++; 5395 sc->sc_lastani = ticks; 5396 ath_hal_ani_poll(ah, sc->sc_curchan); 5397 } 5398 5399 if (longCal) { 5400 sc->sc_stats.ast_per_cal++; 5401 sc->sc_lastlongcal = ticks; 5402 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 5403 /* 5404 * Rfgain is out of bounds, reset the chip 5405 * to load new gain values. 5406 */ 5407 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5408 "%s: rfgain change\n", __func__); 5409 sc->sc_stats.ast_per_rfgain++; 5410 sc->sc_resetcal = 0; 5411 sc->sc_doresetcal = AH_TRUE; 5412 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 5413 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5414 ath_power_restore_power_state(sc); 5415 return; 5416 } 5417 /* 5418 * If this long cal is after an idle period, then 5419 * reset the data collection state so we start fresh. 5420 */ 5421 if (sc->sc_resetcal) { 5422 (void) ath_hal_calreset(ah, sc->sc_curchan); 5423 sc->sc_lastcalreset = ticks; 5424 sc->sc_lastshortcal = ticks; 5425 sc->sc_resetcal = 0; 5426 sc->sc_doresetcal = AH_TRUE; 5427 } 5428 } 5429 5430 /* Only call if we're doing a short/long cal, not for ANI calibration */ 5431 if (shortCal || longCal) { 5432 isCalDone = AH_FALSE; 5433 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 5434 if (longCal) { 5435 /* 5436 * Calibrate noise floor data again in case of change. 5437 */ 5438 ath_hal_process_noisefloor(ah); 5439 } 5440 } else { 5441 DPRINTF(sc, ATH_DEBUG_ANY, 5442 "%s: calibration of channel %u failed\n", 5443 __func__, sc->sc_curchan->ic_freq); 5444 sc->sc_stats.ast_per_calfail++; 5445 } 5446 if (shortCal) 5447 sc->sc_lastshortcal = ticks; 5448 } 5449 if (!isCalDone) { 5450 restart: 5451 /* 5452 * Use a shorter interval to potentially collect multiple 5453 * data samples required to complete calibration. Once 5454 * we're told the work is done we drop back to a longer 5455 * interval between requests. We're more aggressive doing 5456 * work when operating as an AP to improve operation right 5457 * after startup. 5458 */ 5459 sc->sc_lastshortcal = ticks; 5460 nextcal = ath_shortcalinterval*hz/1000; 5461 if (sc->sc_opmode != HAL_M_HOSTAP) 5462 nextcal *= 10; 5463 sc->sc_doresetcal = AH_TRUE; 5464 } else { 5465 /* nextcal should be the shortest time for next event */ 5466 nextcal = ath_longcalinterval*hz; 5467 if (sc->sc_lastcalreset == 0) 5468 sc->sc_lastcalreset = sc->sc_lastlongcal; 5469 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 5470 sc->sc_resetcal = 1; /* setup reset next trip */ 5471 sc->sc_doresetcal = AH_FALSE; 5472 } 5473 /* ANI calibration may occur more often than short/long/resetcal */ 5474 if (ath_anicalinterval > 0) 5475 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 5476 5477 if (nextcal != 0) { 5478 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 5479 __func__, nextcal, isCalDone ? 
"" : "!"); 5480 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 5481 } else { 5482 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 5483 __func__); 5484 /* NB: don't rearm timer */ 5485 } 5486 /* 5487 * Restore power state now that we're done. 5488 */ 5489 ath_power_restore_power_state(sc); 5490 } 5491 5492 static void 5493 ath_scan_start(struct ieee80211com *ic) 5494 { 5495 struct ath_softc *sc = ic->ic_softc; 5496 struct ath_hal *ah = sc->sc_ah; 5497 u_int32_t rfilt; 5498 5499 /* XXX calibration timer? */ 5500 /* XXXGL: is constant ieee80211broadcastaddr a correct choice? */ 5501 5502 ATH_LOCK(sc); 5503 sc->sc_scanning = 1; 5504 sc->sc_syncbeacon = 0; 5505 rfilt = ath_calcrxfilter(sc); 5506 ATH_UNLOCK(sc); 5507 5508 ATH_PCU_LOCK(sc); 5509 ath_hal_setrxfilter(ah, rfilt); 5510 ath_hal_setassocid(ah, ieee80211broadcastaddr, 0); 5511 ATH_PCU_UNLOCK(sc); 5512 5513 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 5514 __func__, rfilt, ether_sprintf(ieee80211broadcastaddr)); 5515 } 5516 5517 static void 5518 ath_scan_end(struct ieee80211com *ic) 5519 { 5520 struct ath_softc *sc = ic->ic_softc; 5521 struct ath_hal *ah = sc->sc_ah; 5522 u_int32_t rfilt; 5523 5524 ATH_LOCK(sc); 5525 sc->sc_scanning = 0; 5526 rfilt = ath_calcrxfilter(sc); 5527 ATH_UNLOCK(sc); 5528 5529 ATH_PCU_LOCK(sc); 5530 ath_hal_setrxfilter(ah, rfilt); 5531 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5532 5533 ath_hal_process_noisefloor(ah); 5534 ATH_PCU_UNLOCK(sc); 5535 5536 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5537 __func__, rfilt, ether_sprintf(sc->sc_curbssid), 5538 sc->sc_curaid); 5539 } 5540 5541 #ifdef ATH_ENABLE_11N 5542 /* 5543 * For now, just do a channel change. 5544 * 5545 * Later, we'll go through the hard slog of suspending tx/rx, changing rate 5546 * control state and resetting the hardware without dropping frames out 5547 * of the queue. 5548 * 5549 * The unfortunate trouble here is making absolutely sure that the 5550 * channel width change has propagated enough so the hardware 5551 * absolutely isn't handed bogus frames for it's current operating 5552 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and 5553 * does occur in parallel, we need to make certain we've blocked 5554 * any further ongoing TX (and RX, that can cause raw TX) 5555 * before we do this. 5556 */ 5557 static void 5558 ath_update_chw(struct ieee80211com *ic) 5559 { 5560 struct ath_softc *sc = ic->ic_softc; 5561 5562 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); 5563 ath_set_channel(ic); 5564 } 5565 #endif /* ATH_ENABLE_11N */ 5566 5567 static void 5568 ath_set_channel(struct ieee80211com *ic) 5569 { 5570 struct ath_softc *sc = ic->ic_softc; 5571 5572 ATH_LOCK(sc); 5573 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5574 ATH_UNLOCK(sc); 5575 5576 (void) ath_chan_set(sc, ic->ic_curchan); 5577 /* 5578 * If we are returning to our bss channel then mark state 5579 * so the next recv'd beacon's tsf will be used to sync the 5580 * beacon timers. Note that since we only hear beacons in 5581 * sta/ibss mode this has no effect in other operating modes. 5582 */ 5583 ATH_LOCK(sc); 5584 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 5585 sc->sc_syncbeacon = 1; 5586 ath_power_restore_power_state(sc); 5587 ATH_UNLOCK(sc); 5588 } 5589 5590 /* 5591 * Walk the vap list and check if there any vap's in RUN state. 
5592 */ 5593 static int 5594 ath_isanyrunningvaps(struct ieee80211vap *this) 5595 { 5596 struct ieee80211com *ic = this->iv_ic; 5597 struct ieee80211vap *vap; 5598 5599 IEEE80211_LOCK_ASSERT(ic); 5600 5601 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 5602 if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 5603 return 1; 5604 } 5605 return 0; 5606 } 5607 5608 static int 5609 ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 5610 { 5611 struct ieee80211com *ic = vap->iv_ic; 5612 struct ath_softc *sc = ic->ic_softc; 5613 struct ath_vap *avp = ATH_VAP(vap); 5614 struct ath_hal *ah = sc->sc_ah; 5615 struct ieee80211_node *ni = NULL; 5616 int i, error, stamode; 5617 u_int32_t rfilt; 5618 int csa_run_transition = 0; 5619 enum ieee80211_state ostate = vap->iv_state; 5620 5621 static const HAL_LED_STATE leds[] = { 5622 HAL_LED_INIT, /* IEEE80211_S_INIT */ 5623 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 5624 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 5625 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 5626 HAL_LED_RUN, /* IEEE80211_S_CAC */ 5627 HAL_LED_RUN, /* IEEE80211_S_RUN */ 5628 HAL_LED_RUN, /* IEEE80211_S_CSA */ 5629 HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 5630 }; 5631 5632 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 5633 ieee80211_state_name[ostate], 5634 ieee80211_state_name[nstate]); 5635 5636 /* 5637 * net80211 _should_ have the comlock asserted at this point. 5638 * There are some comments around the calls to vap->iv_newstate 5639 * which indicate that it (newstate) may end up dropping the 5640 * lock. This and the subsequent lock assert check after newstate 5641 * are an attempt to catch these and figure out how/why. 5642 */ 5643 IEEE80211_LOCK_ASSERT(ic); 5644 5645 /* Before we touch the hardware - wake it up */ 5646 ATH_LOCK(sc); 5647 /* 5648 * If the NIC is in anything other than SLEEP state, 5649 * we need to ensure that self-generated frames are 5650 * set for PWRMGT=0. Otherwise we may end up with 5651 * strange situations. 5652 * 5653 * XXX TODO: is this actually the case? :-) 5654 */ 5655 if (nstate != IEEE80211_S_SLEEP) 5656 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5657 5658 /* 5659 * Now, wake the thing up. 5660 */ 5661 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5662 5663 /* 5664 * And stop the calibration callout whilst we have 5665 * ATH_LOCK held. 5666 */ 5667 #if defined(__DragonFly__) 5668 callout_stop_sync(&sc->sc_cal_ch); 5669 #else 5670 callout_stop(&sc->sc_cal_ch); 5671 #endif 5672 ATH_UNLOCK(sc); 5673 5674 if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 5675 csa_run_transition = 1; 5676 5677 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 5678 5679 if (nstate == IEEE80211_S_SCAN) { 5680 /* 5681 * Scanning: turn off beacon miss and don't beacon. 5682 * Mark beacon state so when we reach RUN state we'll 5683 * [re]setup beacons. Unblock the task q thread so 5684 * deferred interrupt processing is done. 
5685 */ 5686 5687 /* Ensure we stay awake during scan */ 5688 ATH_LOCK(sc); 5689 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5690 ath_power_setpower(sc, HAL_PM_AWAKE); 5691 ATH_UNLOCK(sc); 5692 5693 ath_hal_intrset(ah, 5694 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 5695 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5696 sc->sc_beacons = 0; 5697 taskqueue_unblock(sc->sc_tq); 5698 } 5699 5700 ni = ieee80211_ref_node(vap->iv_bss); 5701 rfilt = ath_calcrxfilter(sc); 5702 stamode = (vap->iv_opmode == IEEE80211_M_STA || 5703 vap->iv_opmode == IEEE80211_M_AHDEMO || 5704 vap->iv_opmode == IEEE80211_M_IBSS); 5705 5706 /* 5707 * XXX Don't need to do this (and others) if we've transitioned 5708 * from SLEEP->RUN. 5709 */ 5710 if (stamode && nstate == IEEE80211_S_RUN) { 5711 sc->sc_curaid = ni->ni_associd; 5712 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 5713 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5714 } 5715 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5716 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 5717 ath_hal_setrxfilter(ah, rfilt); 5718 5719 /* XXX is this to restore keycache on resume? */ 5720 if (vap->iv_opmode != IEEE80211_M_STA && 5721 (vap->iv_flags & IEEE80211_F_PRIVACY)) { 5722 for (i = 0; i < IEEE80211_WEP_NKID; i++) 5723 if (ath_hal_keyisvalid(ah, i)) 5724 ath_hal_keysetmac(ah, i, ni->ni_bssid); 5725 } 5726 5727 /* 5728 * Invoke the parent method to do net80211 work. 5729 */ 5730 error = avp->av_newstate(vap, nstate, arg); 5731 if (error != 0) 5732 goto bad; 5733 5734 /* 5735 * See above: ensure av_newstate() doesn't drop the lock 5736 * on us. 5737 */ 5738 IEEE80211_LOCK_ASSERT(ic); 5739 5740 if (nstate == IEEE80211_S_RUN) { 5741 /* NB: collect bss node again, it may have changed */ 5742 ieee80211_free_node(ni); 5743 ni = ieee80211_ref_node(vap->iv_bss); 5744 5745 DPRINTF(sc, ATH_DEBUG_STATE, 5746 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 5747 "capinfo 0x%04x chan %d\n", __func__, 5748 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 5749 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 5750 5751 switch (vap->iv_opmode) { 5752 #ifdef IEEE80211_SUPPORT_TDMA 5753 case IEEE80211_M_AHDEMO: 5754 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 5755 break; 5756 /* fall thru... */ 5757 #endif 5758 case IEEE80211_M_HOSTAP: 5759 case IEEE80211_M_IBSS: 5760 case IEEE80211_M_MBSS: 5761 /* 5762 * Allocate and setup the beacon frame. 5763 * 5764 * Stop any previous beacon DMA. This may be 5765 * necessary, for example, when an ibss merge 5766 * causes reconfiguration; there will be a state 5767 * transition from RUN->RUN that means we may 5768 * be called with beacon transmission active. 5769 */ 5770 ath_hal_stoptxdma(ah, sc->sc_bhalq); 5771 5772 error = ath_beacon_alloc(sc, ni); 5773 if (error != 0) 5774 goto bad; 5775 /* 5776 * If joining an adhoc network defer beacon timer 5777 * configuration to the next beacon frame so we 5778 * have a current TSF to use. Otherwise we're 5779 * starting an ibss/bss so there's no need to delay; 5780 * if this is the first vap moving to RUN state, then 5781 * beacon state needs to be [re]configured.
5782 */ 5783 if (vap->iv_opmode == IEEE80211_M_IBSS && 5784 ni->ni_tstamp.tsf != 0) { 5785 sc->sc_syncbeacon = 1; 5786 } else if (!sc->sc_beacons) { 5787 #ifdef IEEE80211_SUPPORT_TDMA 5788 if (vap->iv_caps & IEEE80211_C_TDMA) 5789 ath_tdma_config(sc, vap); 5790 else 5791 #endif 5792 ath_beacon_config(sc, vap); 5793 sc->sc_beacons = 1; 5794 } 5795 break; 5796 case IEEE80211_M_STA: 5797 /* 5798 * Defer beacon timer configuration to the next 5799 * beacon frame so we have a current TSF to use 5800 * (any TSF collected when scanning is likely old). 5801 * However if it's due to a CSA -> RUN transition, 5802 * force a beacon update so we pick up a lack of 5803 * beacons from an AP in CAC and thus force a 5804 * scan. 5805 * 5806 * And, there's also corner cases here where 5807 * after a scan, the AP may have disappeared. 5808 * In that case, we may not receive an actual 5809 * beacon to update the beacon timer and thus we 5810 * won't get notified of the missing beacons. 5811 */ 5812 if (ostate != IEEE80211_S_RUN && 5813 ostate != IEEE80211_S_SLEEP) { 5814 DPRINTF(sc, ATH_DEBUG_BEACON, 5815 "%s: STA; syncbeacon=1\n", __func__); 5816 sc->sc_syncbeacon = 1; 5817 5818 if (csa_run_transition) 5819 ath_beacon_config(sc, vap); 5820 5821 /* 5822 * PR: kern/175227 5823 * 5824 * Reconfigure beacons during reset; as otherwise 5825 * we won't get the beacon timers reprogrammed 5826 * after a reset and thus we won't pick up a 5827 * beacon miss interrupt. 5828 * 5829 * Hopefully we'll see a beacon before the BMISS 5830 * timer fires (too often), leading to a STA 5831 * disassociation. 5832 */ 5833 sc->sc_beacons = 1; 5834 } 5835 break; 5836 case IEEE80211_M_MONITOR: 5837 /* 5838 * Monitor mode vaps have only INIT->RUN and RUN->RUN 5839 * transitions so we must re-enable interrupts here to 5840 * handle the case of a single monitor mode vap. 5841 */ 5842 ath_hal_intrset(ah, sc->sc_imask); 5843 break; 5844 case IEEE80211_M_WDS: 5845 break; 5846 default: 5847 break; 5848 } 5849 /* 5850 * Let the hal process statistics collected during a 5851 * scan so it can provide calibrated noise floor data. 5852 */ 5853 ath_hal_process_noisefloor(ah); 5854 /* 5855 * Reset rssi stats; maybe not the best place... 5856 */ 5857 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 5858 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 5859 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 5860 5861 /* 5862 * Force awake for RUN mode. 5863 */ 5864 ATH_LOCK(sc); 5865 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5866 ath_power_setpower(sc, HAL_PM_AWAKE); 5867 5868 /* 5869 * Finally, start any timers and the task q thread 5870 * (in case we didn't go through SCAN state). 
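 *
 * The calibration callout is self-rearming: the initial one-tick
 * callout_reset() below kicks ath_calibrate(), which then re-arms
 * itself with whatever interval it computes, eg:
 *
 *	callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
 *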
		if (ath_longcalinterval != 0) {
			/* start periodic recalibration timer */
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
		} else {
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: calibration disabled\n", __func__);
		}
		ATH_UNLOCK(sc);

		taskqueue_unblock(sc->sc_tq);
	} else if (nstate == IEEE80211_S_INIT) {
		/*
		 * If there are no vaps left in RUN state then
		 * shutdown host/driver operation:
		 * o disable interrupts
		 * o disable the task queue thread
		 * o mark beacon processing as stopped
		 */
		if (!ath_isanyrunningvaps(vap)) {
			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
			/* disable interrupts */
			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
			taskqueue_block(sc->sc_tq);
			sc->sc_beacons = 0;
		}
#ifdef IEEE80211_SUPPORT_TDMA
		ath_hal_setcca(ah, AH_TRUE);
#endif
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* We're going to sleep, so transition appropriately */
		/* For now, only do this if we're a single STA vap */
		if (sc->sc_nvaps == 1 &&
		    vap->iv_opmode == IEEE80211_M_STA) {
			DPRINTF(sc, ATH_DEBUG_BEACON, "%s: syncbeacon=%d\n",
			    __func__, sc->sc_syncbeacon);
			ATH_LOCK(sc);
			/*
			 * Always at least set the self-generated
			 * frame config to set PWRMGT=1.
			 */
			ath_power_setselfgen(sc, HAL_PM_NETWORK_SLEEP);

			/*
			 * If we're not syncing beacons, transition
			 * to NETWORK_SLEEP.
			 *
			 * We stay awake if syncbeacon > 0 in case
			 * we need to listen for some beacons otherwise
			 * our beacon timer config may be wrong.
			 */
			if (sc->sc_syncbeacon == 0) {
				ath_power_setpower(sc, HAL_PM_NETWORK_SLEEP);
			}
			ATH_UNLOCK(sc);
		}
	}
bad:
	ieee80211_free_node(ni);

	/*
	 * Restore the power state - either to what it was, or
	 * to network_sleep if it's alright.
	 */
	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);
	return error;
}

/*
 * Allocate a key cache slot to the station so we can
 * setup a mapping from key index to node.  The key cache
 * slot is needed for managing antenna state and for
 * compression when stations do not use crypto.  We do
 * it unilaterally here; if crypto is employed this slot
 * will be reassigned.
 */
static void
ath_setup_stationkey(struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_softc;
	ieee80211_keyix keyix, rxkeyix;

	/* XXX should take a locked ref to vap->iv_bss */
	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
		/*
		 * Key cache is full; we'll fall back to doing
		 * the more expensive lookup in software.  Note
		 * this also means no h/w compression.
		 */
		/* XXX msg+statistic */
	} else {
		/* XXX locking? */
		ni->ni_ucastkey.wk_keyix = keyix;
		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
		/* NB: must mark device key to get called back on delete */
		ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
		/* NB: this will create a pass-thru key entry */
		ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
	}
}
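/*
 * Illustrative sketch of what the pass-thru entry above buys us (the
 * exact lookup lives in the keycache/RX code, so treat this as an
 * assumption rather than the literal path): a key index reported by
 * the hardware on RX can be mapped straight back to the node,
 *
 *	struct ieee80211_node *ni = sc->sc_keyixmap[rxkeyix];
 *
 * which avoids the more expensive software MAC address lookup noted
 * in the comment above.
 */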
/*
 * Setup driver-specific state for a newly associated node.
 * Note that we're also called on a re-associate; the isnew
 * param tells us if this is the first time or not.
 */
static void
ath_newassoc(struct ieee80211_node *ni, int isnew)
{
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_softc;
	const struct ieee80211_txparam *tp = ni->ni_txparms;

	an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
	an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);

#if defined(__DragonFly__)
	DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: reassoc; isnew=%d, is_powersave=%d\n",
	    __func__,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    isnew,
	    an->an_is_powersave);
#else
	DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: reassoc; isnew=%d, is_powersave=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    isnew,
	    an->an_is_powersave);
#endif

	ATH_NODE_LOCK(an);
	ath_rate_newassoc(sc, an, isnew);
	ATH_NODE_UNLOCK(an);

	if (isnew &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
		ath_setup_stationkey(ni);

	/*
	 * If we're reassociating, make sure that any paused queues
	 * get unpaused.
	 *
	 * Now, we may have frames in the hardware queue for this node.
	 * So if we are reassociating and there are frames in the queue,
	 * we need to go through the cleanup path to ensure that they're
	 * marked as non-aggregate.
	 */
	if (! isnew) {
#if defined(__DragonFly__)
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: %s: reassoc; is_powersave=%d\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    an->an_is_powersave);
#else
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: %6D: reassoc; is_powersave=%d\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    an->an_is_powersave);
#endif

		/* XXX for now, we can't hold the lock across assoc */
		ath_tx_node_reassoc(sc, an);

		/* XXX for now, we can't hold the lock across wakeup */
		if (an->an_is_powersave)
			ath_tx_node_wakeup(sc, an);
	}
}

static int
ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
	int nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_softc;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: rd %u cc %u location %c%s\n",
	    __func__, reg->regdomain, reg->country, reg->location,
	    reg->ecm ? " ecm" : "");

	status = ath_hal_set_channels(ah, chans, nchans,
	    reg->country, reg->regdomain);
	if (status != HAL_OK) {
		DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
		    __func__, status);
		return EINVAL;		/* XXX */
	}

	return 0;
}

static void
ath_getradiocaps(struct ieee80211com *ic,
	int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
	    __func__, SKU_DEBUG, CTRY_DEFAULT);

	/* XXX check return */
	(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
	    HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
}
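/*
 * NB (editorial summary, an assumption from the calls above): the two
 * entry points split the regulatory duties - ath_setregdomain() pushes
 * a net80211-validated channel list down to the HAL, while
 * ath_getradiocaps() reports the full channel set the radio can do
 * (debug SKU, default country) so net80211 can work out what a new
 * regdomain may legally enable.
 */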
static int
ath_getchannels(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Collect channel set based on EEPROM contents.
	 */
	status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
	    &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
	if (status != HAL_OK) {
		device_printf(sc->sc_dev,
		    "%s: unable to collect channel list from hal, status %d\n",
		    __func__, status);
		return EINVAL;
	}
	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
	/* XXX map Atheros sku's to net80211 SKU's */
	/* XXX net80211 types too small */
	ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
	ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX don't know */
	ic->ic_regdomain.isocc[1] = ' ';

	ic->ic_regdomain.ecm = 1;
	ic->ic_regdomain.location = 'I';

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
	    __func__, sc->sc_eerd, sc->sc_eecc,
	    ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
	    ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
	return 0;
}

static int
ath_rate_setup(struct ath_softc *sc, u_int mode)
{
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt;

	switch (mode) {
	case IEEE80211_MODE_11A:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A);
		break;
	case IEEE80211_MODE_HALF:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
		break;
	case IEEE80211_MODE_QUARTER:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
		break;
	case IEEE80211_MODE_11B:
		rt = ath_hal_getratetable(ah, HAL_MODE_11B);
		break;
	case IEEE80211_MODE_11G:
		rt = ath_hal_getratetable(ah, HAL_MODE_11G);
		break;
	case IEEE80211_MODE_TURBO_A:
		rt = ath_hal_getratetable(ah, HAL_MODE_108A);
		break;
	case IEEE80211_MODE_TURBO_G:
		rt = ath_hal_getratetable(ah, HAL_MODE_108G);
		break;
	case IEEE80211_MODE_STURBO_A:
		rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
		break;
	case IEEE80211_MODE_11NA:
		rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
		break;
	case IEEE80211_MODE_11NG:
		rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
		break;
	default:
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
		    __func__, mode);
		return 0;
	}
	sc->sc_rates[mode] = rt;
	return (rt != NULL);
}

static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int		rate;		/* tx/rx 802.11 rate */
		u_int16_t	timeOn;		/* LED on time (ms) */
		u_int16_t	timeOff;	/* LED off time (ms) */
	} blinkrates[] = {
		{ 108,  40,  10 },
		{  96,  44,  11 },
		{  72,  50,  13 },
		{  48,  57,  14 },
		{  36,  67,  16 },
		{  24,  80,  20 },
		{  22, 100,  25 },
		{  18, 133,  34 },
		{  12, 160,  40 },
		{  10, 200,  50 },
		{   6, 240,  58 },
		{   4, 267,  66 },
		{   2, 400, 100 },
		{   0, 500, 130 },
		/* XXX half/quarter rates */
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
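	/*
	 * Build the rate code -> h/w rate index map.  As a worked
	 * example (exact values depend on the HAL rate table): a
	 * legacy 54Mb/s rate is keyed by its 802.11 rate code in
	 * 500Kb/s units (108), while an HT rate is keyed by MCS
	 * number with IEEE80211_RATE_MCS or'd in.
	 */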
	for (i = 0; i < rt->rateCount; i++) {
		uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy != IEEE80211_T_HT)
			sc->sc_rixmap[ieeerate] = i;
		else
			sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
	}
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < nitems(sc->sc_hwmap); i++) {
		if (i >= rt->rateCount) {
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy == IEEE80211_T_HT)
			sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[i].shortPreamble ||
		    rt->info[i].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
		for (j = 0; j < nitems(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(sc, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(sc, 2*1);
	/* NB: caller is responsible for resetting rate control state */
}

static void
ath_watchdog(void *arg)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int do_reset = 0;

	ATH_LOCK_ASSERT(sc);

	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
		uint32_t hangs;

		ath_power_set_power_state(sc, HAL_PM_AWAKE);

		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
		    hangs != 0) {
			device_printf(sc->sc_dev, "%s hang detected (0x%x)\n",
			    hangs & 0xff ? "bb" : "mac", hangs);
		} else
			device_printf(sc->sc_dev, "device timeout\n");
		do_reset = 1;
#if defined(__DragonFly__)
		++ic->ic_oerrors;	/* don't care about SMP races */
#else
		counter_u64_add(ic->ic_oerrors, 1);
#endif
		sc->sc_stats.ast_watchdog++;

		ath_power_restore_power_state(sc);
	}

	/*
	 * We can't hold the lock across the ath_reset() call.
	 *
	 * And since this routine can't hold a lock and sleep,
	 * do the reset deferred.
	 */
	if (do_reset) {
		taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
	}

#if defined(__DragonFly__)
	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
#else
	callout_schedule(&sc->sc_wd_ch, hz);
#endif
}
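/*
 * NB (editorial note): ath_watchdog() runs from callout (softclock)
 * context where we may neither sleep nor take the reset path
 * directly; hence the hang recovery above is deferred to
 * sc_resettask, which runs from the driver taskqueue thread where
 * sleeping is permitted.
 */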
static void
ath_parent(struct ieee80211com *ic)
{
	struct ath_softc *sc = ic->ic_softc;
	int error = EDOOFUS;

	ATH_LOCK(sc);
	if (ic->ic_nrunning > 0) {
		/*
		 * To avoid rescanning another access point,
		 * do not call ath_init() here.  Instead,
		 * only reflect promisc mode settings.
		 */
		if (sc->sc_running) {
			ath_power_set_power_state(sc, HAL_PM_AWAKE);
			ath_mode_init(sc);
			ath_power_restore_power_state(sc);
		} else if (!sc->sc_invalid) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			error = ath_init(sc);
		}
	} else {
		ath_stop(sc);
		if (!sc->sc_invalid)
			ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
	}
	ATH_UNLOCK(sc);

	if (error == 0) {
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->start(sc->sc_tx99);
		else
#endif
			ieee80211_start_all(ic);
	}
}

/*
 * Announce various information on device/driver attach.
 */
static void
ath_announce(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	device_printf(sc->sc_dev, "%s mac %d.%d RF%s phy %d.%d\n",
	    ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
	    ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	device_printf(sc->sc_dev, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
	    ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct ath_txq *txq = sc->sc_ac2q[i];
			device_printf(sc->sc_dev,
			    "Use hw queue %u for %s traffic\n",
			    txq->axq_qnum, ieee80211_wme_acnames[i]);
		}
		device_printf(sc->sc_dev, "Use hw queue %u for CAB traffic\n",
		    sc->sc_cabq->axq_qnum);
		device_printf(sc->sc_dev, "Use hw queue %u for beacons\n",
		    sc->sc_bhalq);
	}
	if (ath_rxbuf != ATH_RXBUF)
		device_printf(sc->sc_dev, "using %u rx buffers\n", ath_rxbuf);
	if (ath_txbuf != ATH_TXBUF)
		device_printf(sc->sc_dev, "using %u tx buffers\n", ath_txbuf);
	if (sc->sc_mcastkey && bootverbose)
		device_printf(sc->sc_dev, "using multicast key search\n");
}

static void
ath_dfs_tasklet(void *p, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) p;
	struct ieee80211com *ic = &sc->sc_ic;

	/*
	 * If previous processing has found a radar event,
	 * signal this to the net80211 layer to begin DFS
	 * processing.
	 */
	if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
		/* DFS event found, initiate channel change */
		/*
		 * XXX doesn't currently tell us whether the event
		 * XXX was found in the primary or extension
		 * XXX channel!
		 */
		IEEE80211_LOCK(ic);
		ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
		IEEE80211_UNLOCK(ic);
	}
}

/*
 * Enable/disable power save.  This must be called with
 * no TX driver locks currently held, so it should only
 * be called from the RX path (which doesn't hold any
 * TX driver locks.)
 */
static void
ath_node_powersave(struct ieee80211_node *ni, int enable)
{
#ifdef ATH_SW_PSQ
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_softc;
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	/* XXX and no TXQ locks should be held here */

#if defined(__DragonFly__)
	DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6s: enable=%d\n",
	    __func__,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    !! enable);
#else
	DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    !! enable);
#endif
	/* Suspend or resume software queue handling */
	if (enable)
		ath_tx_node_sleep(sc, an);
	else
		ath_tx_node_wakeup(sc, an);

	/* Update net80211 state */
	avp->av_node_ps(ni, enable);
#else
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	/* Update net80211 state */
	avp->av_node_ps(ni, enable);
#endif	/* ATH_SW_PSQ */
}

/*
 * Notification from net80211 that the powersave queue state has
 * changed.
 *
 * Since the software queue also may have some frames:
 *
 * + if the node software queue has frames and the TID state
 *   is 0, we set the TIM;
 * + if the node and the stack are both empty, we clear the TIM bit.
 * + If the stack tries to set the bit, always set it.
 * + If the stack tries to clear the bit, only clear it if the
 *   software queue in question is also cleared.
 *
 * TODO: this is called during node teardown; so let's ensure this
 * is all correctly handled and that the TIM bit is cleared.
 * It may be that the node flush is called _AFTER_ the net80211
 * stack clears the TIM.
 *
 * Here is the racy part.  Since it's possible >1 concurrent,
 * overlapping TXes will appear complete with a TX completion in
 * another thread, it's possible that the concurrent TIM calls will
 * clash.  We can't hold the node lock here because setting the
 * TIM grabs the net80211 comlock and this may cause a LOR.
 * The solution is either to totally serialise _everything_ at
 * this point (ie, all TX, completion and any reset/flush go into
 * one taskqueue) or a new "ath TIM lock" needs to be created that
 * just wraps the driver state change and this call to avp->av_set_tim().
 *
 * The same race exists in the net80211 power save queue handling
 * as well.  Since multiple transmitting threads may queue frames
 * into the driver, as well as ps-poll and the driver transmitting
 * frames (and thus clearing the psq), it's quite possible that
 * a packet entering the PSQ and a ps-poll being handled will
 * race, causing the TIM to be cleared and not re-set.
 */
static int
ath_node_set_tim(struct ieee80211_node *ni, int enable)
{
#ifdef ATH_SW_PSQ
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_softc;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);
	int changed = 0;

	ATH_TX_LOCK(sc);
	an->an_stack_psq = enable;

	/*
	 * This will get called for all operating modes,
	 * even if avp->av_set_tim is unset.
	 * It's currently set for hostap/ibss modes; but
	 * the same infrastructure is used for both STA
	 * and AP/IBSS node power save.
	 */
	if (avp->av_set_tim == NULL) {
		ATH_TX_UNLOCK(sc);
		return (0);
	}

	/*
	 * If setting the bit, always set it here.
	 * If clearing the bit, only clear it if the
	 * software queue is also empty.
	 *
	 * If the node has left power save, just clear the TIM
	 * bit regardless of the state of the power save queue.
	 *
	 * XXX TODO: although atomics are used, it's quite possible
	 * that a race will occur between this and setting/clearing
	 * in another thread.  TX completion will occur always in
	 * one thread, however setting/clearing the TIM bit can come
	 * from a variety of different process contexts!
	 */
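	/*
	 * Decision summary for the cases below:
	 *
	 *	enable	tim_set	swq	psave	action
	 *	1	1	-	-	ignore (already set)
	 *	1	0	-	-	set TIM
	 *	0	-	0	-	clear TIM
	 *	0	-	>0	0	clear TIM anyway
	 *	0	-	>0	1	leave TIM set for now
	 */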
	if (enable && an->an_tim_set == 1) {
#if defined(__DragonFly__)
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: enable=%d, tim_set=1, ignoring\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    enable);
#else
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, tim_set=1, ignoring\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
#endif
		ATH_TX_UNLOCK(sc);
	} else if (enable) {
#if defined(__DragonFly__)
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: enable=%d, enabling TIM\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    enable);
#else
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, enabling TIM\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
#endif
		an->an_tim_set = 1;
		ATH_TX_UNLOCK(sc);
		changed = avp->av_set_tim(ni, enable);
	} else if (an->an_swq_depth == 0) {
		/* disable */
#if defined(__DragonFly__)
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: enable=%d, an_swq_depth == 0, disabling\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    enable);
#else
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, an_swq_depth == 0, disabling\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
#endif
		an->an_tim_set = 0;
		ATH_TX_UNLOCK(sc);
		changed = avp->av_set_tim(ni, enable);
	} else if (! an->an_is_powersave) {
		/*
		 * disable regardless; the node isn't in powersave now
		 */
#if defined(__DragonFly__)
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: enable=%d, an_pwrsave=0, disabling\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    enable);
#else
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, an_pwrsave=0, disabling\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
#endif
		an->an_tim_set = 0;
		ATH_TX_UNLOCK(sc);
		changed = avp->av_set_tim(ni, enable);
	} else {
		/*
		 * psq disable, node is currently in powersave, node
		 * software queue isn't empty, so don't clear the TIM bit
		 * for now.
		 */
		ATH_TX_UNLOCK(sc);
#if defined(__DragonFly__)
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: enable=%d, an_swq_depth > 0, ignoring\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    enable);
#else
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, an_swq_depth > 0, ignoring\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
#endif
		changed = 0;
	}

	return (changed);
#else
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	/*
	 * Some operating modes don't set av_set_tim(), so don't
	 * update it here.
	 */
	if (avp->av_set_tim == NULL)
		return (0);

	return (avp->av_set_tim(ni, enable));
#endif /* ATH_SW_PSQ */
}
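/*
 * Illustrative call pattern for ath_tx_update_tim() below (an
 * assumption sketched from its locking contract, not copied from
 * the TX code):
 *
 *	ATH_TX_LOCK(sc);
 *	... queue a frame to the node's software queue ...
 *	ath_tx_update_tim(sc, ni, 1);	// possibly set the TIM
 *	ATH_TX_UNLOCK(sc);
 *
 * and, from the completion side (ath_tx_default_comp()), the same
 * call with enable=0 once a software queue drains.
 */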
/*
 * Set or update the TIM from the software queue.
 *
 * Check the software queue depth before attempting to acquire
 * any locks; that avoids taking the lock in the common (empty)
 * case.  Then, re-check afterwards to ensure nothing has changed
 * in the meantime.
 *
 * set:   This is designed to be called from the TX path, after
 *        a frame has been queued, to see if the swq > 0.
 *
 * clear: This is designed to be called from the buffer completion point
 *        (right now it's ath_tx_default_comp()) where the state of
 *        a software queue has changed.
 *
 * It makes sense to place it at buffer free / completion rather
 * than after each software queue operation, as there's no real
 * point in churning the TIM bit as the last frames in the software
 * queue are transmitted.  If they fail and we retry them, we'd
 * just be setting the TIM bit again anyway.
 */
void
ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
	int enable)
{
#ifdef ATH_SW_PSQ
	struct ath_node *an;
	struct ath_vap *avp;

	/* Don't do this for broadcast/etc frames */
	if (ni == NULL)
		return;

	an = ATH_NODE(ni);
	avp = ATH_VAP(ni->ni_vap);

	/*
	 * And for operating modes without the TIM handler set, let's
	 * just skip those.
	 */
	if (avp->av_set_tim == NULL)
		return;

	ATH_TX_LOCK_ASSERT(sc);

	if (enable) {
		if (an->an_is_powersave &&
		    an->an_tim_set == 0 &&
		    an->an_swq_depth != 0) {
#if defined(__DragonFly__)
			DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
			    "%s: %s: swq_depth>0, tim_set=0, set!\n",
			    __func__,
			    ath_hal_ether_sprintf(ni->ni_macaddr));
#else
			DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
			    "%s: %6D: swq_depth>0, tim_set=0, set!\n",
			    __func__,
			    ni->ni_macaddr,
			    ":");
#endif
			an->an_tim_set = 1;
			(void) avp->av_set_tim(ni, 1);
		}
	} else {
		/*
		 * Don't bother grabbing the lock unless the queue is empty.
		 */
		if (an->an_swq_depth != 0)
			return;

		if (an->an_is_powersave &&
		    an->an_stack_psq == 0 &&
		    an->an_tim_set == 1 &&
		    an->an_swq_depth == 0) {
#if defined(__DragonFly__)
			DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
			    "%s: %s: swq_depth=0, tim_set=1, psq_set=0,"
			    " clear!\n",
			    __func__,
			    ath_hal_ether_sprintf(ni->ni_macaddr));
#else
			DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
			    "%s: %6D: swq_depth=0, tim_set=1, psq_set=0,"
			    " clear!\n",
			    __func__,
			    ni->ni_macaddr,
			    ":");
#endif
			an->an_tim_set = 0;
			(void) avp->av_set_tim(ni, 0);
		}
	}
#else
	return;
#endif /* ATH_SW_PSQ */
}
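/*
 * NB (editorial note, an assumption about the TX scheduler): the
 * an_leak_count used below is taken to let the software TX scheduler
 * release that many frames to the hardware even though the node is
 * marked asleep - exactly one here, which is what a ps-poll entitles
 * the station to.
 */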
/*
 * Received a ps-poll frame from net80211.
 *
 * Here we get a chance to serve out a software-queued frame ourselves
 * before we punt it to net80211 to transmit us one itself - either
 * because there's traffic in the net80211 psq, or a NULL frame to
 * indicate there's nothing else.
 */
static void
ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m)
{
#ifdef ATH_SW_PSQ
	struct ath_node *an;
	struct ath_vap *avp;
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_softc;
	int tid;

	/* Just paranoia */
	if (ni == NULL)
		return;

	/*
	 * Unassociated (temporary node) station.
	 */
	if (ni->ni_associd == 0)
		return;

	/*
	 * We do have an active node, so let's begin looking into it.
	 */
	an = ATH_NODE(ni);
	avp = ATH_VAP(ni->ni_vap);

	/*
	 * For now, we just call the original ps-poll method.
	 * Once we're ready to flip this on:
	 *
	 * + Set leak to 1, as no matter what we're going to have
	 *   to send a frame;
	 * + Check the software queue and if there's something in it,
	 *   schedule the highest TID that has traffic from this node.
	 *   Then make sure we schedule the software scheduler to
	 *   run so it picks up said frame.
	 *
	 * That way whatever happens, we'll at least send _a_ frame
	 * to the given node.
	 *
	 * Again, yes, it's crappy QoS if the node has multiple
	 * TIDs worth of traffic - but let's get it working first
	 * before we optimise it.
	 *
	 * Also yes, there's definitely latency here - we're not
	 * direct dispatching to the hardware in this path (and
	 * we're likely being called from the packet receive path,
	 * so going back into TX may be a little hairy!) but again
	 * I'd like to get this working first before optimising
	 * turn-around time.
	 */

	ATH_TX_LOCK(sc);

	/*
	 * Legacy - we're called and the node isn't asleep.
	 * Immediately punt.
	 */
	if (! an->an_is_powersave) {
#if defined(__DragonFly__)
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: not in powersave?\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr));
#else
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: not in powersave?\n",
		    __func__,
		    ni->ni_macaddr,
		    ":");
#endif
		ATH_TX_UNLOCK(sc);
		avp->av_recv_pspoll(ni, m);
		return;
	}

	/*
	 * We're in powersave.
	 *
	 * Leak a frame.
	 */
	an->an_leak_count = 1;

	/*
	 * Now, if there are no frames in the node, just punt to
	 * recv_pspoll.
	 *
	 * Don't bother checking if the TIM bit is set, we really
	 * only care if there are any frames here!
	 */
	if (an->an_swq_depth == 0) {
		ATH_TX_UNLOCK(sc);
#if defined(__DragonFly__)
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: SWQ empty; punting to net80211\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr));
#else
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: SWQ empty; punting to net80211\n",
		    __func__,
		    ni->ni_macaddr,
		    ":");
#endif
		avp->av_recv_pspoll(ni, m);
		return;
	}

	/*
	 * Ok, let's schedule the highest TID that has traffic
	 * and then schedule something.
	 */
	for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) {
		struct ath_tid *atid = &an->an_tid[tid];
		/*
		 * No frames?  Skip.
		 */
		if (atid->axq_depth == 0)
			continue;
		ath_tx_tid_sched(sc, atid);
		/*
		 * XXX we could do a direct call to the TXQ
		 * scheduler code here to optimise latency
		 * at the expense of a REALLY deep callstack.
		 */
		ATH_TX_UNLOCK(sc);
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask);
#if defined(__DragonFly__)
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: leaking frame to TID %d\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    tid);
#else
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: leaking frame to TID %d\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    tid);
#endif
		return;
	}

	ATH_TX_UNLOCK(sc);

	/*
	 * XXX nothing in the TIDs at this point?  Eek.
	 */
#if defined(__DragonFly__)
	DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
	    "%s: %s: TIDs empty, but ath_node showed traffic?!\n",
	    __func__,
	    ath_hal_ether_sprintf(ni->ni_macaddr));
#else
	DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
	    "%s: %6D: TIDs empty, but ath_node showed traffic?!\n",
	    __func__,
	    ni->ni_macaddr,
	    ":");
#endif
	avp->av_recv_pspoll(ni, m);
#else
	/* NB: avp isn't declared above in this branch; do it here */
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	avp->av_recv_pspoll(ni, m);
#endif	/* ATH_SW_PSQ */
}

MODULE_VERSION(if_ath, 1);
MODULE_DEPEND(if_ath, wlan, 1, 1, 1);		/* 802.11 media layer */
#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) || defined(ATH_DEBUG_ALQ)
MODULE_DEPEND(if_ath, alq, 1, 1, 1);
#endif