1 /*- 2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30 #include <sys/cdefs.h> 31 32 /* 33 * Driver for the Atheros Wireless LAN controller. 34 * 35 * This software is derived from work of Atsushi Onoe; his contribution 36 * is greatly appreciated. 37 */ 38 39 #include "opt_inet.h" 40 #include "opt_ath.h" 41 /* 42 * This is needed for register operations which are performed 43 * by the driver - eg, calls to ath_hal_gettsf32(). 44 * 45 * It's also required for any AH_DEBUG checks in here, eg the 46 * module dependencies. 
47 */ 48 #include "opt_ah.h" 49 #include "opt_wlan.h" 50 51 #include <sys/param.h> 52 #include <sys/systm.h> 53 #include <sys/sysctl.h> 54 #include <sys/mbuf.h> 55 #include <sys/malloc.h> 56 #include <sys/lock.h> 57 #include <sys/mutex.h> 58 #include <sys/kernel.h> 59 #include <sys/socket.h> 60 #include <sys/sockio.h> 61 #include <sys/errno.h> 62 #include <sys/callout.h> 63 #include <sys/bus.h> 64 #include <sys/endian.h> 65 #include <sys/kthread.h> 66 #include <sys/taskqueue.h> 67 #include <sys/priv.h> 68 #include <sys/module.h> 69 #include <sys/ktr.h> 70 71 #include <net/if.h> 72 #include <net/if_var.h> 73 #include <net/if_dl.h> 74 #include <net/if_media.h> 75 #include <net/if_types.h> 76 #include <net/if_arp.h> 77 #include <net/ethernet.h> 78 #include <net/if_llc.h> 79 #include <net/ifq_var.h> 80 81 #include <netproto/802_11/ieee80211_var.h> 82 #include <netproto/802_11/ieee80211_regdomain.h> 83 #ifdef IEEE80211_SUPPORT_SUPERG 84 #include <netproto/802_11/ieee80211_superg.h> 85 #endif 86 #ifdef IEEE80211_SUPPORT_TDMA 87 #include <netproto/802_11/ieee80211_tdma.h> 88 #endif 89 90 #include <net/bpf.h> 91 92 #ifdef INET 93 #include <netinet/in.h> 94 #include <netinet/if_ether.h> 95 #endif 96 97 #include <dev/netif/ath/ath/if_athvar.h> 98 #include <dev/netif/ath/ath_hal/ah_devid.h> /* XXX for softled */ 99 #include <dev/netif/ath/ath_hal/ah_diagcodes.h> 100 101 #include <dev/netif/ath/ath/if_ath_debug.h> 102 #include <dev/netif/ath/ath/if_ath_misc.h> 103 #include <dev/netif/ath/ath/if_ath_tsf.h> 104 #include <dev/netif/ath/ath/if_ath_tx.h> 105 #include <dev/netif/ath/ath/if_ath_sysctl.h> 106 #include <dev/netif/ath/ath/if_ath_led.h> 107 #include <dev/netif/ath/ath/if_ath_keycache.h> 108 #include <dev/netif/ath/ath/if_ath_rx.h> 109 #include <dev/netif/ath/ath/if_ath_rx_edma.h> 110 #include <dev/netif/ath/ath/if_ath_tx_edma.h> 111 #include <dev/netif/ath/ath/if_ath_beacon.h> 112 #include <dev/netif/ath/ath/if_ath_btcoex.h> 113 #include <dev/netif/ath/ath/if_ath_spectral.h> 114 #include <dev/netif/ath/ath/if_ath_lna_div.h> 115 #include <dev/netif/ath/ath/if_athdfs.h> 116 117 #ifdef ATH_TX99_DIAG 118 #include <dev/netif/ath/ath_tx99/ath_tx99.h> 119 #endif 120 121 #ifdef ATH_DEBUG_ALQ 122 #include <dev/netif/ath/ath/if_ath_alq.h> 123 #endif 124 125 /* 126 * Only enable this if you're working on PS-POLL support. 127 */ 128 #define ATH_SW_PSQ 129 130 #ifdef __DragonFly__ 131 #define CURVNET_SET(name) 132 #define CURVNET_RESTORE() 133 #endif 134 135 /* 136 * ATH_BCBUF determines the number of vap's that can transmit 137 * beacons and also (currently) the number of vap's that can 138 * have unique mac addresses/bssid. When staggering beacons 139 * 4 is probably a good max as otherwise the beacons become 140 * very closely spaced and there is limited time for cab q traffic 141 * to go out. You can burst beacons instead but that is not good 142 * for stations in power save and at some point you really want 143 * another radio (and channel). 144 * 145 * The limit on the number of mac addresses is tied to our use of 146 * the U/L bit and tracking addresses in a byte; it would be 147 * worthwhile to allow more for applications like proxy sta. 
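 *
 * For example (see assign_address() below): the first vap keeps the
 * EEPROM MAC address unmodified, while a cloned vap in slot i sets the
 * locally-administered bit and encodes i in bits 2-4 of the first
 * octet (mac[0] |= (i << 2) | 0x2), so an illustrative base address of
 * 00:03:7f:aa:bb:cc in slot 1 becomes 06:03:7f:aa:bb:cc.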
148 */ 149 CTASSERT(ATH_BCBUF <= 8); 150 151 static struct ieee80211vap *ath_vap_create(struct ieee80211com *, 152 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 153 const uint8_t [IEEE80211_ADDR_LEN], 154 const uint8_t [IEEE80211_ADDR_LEN]); 155 static void ath_vap_delete(struct ieee80211vap *); 156 static void ath_init(void *); 157 static void ath_stop_locked(struct ifnet *); 158 static void ath_stop(struct ifnet *); 159 static int ath_reset_vap(struct ieee80211vap *, u_long); 160 static int ath_transmit(struct ifnet *ifp, struct mbuf *m); 161 #if 0 162 static void ath_qflush(struct ifnet *ifp); 163 #endif 164 static int ath_media_change(struct ifnet *); 165 static void ath_watchdog(void *); 166 static void ath_start(struct ifnet *, struct ifaltq_subque *); 167 static int ath_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 168 static void ath_fatal_proc(void *, int); 169 static void ath_bmiss_vap(struct ieee80211vap *); 170 static void ath_bmiss_proc(void *, int); 171 static void ath_key_update_begin(struct ieee80211vap *); 172 static void ath_key_update_end(struct ieee80211vap *); 173 static void ath_update_mcast(struct ifnet *); 174 static void ath_update_promisc(struct ifnet *); 175 static void ath_updateslot(struct ifnet *); 176 static void ath_bstuck_proc(void *, int); 177 static void ath_reset_proc(void *, int); 178 static int ath_desc_alloc(struct ath_softc *); 179 static void ath_desc_free(struct ath_softc *); 180 static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, 181 const uint8_t [IEEE80211_ADDR_LEN]); 182 static void ath_node_cleanup(struct ieee80211_node *); 183 static void ath_node_free(struct ieee80211_node *); 184 static void ath_node_getsignal(const struct ieee80211_node *, 185 int8_t *, int8_t *); 186 static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); 187 static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 188 static int ath_tx_setup(struct ath_softc *, int, int); 189 static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); 190 static void ath_tx_cleanup(struct ath_softc *); 191 static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, 192 int dosched); 193 static void ath_tx_proc_q0(void *, int); 194 static void ath_tx_proc_q0123(void *, int); 195 static void ath_tx_proc(void *, int); 196 static void ath_txq_sched_tasklet(void *, int); 197 static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 198 static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 199 static void ath_scan_start(struct ieee80211com *); 200 static void ath_scan_end(struct ieee80211com *); 201 static void ath_set_channel(struct ieee80211com *); 202 #ifdef ATH_ENABLE_11N 203 static void ath_update_chw(struct ieee80211com *); 204 #endif /* ATH_ENABLE_11N */ 205 static void ath_calibrate(void *); 206 static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); 207 static void ath_setup_stationkey(struct ieee80211_node *); 208 static void ath_newassoc(struct ieee80211_node *, int); 209 static int ath_setregdomain(struct ieee80211com *, 210 struct ieee80211_regdomain *, int, 211 struct ieee80211_channel []); 212 static void ath_getradiocaps(struct ieee80211com *, int, int *, 213 struct ieee80211_channel []); 214 static int ath_getchannels(struct ath_softc *); 215 216 static int ath_rate_setup(struct ath_softc *, u_int mode); 217 static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 218 219 static void ath_announce(struct ath_softc 
*); 220 221 static void ath_dfs_tasklet(void *, int); 222 #if 0 223 static void ath_node_powersave(struct ieee80211_node *, int); 224 static void ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *); 225 #endif 226 static int ath_node_set_tim(struct ieee80211_node *, int); 227 228 #ifdef IEEE80211_SUPPORT_TDMA 229 #include <dev/netif/ath/ath/if_ath_tdma.h> 230 #endif 231 232 extern const char* ath_hal_ether_sprintf(const u_int8_t *mac); 233 234 SYSCTL_DECL(_hw_ath); 235 236 /* XXX validate sysctl values */ 237 static int ath_longcalinterval = 30; /* long cals every 30 secs */ 238 SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval, 239 0, "long chip calibration interval (secs)"); 240 static int ath_shortcalinterval = 100; /* short cals every 100 ms */ 241 SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval, 242 0, "short chip calibration interval (msecs)"); 243 static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */ 244 SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval, 245 0, "reset chip calibration results (secs)"); 246 static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */ 247 SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval, 248 0, "ANI calibration (msecs)"); 249 250 int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ 251 SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf, 252 0, "rx buffers allocated"); 253 TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf); 254 int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ 255 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf, 256 0, "tx buffers allocated"); 257 TUNABLE_INT("hw.ath.txbuf", &ath_txbuf); 258 int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */ 259 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt, 260 0, "tx (mgmt) buffers allocated"); 261 TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt); 262 263 int ath_bstuck_threshold = 4; /* max missed beacons */ 264 SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold, 265 0, "max missed beacon xmits before chip reset"); 266 267 MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); 268 269 void 270 ath_legacy_attach_comp_func(struct ath_softc *sc) 271 { 272 273 /* 274 * Special case certain configurations. Note the 275 * CAB queue is handled by these specially so don't 276 * include them when checking the txq setup mask. 277 */ 278 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) { 279 case 0x01: 280 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); 281 break; 282 case 0x0f: 283 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); 284 break; 285 default: 286 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); 287 break; 288 } 289 } 290 291 /* 292 * Set the target power mode. 293 * 294 * If this is called during a point in time where 295 * the hardware is being programmed elsewhere, it will 296 * simply store it away and update it when all current 297 * uses of the hardware are completed. 
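 *
 * Concretely: the HAL is only touched here (via ath_hal_setpower())
 * when sc_powersave_refcnt is zero; otherwise the request is stored
 * in sc_target_powerstate and applied by
 * _ath_power_restore_power_state() once the last reference is dropped.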
 */
void
_ath_power_setpower(struct ath_softc *sc, int power_state, const char *file, int line)
{
	ATH_LOCK_ASSERT(sc);

	sc->sc_target_powerstate = power_state;

	DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
	    __func__,
	    file,
	    line,
	    power_state,
	    sc->sc_powersave_refcnt);

	if (sc->sc_powersave_refcnt == 0 &&
	    power_state != sc->sc_cur_powerstate) {
		sc->sc_cur_powerstate = power_state;
		ath_hal_setpower(sc->sc_ah, power_state);

		/*
		 * If the NIC is force-awake, then set the
		 * self-gen frame state appropriately.
		 *
		 * If the nic is in network sleep or full-sleep,
		 * we let the above call leave the self-gen
		 * state as "sleep".
		 */
		if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
		    sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
			ath_hal_setselfgenpower(sc->sc_ah,
			    sc->sc_target_selfgen_state);
		}
	}
}

/*
 * Set the current self-generated frames state.
 *
 * This is separate from the target power mode.  The chip may be
 * awake but the desired state is "sleep", so frames sent to the
 * destination have PWRMGT=1 in the 802.11 header.  The NIC also
 * needs to know to set PWRMGT=1 in self-generated frames.
 */
void
_ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line)
{

	ATH_LOCK_ASSERT(sc);

	DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
	    __func__,
	    file,
	    line,
	    power_state,
	    sc->sc_target_selfgen_state);

	sc->sc_target_selfgen_state = power_state;

	/*
	 * If the NIC is force-awake, then set the power state.
	 * Network-state and full-sleep will already transition it to
	 * mark self-gen frames as sleeping - and we can't
	 * guarantee the NIC is awake to program the self-gen frame
	 * setting anyway.
	 */
	if (sc->sc_cur_powerstate == HAL_PM_AWAKE) {
		ath_hal_setselfgenpower(sc->sc_ah, power_state);
	}
}

/*
 * Set the hardware power mode and take a reference.
 *
 * This doesn't update the target power mode in the driver;
 * it just updates the hardware power state.
 *
 * XXX it should only ever force the hardware awake; it should
 * never be called to set it asleep.
 */
void
_ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line)
{
	ATH_LOCK_ASSERT(sc);

	DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
	    __func__,
	    file,
	    line,
	    power_state,
	    sc->sc_powersave_refcnt);

	sc->sc_powersave_refcnt++;

	if (power_state != sc->sc_cur_powerstate) {
		ath_hal_setpower(sc->sc_ah, power_state);
		sc->sc_cur_powerstate = power_state;

		/*
		 * Adjust the self-gen powerstate if appropriate.
		 */
		if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
		    sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
			ath_hal_setselfgenpower(sc->sc_ah,
			    sc->sc_target_selfgen_state);
		}

	}
}

/*
 * Restore the power save mode to what it once was.
 *
 * This will decrement the reference counter and once it hits
 * zero, it'll restore the powersave state.
413 */ 414 void 415 _ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line) 416 { 417 418 ATH_LOCK_ASSERT(sc); 419 420 DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) refcnt=%d, target state=%d\n", 421 __func__, 422 file, 423 line, 424 sc->sc_powersave_refcnt, 425 sc->sc_target_powerstate); 426 427 if (sc->sc_powersave_refcnt == 0) 428 device_printf(sc->sc_dev, "%s: refcnt=0?\n", __func__); 429 else 430 sc->sc_powersave_refcnt--; 431 432 if (sc->sc_powersave_refcnt == 0 && 433 sc->sc_target_powerstate != sc->sc_cur_powerstate) { 434 sc->sc_cur_powerstate = sc->sc_target_powerstate; 435 ath_hal_setpower(sc->sc_ah, sc->sc_target_powerstate); 436 } 437 438 /* 439 * Adjust the self-gen powerstate if appropriate. 440 */ 441 if (sc->sc_cur_powerstate == HAL_PM_AWAKE && 442 sc->sc_target_selfgen_state != HAL_PM_AWAKE) { 443 ath_hal_setselfgenpower(sc->sc_ah, 444 sc->sc_target_selfgen_state); 445 } 446 447 } 448 449 #define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20) 450 #define HAL_MODE_HT40 \ 451 (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \ 452 HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS) 453 int 454 ath_attach(u_int16_t devid, struct ath_softc *sc) 455 { 456 struct ifnet *ifp; 457 struct ieee80211com *ic; 458 struct ath_hal *ah = NULL; 459 HAL_STATUS status; 460 int error = 0, i; 461 u_int wmodes; 462 uint8_t macaddr[IEEE80211_ADDR_LEN]; 463 int rx_chainmask, tx_chainmask; 464 465 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); 466 467 CURVNET_SET(vnet0); 468 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 469 if (ifp == NULL) { 470 device_printf(sc->sc_dev, "can not if_alloc()\n"); 471 error = ENOSPC; 472 CURVNET_RESTORE(); 473 goto bad; 474 } 475 ic = ifp->if_l2com; 476 477 /* set these up early for if_printf use */ 478 if_initname(ifp, device_get_name(sc->sc_dev), 479 device_get_unit(sc->sc_dev)); 480 CURVNET_RESTORE(); 481 482 sc->sc_rxfifo_state = ATH_RXFIFO_RESET; 483 484 /* prepare sysctl tree for use in sub modules */ 485 sysctl_ctx_init(&sc->sc_sysctl_ctx); 486 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx, 487 SYSCTL_STATIC_CHILDREN(_hw), 488 OID_AUTO, 489 device_get_nameunit(sc->sc_dev), 490 CTLFLAG_RD, 0, ""); 491 492 493 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, 494 sc->sc_eepromdata, &status); 495 if (ah == NULL) { 496 if_printf(ifp, "unable to attach hardware; HAL status %u\n", 497 status); 498 error = ENXIO; 499 goto bad; 500 } 501 sc->sc_ah = ah; 502 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ 503 #ifdef ATH_DEBUG 504 sc->sc_debug = ath_debug; 505 #endif 506 507 /* 508 * Setup the DMA/EDMA functions based on the current 509 * hardware support. 510 * 511 * This is required before the descriptors are allocated. 512 */ 513 if (ath_hal_hasedma(sc->sc_ah)) { 514 sc->sc_isedma = 1; 515 ath_recv_setup_edma(sc); 516 ath_xmit_setup_edma(sc); 517 } else { 518 ath_recv_setup_legacy(sc); 519 ath_xmit_setup_legacy(sc); 520 } 521 522 if (ath_hal_hasmybeacon(sc->sc_ah)) { 523 sc->sc_do_mybeacon = 1; 524 } 525 526 /* 527 * Check if the MAC has multi-rate retry support. 528 * We do this by trying to setup a fake extended 529 * descriptor. MAC's that don't have support will 530 * return false w/o doing anything. MAC's that do 531 * support it will return true w/o doing anything. 532 */ 533 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); 534 535 /* 536 * Check if the device has hardware counters for PHY 537 * errors. 
If so we need to enable the MIB interrupt 538 * so we can act on stat triggers. 539 */ 540 if (ath_hal_hwphycounters(ah)) 541 sc->sc_needmib = 1; 542 543 /* 544 * Get the hardware key cache size. 545 */ 546 sc->sc_keymax = ath_hal_keycachesize(ah); 547 if (sc->sc_keymax > ATH_KEYMAX) { 548 if_printf(ifp, "Warning, using only %u of %u key cache slots\n", 549 ATH_KEYMAX, sc->sc_keymax); 550 sc->sc_keymax = ATH_KEYMAX; 551 } 552 /* 553 * Reset the key cache since some parts do not 554 * reset the contents on initial power up. 555 */ 556 for (i = 0; i < sc->sc_keymax; i++) 557 ath_hal_keyreset(ah, i); 558 559 /* 560 * Collect the default channel list. 561 */ 562 error = ath_getchannels(sc); 563 if (error != 0) 564 goto bad; 565 566 /* 567 * Setup rate tables for all potential media types. 568 */ 569 ath_rate_setup(sc, IEEE80211_MODE_11A); 570 ath_rate_setup(sc, IEEE80211_MODE_11B); 571 ath_rate_setup(sc, IEEE80211_MODE_11G); 572 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); 573 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); 574 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); 575 ath_rate_setup(sc, IEEE80211_MODE_11NA); 576 ath_rate_setup(sc, IEEE80211_MODE_11NG); 577 ath_rate_setup(sc, IEEE80211_MODE_HALF); 578 ath_rate_setup(sc, IEEE80211_MODE_QUARTER); 579 580 /* NB: setup here so ath_rate_update is happy */ 581 ath_setcurmode(sc, IEEE80211_MODE_11A); 582 583 /* 584 * Allocate TX descriptors and populate the lists. 585 */ 586 wlan_assert_serialized(); 587 wlan_serialize_exit(); 588 error = ath_desc_alloc(sc); 589 wlan_serialize_enter(); 590 if (error != 0) { 591 if_printf(ifp, "failed to allocate TX descriptors: %d\n", 592 error); 593 goto bad; 594 } 595 error = ath_txdma_setup(sc); 596 if (error != 0) { 597 if_printf(ifp, "failed to allocate TX descriptors: %d\n", 598 error); 599 goto bad; 600 } 601 602 /* 603 * Allocate RX descriptors and populate the lists. 604 */ 605 error = ath_rxdma_setup(sc); 606 if (error != 0) { 607 if_printf(ifp, "failed to allocate RX descriptors: %d\n", 608 error); 609 goto bad; 610 } 611 612 callout_init_mp(&sc->sc_cal_ch); 613 callout_init_mp(&sc->sc_wd_ch); 614 615 ATH_TXBUF_LOCK_INIT(sc); 616 617 sc->sc_tq = taskqueue_create("ath_taskq", M_INTWAIT, 618 taskqueue_thread_enqueue, &sc->sc_tq); 619 taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1, 620 "%s taskq", ifp->if_xname); 621 622 TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc); 623 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); 624 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); 625 TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc); 626 TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc); 627 TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc); 628 629 /* 630 * Allocate hardware transmit queues: one queue for 631 * beacon frames and one data queue for each QoS 632 * priority. Note that the hal handles resetting 633 * these queues at the needed time. 
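	 *
	 * If the hardware cannot provide a separate queue for every WME
	 * access category, the setup below falls back to mapping BE/VI/VO
	 * onto the BK queue, and IEEE80211_C_WME is not advertised later on.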
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(sc);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
		    ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Attach the TX completion function.
	 *
	 * The non-EDMA chips may have some special case optimisations;
	 * this method gives everyone a chance to attach cleanly.
	 */
	sc->sc_tx.xmit_attach_comp_func(sc);

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach spectral module */
	if (ath_spectral_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach spectral\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach bluetooth coexistence module */
	if (ath_btcoex_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach bluetooth coexistence\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach LNA diversity module */
	if (ath_lna_div_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach LNA diversity\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* Configure LED state */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init_mp(&sc->sc_ledtimer);

	/*
	 * Don't setup hardware-based blinking.
	 *
	 * Although some NICs may have this configured in the
	 * default reset register values, the user may wish
	 * to alter which pins have which function.
	 *
	 * The reference driver attaches the MAC network LED to GPIO1 and
	 * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
	 * NIC has these reversed.
748 */ 749 sc->sc_hardled = (1 == 0); 750 sc->sc_led_net_pin = -1; 751 sc->sc_led_pwr_pin = -1; 752 /* 753 * Auto-enable soft led processing for IBM cards and for 754 * 5211 minipci cards. Users can also manually enable/disable 755 * support with a sysctl. 756 */ 757 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); 758 ath_led_config(sc); 759 ath_hal_setledstate(ah, HAL_LED_INIT); 760 761 ifp->if_softc = sc; 762 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 763 #if 0 764 ifp->if_transmit = ath_transmit; 765 ifp->if_qflush = ath_qflush; 766 #endif 767 ifp->if_start = ath_start; 768 ifp->if_ioctl = ath_ioctl; 769 ifp->if_init = ath_init; 770 ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN); 771 #if 0 772 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 773 IFQ_SET_READY(&ifp->if_snd); 774 #endif 775 776 ic->ic_ifp = ifp; 777 /* XXX not right but it's not used anywhere important */ 778 ic->ic_phytype = IEEE80211_T_OFDM; 779 ic->ic_opmode = IEEE80211_M_STA; 780 ic->ic_caps = 781 IEEE80211_C_STA /* station mode */ 782 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 783 | IEEE80211_C_HOSTAP /* hostap mode */ 784 | IEEE80211_C_MONITOR /* monitor mode */ 785 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 786 | IEEE80211_C_WDS /* 4-address traffic works */ 787 | IEEE80211_C_MBSS /* mesh point link mode */ 788 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 789 | IEEE80211_C_SHSLOT /* short slot time supported */ 790 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ 791 #ifndef ATH_ENABLE_11N 792 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 793 #endif 794 | IEEE80211_C_TXFRAG /* handle tx frags */ 795 #ifdef ATH_ENABLE_DFS 796 | IEEE80211_C_DFS /* Enable radar detection */ 797 #endif 798 | IEEE80211_C_PMGT /* Station side power mgmt */ 799 | IEEE80211_C_SWSLEEP 800 ; 801 /* 802 * Query the hal to figure out h/w crypto support. 803 */ 804 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) 805 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; 806 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) 807 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; 808 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) 809 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; 810 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) 811 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; 812 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { 813 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; 814 /* 815 * Check if h/w does the MIC and/or whether the 816 * separate key cache entries are required to 817 * handle both tx+rx MIC keys. 818 */ 819 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) 820 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 821 /* 822 * If the h/w supports storing tx+rx MIC keys 823 * in one cache slot automatically enable use. 824 */ 825 if (ath_hal_hastkipsplit(ah) || 826 !ath_hal_settkipsplit(ah, AH_FALSE)) 827 sc->sc_splitmic = 1; 828 /* 829 * If the h/w can do TKIP MIC together with WME then 830 * we use it; otherwise we force the MIC to be done 831 * in software by the net80211 layer. 832 */ 833 if (ath_hal_haswmetkipmic(ah)) 834 sc->sc_wmetkipmic = 1; 835 } 836 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); 837 /* 838 * Check for multicast key search support. 839 */ 840 if (ath_hal_hasmcastkeysearch(sc->sc_ah) && 841 !ath_hal_getmcastkeysearch(sc->sc_ah)) { 842 ath_hal_setmcastkeysearch(sc->sc_ah, 1); 843 } 844 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); 845 /* 846 * Mark key cache slots associated with global keys 847 * as in use. 
If we knew TKIP was not to be used we 848 * could leave the +32, +64, and +32+64 slots free. 849 */ 850 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 851 setbit(sc->sc_keymap, i); 852 setbit(sc->sc_keymap, i+64); 853 if (sc->sc_splitmic) { 854 setbit(sc->sc_keymap, i+32); 855 setbit(sc->sc_keymap, i+32+64); 856 } 857 } 858 /* 859 * TPC support can be done either with a global cap or 860 * per-packet support. The latter is not available on 861 * all parts. We're a bit pedantic here as all parts 862 * support a global cap. 863 */ 864 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) 865 ic->ic_caps |= IEEE80211_C_TXPMGT; 866 867 /* 868 * Mark WME capability only if we have sufficient 869 * hardware queues to do proper priority scheduling. 870 */ 871 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) 872 ic->ic_caps |= IEEE80211_C_WME; 873 /* 874 * Check for misc other capabilities. 875 */ 876 if (ath_hal_hasbursting(ah)) 877 ic->ic_caps |= IEEE80211_C_BURST; 878 sc->sc_hasbmask = ath_hal_hasbssidmask(ah); 879 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah); 880 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); 881 sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah); 882 sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah); 883 sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah); 884 sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah); 885 sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah); 886 887 if (ath_hal_hasfastframes(ah)) 888 ic->ic_caps |= IEEE80211_C_FF; 889 wmodes = ath_hal_getwirelessmodes(ah); 890 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO)) 891 ic->ic_caps |= IEEE80211_C_TURBOP; 892 #ifdef IEEE80211_SUPPORT_TDMA 893 if (ath_hal_macversion(ah) > 0x78) { 894 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */ 895 ic->ic_tdma_update = ath_tdma_update; 896 } 897 #endif 898 899 /* 900 * TODO: enforce that at least this many frames are available 901 * in the txbuf list before allowing data frames (raw or 902 * otherwise) to be transmitted. 903 */ 904 sc->sc_txq_data_minfree = 10; 905 /* 906 * Leave this as default to maintain legacy behaviour. 907 * Shortening the cabq/mcastq may end up causing some 908 * undesirable behaviour. 909 */ 910 sc->sc_txq_mcastq_maxdepth = ath_txbuf; 911 912 /* 913 * How deep can the node software TX queue get whilst it's asleep. 914 */ 915 sc->sc_txq_node_psq_maxdepth = 16; 916 917 /* 918 * Default the maximum queue depth for a given node 919 * to 1/4'th the TX buffers, or 64, whichever 920 * is larger. 921 */ 922 sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4); 923 924 /* Enable CABQ by default */ 925 sc->sc_cabq_enable = 1; 926 927 /* 928 * Allow the TX and RX chainmasks to be overridden by 929 * environment variables and/or device.hints. 930 * 931 * This must be done early - before the hardware is 932 * calibrated or before the 802.11n stream calculation 933 * is done. 934 */ 935 if (resource_int_value(device_get_name(sc->sc_dev), 936 device_get_unit(sc->sc_dev), "rx_chainmask", 937 &rx_chainmask) == 0) { 938 device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n", 939 rx_chainmask); 940 (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask); 941 } 942 if (resource_int_value(device_get_name(sc->sc_dev), 943 device_get_unit(sc->sc_dev), "tx_chainmask", 944 &tx_chainmask) == 0) { 945 device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n", 946 tx_chainmask); 947 (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask); 948 } 949 950 /* 951 * Query the TX/RX chainmask configuration. 952 * 953 * This is only relevant for 11n devices. 
 */
	ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
	ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

	/*
	 * Disable MRR with protected frames by default.
	 * Only 802.11n series NICs can handle this.
	 */
	sc->sc_mrrprot = 0;	/* XXX should be a capability */

	/*
	 * Query the enterprise mode information from the HAL.
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0,
	    &sc->sc_ent_cfg) == HAL_OK)
		sc->sc_use_ent = 1;

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		uint32_t rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");

		sc->sc_mrrprot = 1;	/* XXX should be a capability */

		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			    | IEEE80211_HTC_AMPDU	/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU	/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839
							/* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    | IEEE80211_HTCAP_SHORTGI40;

		/*
		 * TX/RX streams need to be taken into account when
		 * negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);
		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		/*
		 * Setup TX and RX STBC based on what the HAL allows and
		 * the currently configured chainmask set.
		 * Ie - don't enable STBC TX if only one chain is enabled.
		 * STBC RX is fine on a single RX chain; it just won't
		 * provide any real benefit.
		 */
		if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0,
		    NULL) == HAL_OK) {
			sc->sc_rx_stbc = 1;
			device_printf(sc->sc_dev,
			    "[HT] 1 stream STBC receive enabled\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM;
		}
		if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0,
		    NULL) == HAL_OK) {
			sc->sc_tx_stbc = 1;
			device_printf(sc->sc_dev,
			    "[HT] 1 stream STBC transmit enabled\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC;
		}

		(void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
		    &sc->sc_rts_aggr_limit);
		if (sc->sc_rts_aggr_limit != (64 * 1024))
			device_printf(sc->sc_dev,
			    "[HT] RTS aggregates limited to %d KiB\n",
			    sc->sc_rts_aggr_limit / 1024);

		device_printf(sc->sc_dev,
		    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Initial aggregation settings.
	 */
	sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH;
	sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH;
	sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
	sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
	sc->sc_aggr_limit = ATH_AGGR_MAXSIZE;
	sc->sc_delim_min_pad = 0;

	/*
	 * Check if the hardware requires PCI register serialisation.
1063 * Some of the Owl based MACs require this. 1064 */ 1065 if (ncpus > 1 && 1066 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, 1067 0, NULL) == HAL_OK) { 1068 sc->sc_ah->ah_config.ah_serialise_reg_war = 1; 1069 device_printf(sc->sc_dev, 1070 "Enabling register serialisation\n"); 1071 } 1072 1073 /* 1074 * Initialise the deferred completed RX buffer list. 1075 */ 1076 TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]); 1077 TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]); 1078 1079 /* 1080 * Indicate we need the 802.11 header padded to a 1081 * 32-bit boundary for 4-address and QoS frames. 1082 */ 1083 ic->ic_flags |= IEEE80211_F_DATAPAD; 1084 1085 /* 1086 * Query the hal about antenna support. 1087 */ 1088 sc->sc_defant = ath_hal_getdefantenna(ah); 1089 1090 /* 1091 * Not all chips have the VEOL support we want to 1092 * use with IBSS beacons; check here for it. 1093 */ 1094 sc->sc_hasveol = ath_hal_hasveol(ah); 1095 1096 /* get mac address from hardware */ 1097 ath_hal_getmac(ah, macaddr); 1098 if (sc->sc_hasbmask) 1099 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); 1100 1101 /* NB: used to size node table key mapping array */ 1102 ic->ic_max_keyix = sc->sc_keymax; 1103 /* call MI attach routine. */ 1104 ieee80211_ifattach(ic, macaddr); 1105 ic->ic_setregdomain = ath_setregdomain; 1106 ic->ic_getradiocaps = ath_getradiocaps; 1107 sc->sc_opmode = HAL_M_STA; 1108 1109 /* override default methods */ 1110 ic->ic_newassoc = ath_newassoc; 1111 ic->ic_updateslot = ath_updateslot; 1112 ic->ic_wme.wme_update = ath_wme_update; 1113 ic->ic_vap_create = ath_vap_create; 1114 ic->ic_vap_delete = ath_vap_delete; 1115 ic->ic_raw_xmit = ath_raw_xmit; 1116 ic->ic_update_mcast = ath_update_mcast; 1117 ic->ic_update_promisc = ath_update_promisc; 1118 ic->ic_node_alloc = ath_node_alloc; 1119 sc->sc_node_free = ic->ic_node_free; 1120 ic->ic_node_free = ath_node_free; 1121 sc->sc_node_cleanup = ic->ic_node_cleanup; 1122 ic->ic_node_cleanup = ath_node_cleanup; 1123 ic->ic_node_getsignal = ath_node_getsignal; 1124 ic->ic_scan_start = ath_scan_start; 1125 ic->ic_scan_end = ath_scan_end; 1126 ic->ic_set_channel = ath_set_channel; 1127 #ifdef ATH_ENABLE_11N 1128 /* 802.11n specific - but just override anyway */ 1129 sc->sc_addba_request = ic->ic_addba_request; 1130 sc->sc_addba_response = ic->ic_addba_response; 1131 sc->sc_addba_stop = ic->ic_addba_stop; 1132 sc->sc_bar_response = ic->ic_bar_response; 1133 sc->sc_addba_response_timeout = ic->ic_addba_response_timeout; 1134 1135 ic->ic_addba_request = ath_addba_request; 1136 ic->ic_addba_response = ath_addba_response; 1137 ic->ic_addba_response_timeout = ath_addba_response_timeout; 1138 ic->ic_addba_stop = ath_addba_stop; 1139 ic->ic_bar_response = ath_bar_response; 1140 1141 ic->ic_update_chw = ath_update_chw; 1142 #endif /* ATH_ENABLE_11N */ 1143 1144 #ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT 1145 /* 1146 * There's one vendor bitmap entry in the RX radiotap 1147 * header; make sure that's taken into account. 1148 */ 1149 ieee80211_radiotap_attachv(ic, 1150 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0, 1151 ATH_TX_RADIOTAP_PRESENT, 1152 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1, 1153 ATH_RX_RADIOTAP_PRESENT); 1154 #else 1155 /* 1156 * No vendor bitmap/extensions are present. 
1157 */ 1158 ieee80211_radiotap_attach(ic, 1159 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 1160 ATH_TX_RADIOTAP_PRESENT, 1161 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1162 ATH_RX_RADIOTAP_PRESENT); 1163 #endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */ 1164 1165 /* 1166 * Setup the ALQ logging if required 1167 */ 1168 #ifdef ATH_DEBUG_ALQ 1169 if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev)); 1170 if_ath_alq_setcfg(&sc->sc_alq, 1171 sc->sc_ah->ah_macVersion, 1172 sc->sc_ah->ah_macRev, 1173 sc->sc_ah->ah_phyRev, 1174 sc->sc_ah->ah_magic); 1175 #endif 1176 1177 /* 1178 * Setup dynamic sysctl's now that country code and 1179 * regdomain are available from the hal. 1180 */ 1181 ath_sysctlattach(sc); 1182 ath_sysctl_stats_attach(sc); 1183 ath_sysctl_hal_attach(sc); 1184 1185 if (bootverbose) 1186 ieee80211_announce(ic); 1187 ath_announce(sc); 1188 1189 /* 1190 * Put it to sleep for now. 1191 */ 1192 ath_power_setpower(sc, HAL_PM_FULL_SLEEP); 1193 1194 return 0; 1195 bad2: 1196 ath_tx_cleanup(sc); 1197 ath_desc_free(sc); 1198 ath_txdma_teardown(sc); 1199 ath_rxdma_teardown(sc); 1200 bad: 1201 if (ah) 1202 ath_hal_detach(ah); 1203 1204 /* 1205 * To work around scoping issues with CURVNET_SET/CURVNET_RESTORE.. 1206 */ 1207 #if !defined(__DragonFly__) 1208 if (ifp != NULL && ifp->if_vnet) { 1209 CURVNET_SET(ifp->if_vnet); 1210 if_free(ifp); 1211 CURVNET_RESTORE(); 1212 } else 1213 #endif 1214 if (ifp != NULL) 1215 if_free(ifp); 1216 sc->sc_invalid = 1; 1217 return error; 1218 } 1219 1220 int 1221 ath_detach(struct ath_softc *sc) 1222 { 1223 struct ifnet *ifp = sc->sc_ifp; 1224 1225 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1226 __func__, ifp->if_flags); 1227 1228 /* 1229 * NB: the order of these is important: 1230 * o stop the chip so no more interrupts will fire 1231 * o call the 802.11 layer before detaching the hal to 1232 * insure callbacks into the driver to delete global 1233 * key cache entries can be handled 1234 * o free the taskqueue which drains any pending tasks 1235 * o reclaim the tx queue data structures after calling 1236 * the 802.11 layer as we'll get called back to reclaim 1237 * node state and potentially want to use them 1238 * o to cleanup the tx queues the hal is called, so detach 1239 * it last 1240 * Other than that, it's straightforward... 1241 */ 1242 1243 /* 1244 * XXX Wake the hardware up first. ath_stop() will still 1245 * wake it up first, but I'd rather do it here just to 1246 * ensure it's awake. 1247 */ 1248 ath_power_set_power_state(sc, HAL_PM_AWAKE); 1249 ath_power_setpower(sc, HAL_PM_AWAKE); 1250 1251 /* 1252 * Stop things cleanly. 
1253 */ 1254 ath_stop(ifp); 1255 wlan_serialize_enter(); 1256 ieee80211_ifdetach(ifp->if_l2com); 1257 wlan_serialize_exit(); 1258 taskqueue_free(sc->sc_tq); 1259 #ifdef ATH_TX99_DIAG 1260 if (sc->sc_tx99 != NULL) 1261 sc->sc_tx99->detach(sc->sc_tx99); 1262 #endif 1263 ath_rate_detach(sc->sc_rc); 1264 #ifdef ATH_DEBUG_ALQ 1265 if_ath_alq_tidyup(&sc->sc_alq); 1266 #endif 1267 ath_lna_div_detach(sc); 1268 ath_btcoex_detach(sc); 1269 ath_spectral_detach(sc); 1270 ath_dfs_detach(sc); 1271 ath_desc_free(sc); 1272 ath_txdma_teardown(sc); 1273 ath_rxdma_teardown(sc); 1274 ath_tx_cleanup(sc); 1275 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */ 1276 1277 CURVNET_SET(ifp->if_vnet); 1278 if_free(ifp); 1279 CURVNET_RESTORE(); 1280 1281 if (sc->sc_sysctl_tree) { 1282 sysctl_ctx_free(&sc->sc_sysctl_ctx); 1283 sc->sc_sysctl_tree = NULL; 1284 } 1285 1286 return 0; 1287 } 1288 1289 /* 1290 * MAC address handling for multiple BSS on the same radio. 1291 * The first vap uses the MAC address from the EEPROM. For 1292 * subsequent vap's we set the U/L bit (bit 1) in the MAC 1293 * address and use the next six bits as an index. 1294 */ 1295 static void 1296 assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) 1297 { 1298 int i; 1299 1300 if (clone && sc->sc_hasbmask) { 1301 /* NB: we only do this if h/w supports multiple bssid */ 1302 for (i = 0; i < 8; i++) 1303 if ((sc->sc_bssidmask & (1<<i)) == 0) 1304 break; 1305 if (i != 0) 1306 mac[0] |= (i << 2)|0x2; 1307 } else 1308 i = 0; 1309 sc->sc_bssidmask |= 1<<i; 1310 sc->sc_hwbssidmask[0] &= ~mac[0]; 1311 if (i == 0) 1312 sc->sc_nbssid0++; 1313 } 1314 1315 static void 1316 reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) 1317 { 1318 int i = mac[0] >> 2; 1319 uint8_t mask; 1320 1321 if (i != 0 || --sc->sc_nbssid0 == 0) { 1322 sc->sc_bssidmask &= ~(1<<i); 1323 /* recalculate bssid mask from remaining addresses */ 1324 mask = 0xff; 1325 for (i = 1; i < 8; i++) 1326 if (sc->sc_bssidmask & (1<<i)) 1327 mask &= ~((i<<2)|0x2); 1328 sc->sc_hwbssidmask[0] |= mask; 1329 } 1330 } 1331 1332 /* 1333 * Assign a beacon xmit slot. We try to space out 1334 * assignments so when beacons are staggered the 1335 * traffic coming out of the cab q has maximal time 1336 * to go out before the next beacon is scheduled. 
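 *
 * The search below prefers a free slot whose neighbours (modulo
 * ATH_BCBUF) are also empty; failing that it settles for the last
 * free slot seen.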
1337 */ 1338 static int 1339 assign_bslot(struct ath_softc *sc) 1340 { 1341 u_int slot, free; 1342 1343 free = 0; 1344 for (slot = 0; slot < ATH_BCBUF; slot++) 1345 if (sc->sc_bslot[slot] == NULL) { 1346 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL && 1347 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL) 1348 return slot; 1349 free = slot; 1350 /* NB: keep looking for a double slot */ 1351 } 1352 return free; 1353 } 1354 1355 static struct ieee80211vap * 1356 ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 1357 enum ieee80211_opmode opmode, int flags, 1358 const uint8_t bssid[IEEE80211_ADDR_LEN], 1359 const uint8_t mac0[IEEE80211_ADDR_LEN]) 1360 { 1361 struct ath_softc *sc = ic->ic_ifp->if_softc; 1362 struct ath_vap *avp; 1363 struct ieee80211vap *vap; 1364 uint8_t mac[IEEE80211_ADDR_LEN]; 1365 int needbeacon, error; 1366 enum ieee80211_opmode ic_opmode; 1367 1368 avp = (struct ath_vap *) kmalloc(sizeof(struct ath_vap), 1369 M_80211_VAP, M_WAITOK | M_ZERO); 1370 needbeacon = 0; 1371 IEEE80211_ADDR_COPY(mac, mac0); 1372 1373 ATH_LOCK(sc); 1374 ic_opmode = opmode; /* default to opmode of new vap */ 1375 switch (opmode) { 1376 case IEEE80211_M_STA: 1377 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */ 1378 device_printf(sc->sc_dev, "only 1 sta vap supported\n"); 1379 goto bad; 1380 } 1381 if (sc->sc_nvaps) { 1382 /* 1383 * With multiple vaps we must fall back 1384 * to s/w beacon miss handling. 1385 */ 1386 flags |= IEEE80211_CLONE_NOBEACONS; 1387 } 1388 if (flags & IEEE80211_CLONE_NOBEACONS) { 1389 /* 1390 * Station mode w/o beacons are implemented w/ AP mode. 1391 */ 1392 ic_opmode = IEEE80211_M_HOSTAP; 1393 } 1394 break; 1395 case IEEE80211_M_IBSS: 1396 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ 1397 device_printf(sc->sc_dev, 1398 "only 1 ibss vap supported\n"); 1399 goto bad; 1400 } 1401 needbeacon = 1; 1402 break; 1403 case IEEE80211_M_AHDEMO: 1404 #ifdef IEEE80211_SUPPORT_TDMA 1405 if (flags & IEEE80211_CLONE_TDMA) { 1406 if (sc->sc_nvaps != 0) { 1407 device_printf(sc->sc_dev, 1408 "only 1 tdma vap supported\n"); 1409 goto bad; 1410 } 1411 needbeacon = 1; 1412 flags |= IEEE80211_CLONE_NOBEACONS; 1413 } 1414 /* fall thru... */ 1415 #endif 1416 case IEEE80211_M_MONITOR: 1417 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { 1418 /* 1419 * Adopt existing mode. Adding a monitor or ahdemo 1420 * vap to an existing configuration is of dubious 1421 * value but should be ok. 1422 */ 1423 /* XXX not right for monitor mode */ 1424 ic_opmode = ic->ic_opmode; 1425 } 1426 break; 1427 case IEEE80211_M_HOSTAP: 1428 case IEEE80211_M_MBSS: 1429 needbeacon = 1; 1430 break; 1431 case IEEE80211_M_WDS: 1432 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) { 1433 device_printf(sc->sc_dev, 1434 "wds not supported in sta mode\n"); 1435 goto bad; 1436 } 1437 /* 1438 * Silently remove any request for a unique 1439 * bssid; WDS vap's always share the local 1440 * mac address. 1441 */ 1442 flags &= ~IEEE80211_CLONE_BSSID; 1443 if (sc->sc_nvaps == 0) 1444 ic_opmode = IEEE80211_M_HOSTAP; 1445 else 1446 ic_opmode = ic->ic_opmode; 1447 break; 1448 default: 1449 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); 1450 goto bad; 1451 } 1452 /* 1453 * Check that a beacon buffer is available; the code below assumes it. 1454 */ 1455 if (needbeacon & TAILQ_EMPTY(&sc->sc_bbuf)) { 1456 device_printf(sc->sc_dev, "no beacon buffer available\n"); 1457 goto bad; 1458 } 1459 1460 /* STA, AHDEMO? 
*/ 1461 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { 1462 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); 1463 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1464 } 1465 1466 vap = &avp->av_vap; 1467 /* XXX can't hold mutex across if_alloc */ 1468 ATH_UNLOCK(sc); 1469 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, 1470 bssid, mac); 1471 ATH_LOCK(sc); 1472 if (error != 0) { 1473 device_printf(sc->sc_dev, "%s: error %d creating vap\n", 1474 __func__, error); 1475 goto bad2; 1476 } 1477 1478 /* h/w crypto support */ 1479 vap->iv_key_alloc = ath_key_alloc; 1480 vap->iv_key_delete = ath_key_delete; 1481 vap->iv_key_set = ath_key_set; 1482 vap->iv_key_update_begin = ath_key_update_begin; 1483 vap->iv_key_update_end = ath_key_update_end; 1484 1485 /* override various methods */ 1486 avp->av_recv_mgmt = vap->iv_recv_mgmt; 1487 vap->iv_recv_mgmt = ath_recv_mgmt; 1488 vap->iv_reset = ath_reset_vap; 1489 vap->iv_update_beacon = ath_beacon_update; 1490 avp->av_newstate = vap->iv_newstate; 1491 vap->iv_newstate = ath_newstate; 1492 avp->av_bmiss = vap->iv_bmiss; 1493 vap->iv_bmiss = ath_bmiss_vap; 1494 1495 #if 0 1496 avp->av_node_ps = vap->iv_node_ps; 1497 vap->iv_node_ps = ath_node_powersave; 1498 #endif 1499 1500 avp->av_set_tim = vap->iv_set_tim; 1501 vap->iv_set_tim = ath_node_set_tim; 1502 1503 #if 0 1504 avp->av_recv_pspoll = vap->iv_recv_pspoll; 1505 vap->iv_recv_pspoll = ath_node_recv_pspoll; 1506 #endif 1507 1508 /* Set default parameters */ 1509 1510 /* 1511 * Anything earlier than some AR9300 series MACs don't 1512 * support a smaller MPDU density. 1513 */ 1514 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; 1515 /* 1516 * All NICs can handle the maximum size, however 1517 * AR5416 based MACs can only TX aggregates w/ RTS 1518 * protection when the total aggregate size is <= 8k. 1519 * However, for now that's enforced by the TX path. 1520 */ 1521 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; 1522 1523 avp->av_bslot = -1; 1524 if (needbeacon) { 1525 /* 1526 * Allocate beacon state and setup the q for buffered 1527 * multicast frames. We know a beacon buffer is 1528 * available because we checked above. 1529 */ 1530 avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf); 1531 TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list); 1532 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { 1533 /* 1534 * Assign the vap to a beacon xmit slot. As above 1535 * this cannot fail to find a free one. 1536 */ 1537 avp->av_bslot = assign_bslot(sc); 1538 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, 1539 ("beacon slot %u not empty", avp->av_bslot)); 1540 sc->sc_bslot[avp->av_bslot] = vap; 1541 sc->sc_nbcnvaps++; 1542 } 1543 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { 1544 /* 1545 * Multple vaps are to transmit beacons and we 1546 * have h/w support for TSF adjusting; enable 1547 * use of staggered beacons. 
1548 */ 1549 sc->sc_stagbeacons = 1; 1550 } 1551 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); 1552 } 1553 1554 ic->ic_opmode = ic_opmode; 1555 if (opmode != IEEE80211_M_WDS) { 1556 sc->sc_nvaps++; 1557 if (opmode == IEEE80211_M_STA) 1558 sc->sc_nstavaps++; 1559 if (opmode == IEEE80211_M_MBSS) 1560 sc->sc_nmeshvaps++; 1561 } 1562 switch (ic_opmode) { 1563 case IEEE80211_M_IBSS: 1564 sc->sc_opmode = HAL_M_IBSS; 1565 break; 1566 case IEEE80211_M_STA: 1567 sc->sc_opmode = HAL_M_STA; 1568 break; 1569 case IEEE80211_M_AHDEMO: 1570 #ifdef IEEE80211_SUPPORT_TDMA 1571 if (vap->iv_caps & IEEE80211_C_TDMA) { 1572 sc->sc_tdma = 1; 1573 /* NB: disable tsf adjust */ 1574 sc->sc_stagbeacons = 0; 1575 } 1576 /* 1577 * NB: adhoc demo mode is a pseudo mode; to the hal it's 1578 * just ap mode. 1579 */ 1580 /* fall thru... */ 1581 #endif 1582 case IEEE80211_M_HOSTAP: 1583 case IEEE80211_M_MBSS: 1584 sc->sc_opmode = HAL_M_HOSTAP; 1585 break; 1586 case IEEE80211_M_MONITOR: 1587 sc->sc_opmode = HAL_M_MONITOR; 1588 break; 1589 default: 1590 /* XXX should not happen */ 1591 break; 1592 } 1593 if (sc->sc_hastsfadd) { 1594 /* 1595 * Configure whether or not TSF adjust should be done. 1596 */ 1597 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); 1598 } 1599 if (flags & IEEE80211_CLONE_NOBEACONS) { 1600 /* 1601 * Enable s/w beacon miss handling. 1602 */ 1603 sc->sc_swbmiss = 1; 1604 } 1605 ATH_UNLOCK(sc); 1606 1607 /* complete setup */ 1608 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status); 1609 return vap; 1610 bad2: 1611 reclaim_address(sc, mac); 1612 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1613 bad: 1614 kfree(avp, M_80211_VAP); 1615 ATH_UNLOCK(sc); 1616 return NULL; 1617 } 1618 1619 static void 1620 ath_vap_delete(struct ieee80211vap *vap) 1621 { 1622 struct ieee80211com *ic = vap->iv_ic; 1623 struct ifnet *ifp = ic->ic_ifp; 1624 struct ath_softc *sc = ifp->if_softc; 1625 struct ath_hal *ah = sc->sc_ah; 1626 struct ath_vap *avp = ATH_VAP(vap); 1627 1628 ath_power_set_power_state(sc, HAL_PM_AWAKE); 1629 1630 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 1631 if (ifp->if_flags & IFF_RUNNING) { 1632 /* 1633 * Quiesce the hardware while we remove the vap. In 1634 * particular we need to reclaim all references to 1635 * the vap state by any frames pending on the tx queues. 1636 */ 1637 ath_hal_intrset(ah, 0); /* disable interrupts */ 1638 ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */ 1639 /* XXX Do all frames from all vaps/nodes need draining here? */ 1640 ath_stoprecv(sc, 1); /* stop recv side */ 1641 } 1642 1643 /* .. leave the hardware awake for now. */ 1644 1645 ieee80211_vap_detach(vap); 1646 1647 /* 1648 * XXX Danger Will Robinson! Danger! 1649 * 1650 * Because ieee80211_vap_detach() can queue a frame (the station 1651 * diassociate message?) after we've drained the TXQ and 1652 * flushed the software TXQ, we will end up with a frame queued 1653 * to a node whose vap is about to be freed. 1654 * 1655 * To work around this, flush the hardware/software again. 1656 * This may be racy - the ath task may be running and the packet 1657 * may be being scheduled between sw->hw txq. Tsk. 1658 * 1659 * TODO: figure out why a new node gets allocated somewhere around 1660 * here (after the ath_tx_swq() call; and after an ath_stop_locked() 1661 * call!) 1662 */ 1663 1664 ath_draintxq(sc, ATH_RESET_DEFAULT); 1665 1666 ATH_LOCK(sc); 1667 /* 1668 * Reclaim beacon state. 
Note this must be done before 1669 * the vap instance is reclaimed as we may have a reference 1670 * to it in the buffer for the beacon frame. 1671 */ 1672 if (avp->av_bcbuf != NULL) { 1673 if (avp->av_bslot != -1) { 1674 sc->sc_bslot[avp->av_bslot] = NULL; 1675 sc->sc_nbcnvaps--; 1676 } 1677 ath_beacon_return(sc, avp->av_bcbuf); 1678 avp->av_bcbuf = NULL; 1679 if (sc->sc_nbcnvaps == 0) { 1680 sc->sc_stagbeacons = 0; 1681 if (sc->sc_hastsfadd) 1682 ath_hal_settsfadjust(sc->sc_ah, 0); 1683 } 1684 /* 1685 * Reclaim any pending mcast frames for the vap. 1686 */ 1687 ath_tx_draintxq(sc, &avp->av_mcastq); 1688 } 1689 /* 1690 * Update bookkeeping. 1691 */ 1692 if (vap->iv_opmode == IEEE80211_M_STA) { 1693 sc->sc_nstavaps--; 1694 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) 1695 sc->sc_swbmiss = 0; 1696 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || 1697 vap->iv_opmode == IEEE80211_M_MBSS) { 1698 reclaim_address(sc, vap->iv_myaddr); 1699 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); 1700 if (vap->iv_opmode == IEEE80211_M_MBSS) 1701 sc->sc_nmeshvaps--; 1702 } 1703 if (vap->iv_opmode != IEEE80211_M_WDS) 1704 sc->sc_nvaps--; 1705 #ifdef IEEE80211_SUPPORT_TDMA 1706 /* TDMA operation ceases when the last vap is destroyed */ 1707 if (sc->sc_tdma && sc->sc_nvaps == 0) { 1708 sc->sc_tdma = 0; 1709 sc->sc_swbmiss = 0; 1710 } 1711 #endif 1712 kfree(avp, M_80211_VAP); 1713 1714 if (ifp->if_flags & IFF_RUNNING) { 1715 /* 1716 * Restart rx+tx machines if still running (RUNNING will 1717 * be reset if we just destroyed the last vap). 1718 */ 1719 if (ath_startrecv(sc) != 0) 1720 if_printf(ifp, "%s: unable to restart recv logic\n", 1721 __func__); 1722 if (sc->sc_beacons) { /* restart beacons */ 1723 #ifdef IEEE80211_SUPPORT_TDMA 1724 if (sc->sc_tdma) 1725 ath_tdma_config(sc, NULL); 1726 else 1727 #endif 1728 ath_beacon_config(sc, NULL); 1729 } 1730 ath_hal_intrset(ah, sc->sc_imask); 1731 } 1732 1733 /* Ok, let the hardware asleep. */ 1734 ath_power_restore_power_state(sc); 1735 ATH_UNLOCK(sc); 1736 } 1737 1738 void 1739 ath_suspend(struct ath_softc *sc) 1740 { 1741 struct ifnet *ifp = sc->sc_ifp; 1742 struct ieee80211com *ic = ifp->if_l2com; 1743 1744 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1745 __func__, ifp->if_flags); 1746 1747 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0; 1748 1749 ieee80211_suspend_all(ic); 1750 /* 1751 * NB: don't worry about putting the chip in low power 1752 * mode; pci will power off our socket on suspend and 1753 * CardBus detaches the device. 1754 */ 1755 1756 /* 1757 * XXX ensure none of the taskqueues are running 1758 * XXX ensure sc_invalid is 1 1759 * XXX ensure the calibration callout is disabled 1760 */ 1761 1762 /* Disable the PCIe PHY, complete with workarounds */ 1763 ath_hal_enablepcie(sc->sc_ah, 1, 1); 1764 } 1765 1766 /* 1767 * Reset the key cache since some parts do not reset the 1768 * contents on resume. First we clear all entries, then 1769 * re-load keys that the 802.11 layer assumes are setup 1770 * in h/w. 1771 */ 1772 static void 1773 ath_reset_keycache(struct ath_softc *sc) 1774 { 1775 struct ifnet *ifp = sc->sc_ifp; 1776 struct ieee80211com *ic = ifp->if_l2com; 1777 struct ath_hal *ah = sc->sc_ah; 1778 int i; 1779 1780 ath_power_set_power_state(sc, HAL_PM_AWAKE); 1781 for (i = 0; i < sc->sc_keymax; i++) 1782 ath_hal_keyreset(ah, i); 1783 ath_power_restore_power_state(sc); 1784 ieee80211_crypto_reload_keys(ic); 1785 } 1786 1787 /* 1788 * Fetch the current chainmask configuration based on the current 1789 * operating channel and options. 
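 *
 * The RX chainmask is always the configured sc_rxchainmask; the TX
 * chainmask is only the full sc_txchainmask on HT channels and is
 * forced to a single chain for legacy channels.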
1790 */ 1791 static void 1792 ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan) 1793 { 1794 1795 /* 1796 * Set TX chainmask to the currently configured chainmask; 1797 * the TX chainmask depends upon the current operating mode. 1798 */ 1799 sc->sc_cur_rxchainmask = sc->sc_rxchainmask; 1800 if (IEEE80211_IS_CHAN_HT(chan)) { 1801 sc->sc_cur_txchainmask = sc->sc_txchainmask; 1802 } else { 1803 sc->sc_cur_txchainmask = 1; 1804 } 1805 1806 DPRINTF(sc, ATH_DEBUG_RESET, 1807 "%s: TX chainmask is now 0x%x, RX is now 0x%x\n", 1808 __func__, 1809 sc->sc_cur_txchainmask, 1810 sc->sc_cur_rxchainmask); 1811 } 1812 1813 void 1814 ath_resume(struct ath_softc *sc) 1815 { 1816 struct ifnet *ifp = sc->sc_ifp; 1817 struct ieee80211com *ic = ifp->if_l2com; 1818 struct ath_hal *ah = sc->sc_ah; 1819 HAL_STATUS status; 1820 1821 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1822 __func__, ifp->if_flags); 1823 1824 /* Re-enable PCIe, re-enable the PCIe bus */ 1825 ath_hal_enablepcie(ah, 0, 0); 1826 1827 /* 1828 * Must reset the chip before we reload the 1829 * keycache as we were powered down on suspend. 1830 */ 1831 ath_update_chainmasks(sc, 1832 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan); 1833 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 1834 sc->sc_cur_rxchainmask); 1835 1836 /* Ensure we set the current power state to on */ 1837 ath_power_setselfgen(sc, HAL_PM_AWAKE); 1838 ath_power_set_power_state(sc, HAL_PM_AWAKE); 1839 ath_power_setpower(sc, HAL_PM_AWAKE); 1840 1841 sc->sc_rxfifo_state = ATH_RXFIFO_RESET; 1842 ath_hal_reset(ah, sc->sc_opmode, 1843 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan, 1844 AH_FALSE, &status); 1845 ath_reset_keycache(sc); 1846 1847 /* Let DFS at it in case it's a DFS channel */ 1848 ath_dfs_radar_enable(sc, ic->ic_curchan); 1849 1850 /* Let spectral at in case spectral is enabled */ 1851 ath_spectral_enable(sc, ic->ic_curchan); 1852 1853 /* 1854 * Let bluetooth coexistence at in case it's needed for this channel 1855 */ 1856 ath_btcoex_enable(sc, ic->ic_curchan); 1857 1858 /* 1859 * If we're doing TDMA, enforce the TXOP limitation for chips that 1860 * support it. 1861 */ 1862 if (sc->sc_hasenforcetxop && sc->sc_tdma) 1863 ath_hal_setenforcetxop(sc->sc_ah, 1); 1864 else 1865 ath_hal_setenforcetxop(sc->sc_ah, 0); 1866 1867 /* Restore the LED configuration */ 1868 ath_led_config(sc); 1869 ath_hal_setledstate(ah, HAL_LED_INIT); 1870 1871 if (sc->sc_resume_up) 1872 ieee80211_resume_all(ic); 1873 1874 ath_power_restore_power_state(sc); 1875 1876 /* XXX beacons ? */ 1877 } 1878 1879 void 1880 ath_shutdown(struct ath_softc *sc) 1881 { 1882 struct ifnet *ifp = sc->sc_ifp; 1883 1884 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1885 __func__, ifp->if_flags); 1886 1887 ath_stop(ifp); 1888 /* NB: no point powering down chip as we're about to reboot */ 1889 } 1890 1891 /* 1892 * Interrupt handler. Most of the actual processing is deferred. 1893 */ 1894 void 1895 ath_intr(void *arg) 1896 { 1897 struct ath_softc *sc = arg; 1898 struct ifnet *ifp = sc->sc_ifp; 1899 struct ath_hal *ah = sc->sc_ah; 1900 HAL_INT status = 0; 1901 uint32_t txqs; 1902 1903 /* 1904 * If we're inside a reset path, just print a warning and 1905 * clear the ISR. The reset routine will finish it for us. 
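 * Reading the ISR here clears it and further interrupts are
 * disabled, so the line doesn't keep re-asserting while the
 * reset path finishes up.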
1906 */
1907 ATH_PCU_LOCK(sc);
1908 if (sc->sc_inreset_cnt) {
1909 HAL_INT status;
1910 ath_hal_getisr(ah, &status); /* clear ISR */
1911 ath_hal_intrset(ah, 0); /* disable further intr's */
1912 DPRINTF(sc, ATH_DEBUG_ANY,
1913 "%s: in reset, ignoring: status=0x%x\n",
1914 __func__, status);
1915 ATH_PCU_UNLOCK(sc);
1916 return;
1917 }
1918
1919 if (sc->sc_invalid) {
1920 /*
1921 * The hardware is not ready/present, don't touch anything.
1922 * Note this can happen early on if the IRQ is shared.
1923 */
1924 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1925 ATH_PCU_UNLOCK(sc);
1926 return;
1927 }
1928 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */
1929 ATH_PCU_UNLOCK(sc);
1930 return;
1931 }
1932
1933 ath_power_set_power_state(sc, HAL_PM_AWAKE);
1934
1935 if ((ifp->if_flags & IFF_UP) == 0 ||
1936 (ifp->if_flags & IFF_RUNNING) == 0) {
1937 HAL_INT status;
1938
1939 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1940 __func__, ifp->if_flags);
1941 ath_hal_getisr(ah, &status); /* clear ISR */
1942 ath_hal_intrset(ah, 0); /* disable further intr's */
1943 ATH_PCU_UNLOCK(sc);
1944 ath_power_restore_power_state(sc);
1945 return;
1946 }
1947
1948 /*
1949 * Figure out the reason(s) for the interrupt. Note
1950 * that the hal returns a pseudo-ISR that may include
1951 * bits we haven't explicitly enabled so we mask the
1952 * value to ensure we only process bits we requested.
1953 */
1954 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
1955 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1956 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status);
1957 #ifdef ATH_DEBUG_ALQ
1958 if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate,
1959 ah->ah_syncstate);
1960 #endif /* ATH_DEBUG_ALQ */
1961 #ifdef ATH_KTR_INTR_DEBUG
1962 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5,
1963 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
1964 ah->ah_intrstate[0],
1965 ah->ah_intrstate[1],
1966 ah->ah_intrstate[2],
1967 ah->ah_intrstate[3],
1968 ah->ah_intrstate[6]);
1969 #endif
1970
1971 /* Squirrel away SYNC interrupt debugging */
1972 if (ah->ah_syncstate != 0) {
1973 int i;
1974 for (i = 0; i < 32; i++)
1975 if (ah->ah_syncstate & (1 << i))
1976 sc->sc_intr_stats.sync_intr[i]++;
1977 }
1978
1979 status &= sc->sc_imask; /* discard unasked for bits */
1980
1981 /* Short-circuit un-handled interrupts */
1982 if (status == 0x0) {
1983 ATH_PCU_UNLOCK(sc);
1984 ath_power_restore_power_state(sc);
1985 return;
1986 }
1987
1988 /*
1989 * Take a note that we're inside the interrupt handler, so
1990 * the reset routines know to wait.
1991 */
1992 sc->sc_intr_cnt++;
1993 ATH_PCU_UNLOCK(sc);
1994
1995 /*
1996 * Handle the interrupt. We won't run concurrent with the reset
1997 * or channel change routines as they'll wait for sc_intr_cnt
1998 * to be 0 before continuing.
1999 */
2000 if (status & HAL_INT_FATAL) {
2001 sc->sc_stats.ast_hardware++;
2002 ath_hal_intrset(ah, 0); /* disable intr's until reset */
2003 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
2004 } else {
2005 if (status & HAL_INT_SWBA) {
2006 /*
2007 * Software beacon alert--time to send a beacon.
2008 * Handle beacon transmission directly; deferring
2009 * this is too slow to meet timing constraints
2010 * under load.
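 * For TDMA the beacon is only sent once the slot counter
 * (sc_tdmaswba) reaches zero; otherwise the counter is simply
 * decremented.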
2011 */ 2012 #ifdef IEEE80211_SUPPORT_TDMA 2013 if (sc->sc_tdma) { 2014 if (sc->sc_tdmaswba == 0) { 2015 struct ieee80211com *ic = ifp->if_l2com; 2016 struct ieee80211vap *vap = 2017 TAILQ_FIRST(&ic->ic_vaps); 2018 ath_tdma_beacon_send(sc, vap); 2019 sc->sc_tdmaswba = 2020 vap->iv_tdma->tdma_bintval; 2021 } else 2022 sc->sc_tdmaswba--; 2023 } else 2024 #endif 2025 { 2026 ath_beacon_proc(sc, 0); 2027 #ifdef IEEE80211_SUPPORT_SUPERG 2028 /* 2029 * Schedule the rx taskq in case there's no 2030 * traffic so any frames held on the staging 2031 * queue are aged and potentially flushed. 2032 */ 2033 sc->sc_rx.recv_sched(sc, 1); 2034 #endif 2035 } 2036 } 2037 if ((status & HAL_INT_RXEOL) && sc->sc_kickpcu == 0) { 2038 int imask; 2039 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL"); 2040 ATH_PCU_LOCK(sc); 2041 /* 2042 * NB: the hardware should re-read the link when 2043 * RXE bit is written, but it doesn't work at 2044 * least on older hardware revs. 2045 */ 2046 sc->sc_stats.ast_rxeol++; 2047 /* 2048 * Disable RXEOL/RXORN - prevent an interrupt 2049 * storm until the PCU logic can be reset. 2050 * In case the interface is reset some other 2051 * way before "sc_kickpcu" is called, don't 2052 * modify sc_imask - that way if it is reset 2053 * by a call to ath_reset() somehow, the 2054 * interrupt mask will be correctly reprogrammed. 2055 */ 2056 imask = sc->sc_imask; 2057 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); 2058 ath_hal_intrset(ah, imask); 2059 /* 2060 * Only blank sc_rxlink if we've not yet kicked 2061 * the PCU. 2062 * 2063 * This isn't entirely correct - the correct solution 2064 * would be to have a PCU lock and engage that for 2065 * the duration of the PCU fiddling; which would include 2066 * running the RX process. Otherwise we could end up 2067 * messing up the RX descriptor chain and making the 2068 * RX desc list much shorter. 2069 */ 2070 sc->sc_rxlink = NULL; 2071 sc->sc_kickpcu = 1; 2072 ATH_PCU_UNLOCK(sc); 2073 /* 2074 * Enqueue an RX proc, to handled whatever 2075 * is in the RX queue. 2076 * This will then kick the PCU. 2077 */ 2078 sc->sc_rx.recv_sched(sc, 1); 2079 } 2080 if (status & HAL_INT_TXURN) { 2081 sc->sc_stats.ast_txurn++; 2082 /* bump tx trigger level */ 2083 ath_hal_updatetxtriglevel(ah, AH_TRUE); 2084 } 2085 /* 2086 * Handle both the legacy and RX EDMA interrupt bits. 2087 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC. 2088 */ 2089 if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) { 2090 sc->sc_stats.ast_rx_intr++; 2091 sc->sc_rx.recv_sched(sc, 1); 2092 } 2093 if (status & HAL_INT_TX) { 2094 sc->sc_stats.ast_tx_intr++; 2095 /* 2096 * Grab all the currently set bits in the HAL txq bitmap 2097 * and blank them. This is the only place we should be 2098 * doing this. 2099 */ 2100 if (! 
sc->sc_isedma) { 2101 ATH_PCU_LOCK(sc); 2102 txqs = 0xffffffff; 2103 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); 2104 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3, 2105 "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x", 2106 txqs, 2107 sc->sc_txq_active, 2108 sc->sc_txq_active | txqs); 2109 sc->sc_txq_active |= txqs; 2110 ATH_PCU_UNLOCK(sc); 2111 } 2112 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); 2113 } 2114 if (status & HAL_INT_BMISS) { 2115 sc->sc_stats.ast_bmiss++; 2116 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); 2117 } 2118 if (status & HAL_INT_GTT) 2119 sc->sc_stats.ast_tx_timeout++; 2120 if (status & HAL_INT_CST) 2121 sc->sc_stats.ast_tx_cst++; 2122 if (status & HAL_INT_MIB) { 2123 sc->sc_stats.ast_mib++; 2124 ATH_PCU_LOCK(sc); 2125 /* 2126 * Disable interrupts until we service the MIB 2127 * interrupt; otherwise it will continue to fire. 2128 */ 2129 ath_hal_intrset(ah, 0); 2130 /* 2131 * Let the hal handle the event. We assume it will 2132 * clear whatever condition caused the interrupt. 2133 */ 2134 ath_hal_mibevent(ah, &sc->sc_halstats); 2135 /* 2136 * Don't reset the interrupt if we've just 2137 * kicked the PCU, or we may get a nested 2138 * RXEOL before the rxproc has had a chance 2139 * to run. 2140 */ 2141 if (sc->sc_kickpcu == 0) 2142 ath_hal_intrset(ah, sc->sc_imask); 2143 ATH_PCU_UNLOCK(sc); 2144 } 2145 if (status & HAL_INT_RXORN) { 2146 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ 2147 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN"); 2148 sc->sc_stats.ast_rxorn++; 2149 } 2150 if (status & HAL_INT_TSFOOR) { 2151 device_printf(sc->sc_dev, "%s: TSFOOR\n", __func__); 2152 sc->sc_syncbeacon = 1; 2153 } 2154 } 2155 ATH_PCU_LOCK(sc); 2156 sc->sc_intr_cnt--; 2157 ATH_PCU_UNLOCK(sc); 2158 2159 ath_power_restore_power_state(sc); 2160 } 2161 2162 static void 2163 ath_fatal_proc(void *arg, int pending) 2164 { 2165 struct ath_softc *sc = arg; 2166 struct ifnet *ifp = sc->sc_ifp; 2167 u_int32_t *state; 2168 u_int32_t len; 2169 void *sp; 2170 2171 if_printf(ifp, "hardware error; resetting\n"); 2172 /* 2173 * Fatal errors are unrecoverable. Typically these 2174 * are caused by DMA errors. Collect h/w state from 2175 * the hal so we can diagnose what's going on. 2176 */ 2177 wlan_serialize_enter(); 2178 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { 2179 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); 2180 state = sp; 2181 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", 2182 state[0], state[1] , state[2], state[3], 2183 state[4], state[5]); 2184 } 2185 ath_reset(ifp, ATH_RESET_NOLOSS); 2186 wlan_serialize_exit(); 2187 } 2188 2189 static void 2190 ath_bmiss_vap(struct ieee80211vap *vap) 2191 { 2192 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 2193 2194 /* 2195 * Workaround phantom bmiss interrupts by sanity-checking 2196 * the time of our last rx'd frame. If it is within the 2197 * beacon miss interval then ignore the interrupt. If it's 2198 * truly a bmiss we'll get another interrupt soon and that'll 2199 * be dispatched up for processing. Note this applies only 2200 * for h/w beacon miss events. 2201 */ 2202 2203 /* 2204 * XXX TODO: Just read the TSF during the interrupt path; 2205 * that way we don't have to wake up again just to read it 2206 * again. 
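 * For now, explicitly wake the chip up here so the TSF read
 * below returns a valid value.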
2207 */ 2208 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2209 2210 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { 2211 u_int64_t lastrx = sc->sc_lastrx; 2212 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); 2213 /* XXX should take a locked ref to iv_bss */ 2214 u_int bmisstimeout = 2215 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; 2216 2217 DPRINTF(sc, ATH_DEBUG_BEACON, 2218 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", 2219 __func__, (unsigned long long) tsf, 2220 (unsigned long long)(tsf - lastrx), 2221 (unsigned long long) lastrx, bmisstimeout); 2222 2223 if (tsf - lastrx <= bmisstimeout) { 2224 sc->sc_stats.ast_bmiss_phantom++; 2225 ath_power_restore_power_state(sc); 2226 return; 2227 } 2228 } 2229 2230 /* 2231 * There's no need to keep the hardware awake during the call 2232 * to av_bmiss(). 2233 */ 2234 ath_power_restore_power_state(sc); 2235 2236 /* 2237 * Attempt to force a beacon resync. 2238 */ 2239 sc->sc_syncbeacon = 1; 2240 2241 ATH_VAP(vap)->av_bmiss(vap); 2242 } 2243 2244 /* XXX this needs a force wakeup! */ 2245 int 2246 ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) 2247 { 2248 uint32_t rsize; 2249 void *sp; 2250 2251 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) 2252 return 0; 2253 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); 2254 *hangs = *(uint32_t *)sp; 2255 return 1; 2256 } 2257 2258 static void 2259 ath_bmiss_proc(void *arg, int pending) 2260 { 2261 struct ath_softc *sc = arg; 2262 struct ifnet *ifp = sc->sc_ifp; 2263 uint32_t hangs; 2264 2265 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); 2266 2267 /* 2268 * Do a reset upon any becaon miss event. 2269 * 2270 * It may be a non-recognised RX clear hang which needs a reset 2271 * to clear. 2272 */ 2273 wlan_serialize_enter(); 2274 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2275 ath_beacon_miss(sc); 2276 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { 2277 ath_reset(ifp, ATH_RESET_NOLOSS); 2278 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs); 2279 } else { 2280 ath_reset(ifp, ATH_RESET_NOLOSS); 2281 ieee80211_beacon_miss(ifp->if_l2com); 2282 } 2283 2284 /* Force a beacon resync, in case they've drifted */ 2285 sc->sc_syncbeacon = 1; 2286 ath_power_restore_power_state(sc); 2287 2288 wlan_serialize_exit(); 2289 } 2290 2291 /* 2292 * Handle TKIP MIC setup to deal hardware that doesn't do MIC 2293 * calcs together with WME. If necessary disable the crypto 2294 * hardware and mark the 802.11 state so keys will be setup 2295 * with the MIC work done in software. 2296 */ 2297 static void 2298 ath_settkipmic(struct ath_softc *sc) 2299 { 2300 struct ifnet *ifp = sc->sc_ifp; 2301 struct ieee80211com *ic = ifp->if_l2com; 2302 2303 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { 2304 if (ic->ic_flags & IEEE80211_F_WME) { 2305 ath_hal_settkipmic(sc->sc_ah, AH_FALSE); 2306 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; 2307 } else { 2308 ath_hal_settkipmic(sc->sc_ah, AH_TRUE); 2309 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 2310 } 2311 } 2312 } 2313 2314 static void 2315 ath_init(void *arg) 2316 { 2317 struct ath_softc *sc = (struct ath_softc *) arg; 2318 struct ifnet *ifp = sc->sc_ifp; 2319 struct ieee80211com *ic = ifp->if_l2com; 2320 struct ath_hal *ah = sc->sc_ah; 2321 HAL_STATUS status; 2322 2323 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 2324 __func__, ifp->if_flags); 2325 2326 ATH_LOCK(sc); 2327 /* 2328 * Stop anything previously setup. 
This is safe 2329 * whether this is the first time through or not. 2330 */ 2331 ath_stop_locked(ifp); 2332 2333 /* 2334 * The basic interface to setting the hardware in a good 2335 * state is ``reset''. On return the hardware is known to 2336 * be powered up and with interrupts disabled. This must 2337 * be followed by initialization of the appropriate bits 2338 * and then setup of the interrupt mask. 2339 */ 2340 ath_settkipmic(sc); 2341 ath_update_chainmasks(sc, ic->ic_curchan); 2342 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 2343 sc->sc_cur_rxchainmask); 2344 sc->sc_rxfifo_state = ATH_RXFIFO_RESET; 2345 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { 2346 if_printf(ifp, "unable to reset hardware; hal status %u\n", 2347 status); 2348 ATH_UNLOCK(sc); 2349 return; 2350 } 2351 ath_chan_change(sc, ic->ic_curchan); 2352 2353 /* Let DFS at it in case it's a DFS channel */ 2354 ath_dfs_radar_enable(sc, ic->ic_curchan); 2355 2356 /* Let spectral at in case spectral is enabled */ 2357 ath_spectral_enable(sc, ic->ic_curchan); 2358 2359 /* 2360 * Let bluetooth coexistence at in case it's needed for this channel 2361 */ 2362 ath_btcoex_enable(sc, ic->ic_curchan); 2363 2364 /* 2365 * If we're doing TDMA, enforce the TXOP limitation for chips that 2366 * support it. 2367 */ 2368 if (sc->sc_hasenforcetxop && sc->sc_tdma) 2369 ath_hal_setenforcetxop(sc->sc_ah, 1); 2370 else 2371 ath_hal_setenforcetxop(sc->sc_ah, 0); 2372 2373 /* 2374 * Likewise this is set during reset so update 2375 * state cached in the driver. 2376 */ 2377 sc->sc_diversity = ath_hal_getdiversity(ah); 2378 sc->sc_lastlongcal = 0; 2379 sc->sc_resetcal = 1; 2380 sc->sc_lastcalreset = 0; 2381 sc->sc_lastani = 0; 2382 sc->sc_lastshortcal = 0; 2383 sc->sc_doresetcal = AH_FALSE; 2384 /* 2385 * Beacon timers were cleared here; give ath_newstate() 2386 * a hint that the beacon timers should be poked when 2387 * things transition to the RUN state. 2388 */ 2389 sc->sc_beacons = 0; 2390 2391 /* 2392 * Setup the hardware after reset: the key cache 2393 * is filled as needed and the receive engine is 2394 * set going. Frame transmit is handled entirely 2395 * in the frame output path; there's nothing to do 2396 * here except setup the interrupt mask. 2397 */ 2398 if (ath_startrecv(sc) != 0) { 2399 if_printf(ifp, "unable to start recv logic\n"); 2400 ath_power_restore_power_state(sc); 2401 ATH_UNLOCK(sc); 2402 return; 2403 } 2404 2405 /* 2406 * Enable interrupts. 2407 */ 2408 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 2409 | HAL_INT_RXEOL | HAL_INT_RXORN 2410 | HAL_INT_TXURN 2411 | HAL_INT_FATAL | HAL_INT_GLOBAL; 2412 2413 /* 2414 * Enable RX EDMA bits. Note these overlap with 2415 * HAL_INT_RX and HAL_INT_RXDESC respectively. 2416 */ 2417 if (sc->sc_isedma) 2418 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP); 2419 2420 /* 2421 * Enable MIB interrupts when there are hardware phy counters. 2422 * Note we only do this (at the moment) for station mode. 2423 */ 2424 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 2425 sc->sc_imask |= HAL_INT_MIB; 2426 2427 /* 2428 * XXX add capability for this. 2429 * 2430 * If we're in STA mode (and maybe IBSS?) then register for 2431 * TSFOOR interrupts. 
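 * TSFOOR ("TSF out of range") fires when the hardware TSF has
 * drifted too far from the beacon timers; the interrupt handler
 * responds by forcing a beacon resync via sc_syncbeacon.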
2432 */ 2433 if (ic->ic_opmode == IEEE80211_M_STA) 2434 sc->sc_imask |= HAL_INT_TSFOOR; 2435 2436 /* Enable global TX timeout and carrier sense timeout if available */ 2437 if (ath_hal_gtxto_supported(ah)) 2438 sc->sc_imask |= HAL_INT_GTT; 2439 2440 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 2441 __func__, sc->sc_imask); 2442 2443 ifp->if_flags |= IFF_RUNNING; 2444 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 2445 ath_hal_intrset(ah, sc->sc_imask); 2446 2447 ath_power_restore_power_state(sc); 2448 ATH_UNLOCK(sc); 2449 2450 #ifdef ATH_TX99_DIAG 2451 if (sc->sc_tx99 != NULL) 2452 sc->sc_tx99->start(sc->sc_tx99); 2453 else 2454 #endif 2455 ieee80211_start_all(ic); /* start all vap's */ 2456 } 2457 2458 static void 2459 ath_stop_locked(struct ifnet *ifp) 2460 { 2461 struct ath_softc *sc = ifp->if_softc; 2462 struct ath_hal *ah = sc->sc_ah; 2463 2464 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", 2465 __func__, sc->sc_invalid, ifp->if_flags); 2466 2467 ATH_LOCK_ASSERT(sc); 2468 2469 /* 2470 * Wake the hardware up before fiddling with it. 2471 */ 2472 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2473 2474 if (ifp->if_flags & IFF_RUNNING) { 2475 /* 2476 * Shutdown the hardware and driver: 2477 * reset 802.11 state machine 2478 * turn off timers 2479 * disable interrupts 2480 * turn off the radio 2481 * clear transmit machinery 2482 * clear receive machinery 2483 * drain and release tx queues 2484 * reclaim beacon resources 2485 * power down hardware 2486 * 2487 * Note that some of this work is not possible if the 2488 * hardware is gone (invalid). 2489 */ 2490 #ifdef ATH_TX99_DIAG 2491 if (sc->sc_tx99 != NULL) 2492 sc->sc_tx99->stop(sc->sc_tx99); 2493 #endif 2494 callout_stop(&sc->sc_wd_ch); 2495 sc->sc_wd_timer = 0; 2496 ifp->if_flags &= ~IFF_RUNNING; 2497 if (!sc->sc_invalid) { 2498 if (sc->sc_softled) { 2499 callout_stop(&sc->sc_ledtimer); 2500 ath_hal_gpioset(ah, sc->sc_ledpin, 2501 !sc->sc_ledon); 2502 sc->sc_blinking = 0; 2503 } 2504 ath_hal_intrset(ah, 0); 2505 } 2506 ath_draintxq(sc, ATH_RESET_DEFAULT); 2507 if (!sc->sc_invalid) { 2508 ath_stoprecv(sc, 1); 2509 ath_hal_phydisable(ah); 2510 } else 2511 sc->sc_rxlink = NULL; 2512 ath_beacon_free(sc); /* XXX not needed */ 2513 } 2514 2515 /* And now, restore the current power state */ 2516 ath_power_restore_power_state(sc); 2517 } 2518 2519 /* 2520 * Wait until all pending TX/RX has completed. 2521 * 2522 * This waits until all existing transmit, receive and interrupts 2523 * have completed. It's assumed that the caller has first 2524 * grabbed the reset lock so it doesn't try to do overlapping 2525 * chip resets. 2526 */ 2527 #define MAX_TXRX_ITERATIONS 100 2528 static void 2529 ath_txrx_stop_locked(struct ath_softc *sc) 2530 { 2531 int i = MAX_TXRX_ITERATIONS; 2532 2533 ATH_UNLOCK_ASSERT(sc); 2534 ATH_PCU_LOCK_ASSERT(sc); 2535 2536 /* 2537 * Sleep until all the pending operations have completed. 2538 * 2539 * The caller must ensure that reset has been incremented 2540 * or the pending operations may continue being queued. 
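 * Each iteration sleeps for roughly 10ms, so with
 * MAX_TXRX_ITERATIONS at 100 we wait at most about a second
 * before giving up.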
2541 */ 2542 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || 2543 sc->sc_txstart_cnt || sc->sc_intr_cnt) { 2544 if (i <= 0) 2545 break; 2546 wlan_serialize_sleep(sc, 0, "ath_txrx_stop", (hz + 99) / 100); 2547 i--; 2548 } 2549 2550 if (i <= 0) 2551 device_printf(sc->sc_dev, 2552 "%s: didn't finish after %d iterations\n", 2553 __func__, MAX_TXRX_ITERATIONS); 2554 } 2555 #undef MAX_TXRX_ITERATIONS 2556 2557 #if 0 2558 static void 2559 ath_txrx_stop(struct ath_softc *sc) 2560 { 2561 ATH_UNLOCK_ASSERT(sc); 2562 ATH_PCU_UNLOCK_ASSERT(sc); 2563 2564 ATH_PCU_LOCK(sc); 2565 ath_txrx_stop_locked(sc); 2566 ATH_PCU_UNLOCK(sc); 2567 } 2568 #endif 2569 2570 static void 2571 ath_txrx_start(struct ath_softc *sc) 2572 { 2573 2574 taskqueue_unblock(sc->sc_tq); 2575 } 2576 2577 /* 2578 * Grab the reset lock, and wait around until noone else 2579 * is trying to do anything with it. 2580 * 2581 * This is totally horrible but we can't hold this lock for 2582 * long enough to do TX/RX or we end up with net80211/ip stack 2583 * LORs and eventual deadlock. 2584 * 2585 * "dowait" signals whether to spin, waiting for the reset 2586 * lock count to reach 0. This should (for now) only be used 2587 * during the reset path, as the rest of the code may not 2588 * be locking-reentrant enough to behave correctly. 2589 * 2590 * Another, cleaner way should be found to serialise all of 2591 * these operations. 2592 */ 2593 #define MAX_RESET_ITERATIONS 25 2594 static int 2595 ath_reset_grablock(struct ath_softc *sc, int dowait) 2596 { 2597 int w = 0; 2598 int i = MAX_RESET_ITERATIONS; 2599 2600 ATH_PCU_LOCK_ASSERT(sc); 2601 do { 2602 if (sc->sc_inreset_cnt == 0) { 2603 w = 1; 2604 break; 2605 } 2606 if (dowait == 0) { 2607 w = 0; 2608 break; 2609 } 2610 ATH_PCU_UNLOCK(sc); 2611 wlan_serialize_sleep(sc, 0, "ath_reset_grablock", 2612 (hz + 9) / 10); 2613 i--; 2614 ATH_PCU_LOCK(sc); 2615 } while (i > 0); 2616 2617 /* 2618 * We always increment the refcounter, regardless 2619 * of whether we succeeded to get it in an exclusive 2620 * way. 2621 */ 2622 sc->sc_inreset_cnt++; 2623 2624 if (i <= 0) 2625 device_printf(sc->sc_dev, 2626 "%s: didn't finish after %d iterations\n", 2627 __func__, MAX_RESET_ITERATIONS); 2628 2629 if (w == 0) 2630 device_printf(sc->sc_dev, 2631 "%s: warning, recursive reset path!\n", 2632 __func__); 2633 2634 return w; 2635 } 2636 #undef MAX_RESET_ITERATIONS 2637 2638 /* 2639 * XXX TODO: write ath_reset_releaselock 2640 */ 2641 2642 static void 2643 ath_stop(struct ifnet *ifp) 2644 { 2645 struct ath_softc *sc __unused = ifp->if_softc; 2646 2647 ATH_LOCK(sc); 2648 ath_stop_locked(ifp); 2649 ATH_UNLOCK(sc); 2650 } 2651 2652 /* 2653 * Reset the hardware w/o losing operational state. This is 2654 * basically a more efficient way of doing ath_stop, ath_init, 2655 * followed by state transitions to the current 802.11 2656 * operational state. Used to recover from various errors and 2657 * to reset or reload hardware state. 2658 */ 2659 int 2660 ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type) 2661 { 2662 struct ath_softc *sc = ifp->if_softc; 2663 struct ieee80211com *ic = ifp->if_l2com; 2664 struct ath_hal *ah = sc->sc_ah; 2665 HAL_STATUS status; 2666 int i; 2667 2668 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); 2669 2670 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */ 2671 ATH_PCU_UNLOCK_ASSERT(sc); 2672 ATH_UNLOCK_ASSERT(sc); 2673 2674 /* Try to (stop any further TX/RX from occuring */ 2675 taskqueue_block(sc->sc_tq); 2676 2677 /* 2678 * Wake the hardware up. 
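 * The chip must be awake before the drain/stop calls below
 * touch any hardware registers.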
2679 */ 2680 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2681 2682 ATH_PCU_LOCK(sc); 2683 2684 /* 2685 * Grab the reset lock before TX/RX is stopped. 2686 * 2687 * This is needed to ensure that when the TX/RX actually does finish, 2688 * no further TX/RX/reset runs in parallel with this. 2689 */ 2690 if (ath_reset_grablock(sc, 1) == 0) { 2691 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 2692 __func__); 2693 } 2694 2695 /* disable interrupts */ 2696 ath_hal_intrset(ah, 0); 2697 2698 /* 2699 * Now, ensure that any in progress TX/RX completes before we 2700 * continue. 2701 */ 2702 ath_txrx_stop_locked(sc); 2703 2704 ATH_PCU_UNLOCK(sc); 2705 2706 /* 2707 * Should now wait for pending TX/RX to complete 2708 * and block future ones from occuring. This needs to be 2709 * done before the TX queue is drained. 2710 */ 2711 ath_draintxq(sc, reset_type); /* stop xmit side */ 2712 2713 /* 2714 * Regardless of whether we're doing a no-loss flush or 2715 * not, stop the PCU and handle what's in the RX queue. 2716 * That way frames aren't dropped which shouldn't be. 2717 */ 2718 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS)); 2719 ath_rx_flush(sc); 2720 2721 ath_settkipmic(sc); /* configure TKIP MIC handling */ 2722 /* NB: indicate channel change so we do a full reset */ 2723 ath_update_chainmasks(sc, ic->ic_curchan); 2724 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 2725 sc->sc_cur_rxchainmask); 2726 sc->sc_rxfifo_state = ATH_RXFIFO_RESET; 2727 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status)) 2728 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n", 2729 __func__, status); 2730 sc->sc_diversity = ath_hal_getdiversity(ah); 2731 2732 /* Let DFS at it in case it's a DFS channel */ 2733 ath_dfs_radar_enable(sc, ic->ic_curchan); 2734 2735 /* Let spectral at in case spectral is enabled */ 2736 ath_spectral_enable(sc, ic->ic_curchan); 2737 2738 /* 2739 * Let bluetooth coexistence at in case it's needed for this channel 2740 */ 2741 ath_btcoex_enable(sc, ic->ic_curchan); 2742 2743 /* 2744 * If we're doing TDMA, enforce the TXOP limitation for chips that 2745 * support it. 2746 */ 2747 if (sc->sc_hasenforcetxop && sc->sc_tdma) 2748 ath_hal_setenforcetxop(sc->sc_ah, 1); 2749 else 2750 ath_hal_setenforcetxop(sc->sc_ah, 0); 2751 2752 if (ath_startrecv(sc) != 0) /* restart recv */ 2753 if_printf(ifp, "%s: unable to start recv logic\n", __func__); 2754 /* 2755 * We may be doing a reset in response to an ioctl 2756 * that changes the channel so update any state that 2757 * might change as a result. 2758 */ 2759 ath_chan_change(sc, ic->ic_curchan); 2760 if (sc->sc_beacons) { /* restart beacons */ 2761 #ifdef IEEE80211_SUPPORT_TDMA 2762 if (sc->sc_tdma) 2763 ath_tdma_config(sc, NULL); 2764 else 2765 #endif 2766 ath_beacon_config(sc, NULL); 2767 } 2768 2769 /* 2770 * Release the reset lock and re-enable interrupts here. 2771 * If an interrupt was being processed in ath_intr(), 2772 * it would disable interrupts at this point. So we have 2773 * to atomically enable interrupts and decrement the 2774 * reset counter - this way ath_intr() doesn't end up 2775 * disabling interrupts without a corresponding enable 2776 * in the rest or channel change path. 2777 * 2778 * Grab the TX reference in case we need to transmit. 2779 * That way a parallel transmit doesn't. 2780 */ 2781 ATH_PCU_LOCK(sc); 2782 sc->sc_inreset_cnt--; 2783 sc->sc_txstart_cnt++; 2784 /* XXX only do this if sc_inreset_cnt == 0? 
*/ 2785 ath_hal_intrset(ah, sc->sc_imask); 2786 ATH_PCU_UNLOCK(sc); 2787 2788 /* 2789 * TX and RX can be started here. If it were started with 2790 * sc_inreset_cnt > 0, the TX and RX path would abort. 2791 * Thus if this is a nested call through the reset or 2792 * channel change code, TX completion will occur but 2793 * RX completion and ath_start / ath_tx_start will not 2794 * run. 2795 */ 2796 2797 /* XXX TODO: we need to hold the tx refcount here! */ 2798 2799 /* Restart TX/RX as needed */ 2800 ath_txrx_start(sc); 2801 2802 /* Restart TX completion and pending TX */ 2803 if (reset_type == ATH_RESET_NOLOSS) { 2804 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 2805 if (ATH_TXQ_SETUP(sc, i)) { 2806 ATH_TXQ_LOCK(&sc->sc_txq[i]); 2807 ath_txq_restart_dma(sc, &sc->sc_txq[i]); 2808 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 2809 2810 ATH_TX_LOCK(sc); 2811 ath_txq_sched(sc, &sc->sc_txq[i]); 2812 ATH_TX_UNLOCK(sc); 2813 } 2814 } 2815 } 2816 2817 #if 0 2818 /* remove, DragonFly uses OACTIVE to control if_start calls */ 2819 /* 2820 * This may have been set during an ath_start() call which 2821 * set this once it detected a concurrent TX was going on. 2822 * So, clear it. 2823 */ 2824 IF_LOCK(&ifp->if_snd); 2825 ifq_clr_oactive(&ifp->if_snd); 2826 IF_UNLOCK(&ifp->if_snd); 2827 #endif 2828 2829 ath_power_restore_power_state(sc); 2830 2831 ATH_PCU_LOCK(sc); 2832 sc->sc_txstart_cnt--; 2833 ATH_PCU_UNLOCK(sc); 2834 2835 /* Handle any frames in the TX queue */ 2836 /* 2837 * XXX should this be done by the caller, rather than 2838 * ath_reset() ? 2839 */ 2840 ath_tx_kick(sc); /* restart xmit */ 2841 return 0; 2842 } 2843 2844 static int 2845 ath_reset_vap(struct ieee80211vap *vap, u_long cmd) 2846 { 2847 struct ieee80211com *ic = vap->iv_ic; 2848 struct ifnet *ifp = ic->ic_ifp; 2849 struct ath_softc *sc = ifp->if_softc; 2850 struct ath_hal *ah = sc->sc_ah; 2851 2852 switch (cmd) { 2853 case IEEE80211_IOC_TXPOWER: 2854 /* 2855 * If per-packet TPC is enabled, then we have nothing 2856 * to do; otherwise we need to force the global limit. 2857 * All this can happen directly; no need to reset. 2858 */ 2859 if (!ath_hal_gettpc(ah)) 2860 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); 2861 return 0; 2862 } 2863 /* XXX? Full or NOLOSS? */ 2864 return ath_reset(ifp, ATH_RESET_FULL); 2865 } 2866 2867 struct ath_buf * 2868 _ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype) 2869 { 2870 struct ath_buf *bf; 2871 2872 ATH_TXBUF_LOCK_ASSERT(sc); 2873 2874 if (btype == ATH_BUFTYPE_MGMT) 2875 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt); 2876 else 2877 bf = TAILQ_FIRST(&sc->sc_txbuf); 2878 2879 if (bf == NULL) { 2880 sc->sc_stats.ast_tx_getnobuf++; 2881 } else { 2882 if (bf->bf_flags & ATH_BUF_BUSY) { 2883 sc->sc_stats.ast_tx_getbusybuf++; 2884 bf = NULL; 2885 } 2886 } 2887 2888 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) { 2889 if (btype == ATH_BUFTYPE_MGMT) 2890 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list); 2891 else { 2892 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); 2893 sc->sc_txbuf_cnt--; 2894 2895 /* 2896 * This shuldn't happen; however just to be 2897 * safe print a warning and fudge the txbuf 2898 * count. 2899 */ 2900 if (sc->sc_txbuf_cnt < 0) { 2901 device_printf(sc->sc_dev, 2902 "%s: sc_txbuf_cnt < 0?\n", 2903 __func__); 2904 sc->sc_txbuf_cnt = 0; 2905 } 2906 } 2907 } else 2908 bf = NULL; 2909 2910 if (bf == NULL) { 2911 /* XXX should check which list, mgmt or otherwise */ 2912 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, 2913 TAILQ_FIRST(&sc->sc_txbuf) == NULL ? 
2914 "out of xmit buffers" : "xmit buffer busy"); 2915 return NULL; 2916 } 2917 2918 /* XXX TODO: should do this at buffer list initialisation */ 2919 /* XXX (then, ensure the buffer has the right flag set) */ 2920 bf->bf_flags = 0; 2921 if (btype == ATH_BUFTYPE_MGMT) 2922 bf->bf_flags |= ATH_BUF_MGMT; 2923 else 2924 bf->bf_flags &= (~ATH_BUF_MGMT); 2925 2926 /* Valid bf here; clear some basic fields */ 2927 bf->bf_next = NULL; /* XXX just to be sure */ 2928 bf->bf_last = NULL; /* XXX again, just to be sure */ 2929 bf->bf_comp = NULL; /* XXX again, just to be sure */ 2930 bzero(&bf->bf_state, sizeof(bf->bf_state)); 2931 2932 /* 2933 * Track the descriptor ID only if doing EDMA 2934 */ 2935 if (sc->sc_isedma) { 2936 bf->bf_descid = sc->sc_txbuf_descid; 2937 sc->sc_txbuf_descid++; 2938 } 2939 2940 return bf; 2941 } 2942 2943 /* 2944 * When retrying a software frame, buffers marked ATH_BUF_BUSY 2945 * can't be thrown back on the queue as they could still be 2946 * in use by the hardware. 2947 * 2948 * This duplicates the buffer, or returns NULL. 2949 * 2950 * The descriptor is also copied but the link pointers and 2951 * the DMA segments aren't copied; this frame should thus 2952 * be again passed through the descriptor setup/chain routines 2953 * so the link is correct. 2954 * 2955 * The caller must free the buffer using ath_freebuf(). 2956 */ 2957 struct ath_buf * 2958 ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf) 2959 { 2960 struct ath_buf *tbf; 2961 2962 tbf = ath_getbuf(sc, 2963 (bf->bf_flags & ATH_BUF_MGMT) ? 2964 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); 2965 if (tbf == NULL) 2966 return NULL; /* XXX failure? Why? */ 2967 2968 /* Copy basics */ 2969 tbf->bf_next = NULL; 2970 tbf->bf_nseg = bf->bf_nseg; 2971 tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE; 2972 tbf->bf_status = bf->bf_status; 2973 tbf->bf_m = bf->bf_m; 2974 tbf->bf_node = bf->bf_node; 2975 KASSERT((bf->bf_node != NULL), ("%s: bf_node=NULL!", __func__)); 2976 /* will be setup by the chain/setup function */ 2977 tbf->bf_lastds = NULL; 2978 /* for now, last == self */ 2979 tbf->bf_last = tbf; 2980 tbf->bf_comp = bf->bf_comp; 2981 2982 /* NOTE: DMA segments will be setup by the setup/chain functions */ 2983 2984 /* The caller has to re-init the descriptor + links */ 2985 2986 /* 2987 * Free the DMA mapping here, before we NULL the mbuf. 2988 * We must only call bus_dmamap_unload() once per mbuf chain 2989 * or behaviour is undefined. 2990 */ 2991 if (bf->bf_m != NULL) { 2992 /* 2993 * XXX is this POSTWRITE call required? 2994 */ 2995 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 2996 BUS_DMASYNC_POSTWRITE); 2997 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2998 } 2999 3000 bf->bf_m = NULL; 3001 bf->bf_node = NULL; 3002 3003 /* Copy state */ 3004 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 3005 3006 return tbf; 3007 } 3008 3009 struct ath_buf * 3010 ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) 3011 { 3012 struct ath_buf *bf; 3013 3014 ATH_TXBUF_LOCK(sc); 3015 bf = _ath_getbuf_locked(sc, btype); 3016 /* 3017 * If a mgmt buffer was requested but we're out of those, 3018 * try requesting a normal one. 
3019 */ 3020 if (bf == NULL && btype == ATH_BUFTYPE_MGMT) 3021 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 3022 ATH_TXBUF_UNLOCK(sc); 3023 if (bf == NULL) { 3024 #if 0 3025 struct ifnet *ifp = sc->sc_ifp; 3026 #endif 3027 3028 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 3029 sc->sc_stats.ast_tx_qstop++; 3030 #if 0 3031 /* remove, DragonFly uses OACTIVE to control if_start calls */ 3032 IF_LOCK(&ifp->if_snd); 3033 ifq_set_oactive(&ifp->if_snd); 3034 IF_UNLOCK(&ifp->if_snd); 3035 #endif 3036 } 3037 return bf; 3038 } 3039 3040 #if 0 3041 3042 static void 3043 ath_qflush(struct ifnet *ifp) 3044 { 3045 3046 /* XXX TODO */ 3047 } 3048 3049 #endif 3050 3051 /* 3052 * Transmit a single frame. 3053 * 3054 * net80211 will free the node reference if the transmit 3055 * fails, so don't free the node reference here. 3056 */ 3057 static int 3058 ath_transmit(struct ifnet *ifp, struct mbuf *m) 3059 { 3060 struct ieee80211com *ic = ifp->if_l2com; 3061 struct ath_softc *sc = ic->ic_ifp->if_softc; 3062 struct ieee80211_node *ni; 3063 struct mbuf *next; 3064 struct ath_buf *bf; 3065 ath_bufhead frags; 3066 int retval = 0; 3067 3068 /* 3069 * Tell the reset path that we're currently transmitting. 3070 */ 3071 ATH_PCU_LOCK(sc); 3072 if (sc->sc_inreset_cnt > 0) { 3073 DPRINTF(sc, ATH_DEBUG_XMIT, 3074 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 3075 ATH_PCU_UNLOCK(sc); 3076 IF_LOCK(&ifp->if_snd); 3077 sc->sc_stats.ast_tx_qstop++; 3078 #if 0 3079 /* remove, DragonFly uses OACTIVE to control if_start calls */ 3080 ifq_set_oactive(&ifp->if_snd); 3081 #endif 3082 IF_UNLOCK(&ifp->if_snd); 3083 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish"); 3084 m_freem(m); 3085 m = NULL; 3086 return (ENOBUFS); /* XXX should be EINVAL or? */ 3087 } 3088 sc->sc_txstart_cnt++; 3089 ATH_PCU_UNLOCK(sc); 3090 3091 /* Wake the hardware up already */ 3092 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3093 3094 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start"); 3095 /* 3096 * Grab the TX lock - it's ok to do this here; we haven't 3097 * yet started transmitting. 3098 */ 3099 ATH_TX_LOCK(sc); 3100 3101 /* 3102 * Node reference, if there's one. 3103 */ 3104 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 3105 3106 /* 3107 * Enforce how deep a node queue can get. 3108 * 3109 * XXX it would be nicer if we kept an mbuf queue per 3110 * node and only whacked them into ath_bufs when we 3111 * are ready to schedule some traffic from them. 3112 * .. that may come later. 3113 * 3114 * XXX we should also track the per-node hardware queue 3115 * depth so it is easy to limit the _SUM_ of the swq and 3116 * hwq frames. Since we only schedule two HWQ frames 3117 * at a time, this should be OK for now. 3118 */ 3119 if ((!(m->m_flags & M_EAPOL)) && 3120 (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) { 3121 sc->sc_stats.ast_tx_nodeq_overflow++; 3122 m_freem(m); 3123 m = NULL; 3124 retval = ENOBUFS; 3125 goto finish; 3126 } 3127 3128 /* 3129 * Check how many TX buffers are available. 3130 * 3131 * If this is for non-EAPOL traffic, just leave some 3132 * space free in order for buffer cloning and raw 3133 * frame transmission to occur. 3134 * 3135 * If it's for EAPOL traffic, ignore this for now. 3136 * Management traffic will be sent via the raw transmit 3137 * method which bypasses this check. 3138 * 3139 * This is needed to ensure that EAPOL frames during 3140 * (re) keying have a chance to go out. 3141 * 3142 * See kern/138379 for more information. 
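 * In practice this means non-EAPOL data frames are refused once
 * the free buffer count drops to sc_txq_data_minfree, leaving a
 * small reserve for EAPOL and raw/management transmission.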
3143 */ 3144 if ((!(m->m_flags & M_EAPOL)) && 3145 (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) { 3146 sc->sc_stats.ast_tx_nobuf++; 3147 m_freem(m); 3148 m = NULL; 3149 retval = ENOBUFS; 3150 goto finish; 3151 } 3152 3153 /* 3154 * Grab a TX buffer and associated resources. 3155 * 3156 * If it's an EAPOL frame, allocate a MGMT ath_buf. 3157 * That way even with temporary buffer exhaustion due to 3158 * the data path doesn't leave us without the ability 3159 * to transmit management frames. 3160 * 3161 * Otherwise allocate a normal buffer. 3162 */ 3163 if (m->m_flags & M_EAPOL) 3164 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 3165 else 3166 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); 3167 3168 if (bf == NULL) { 3169 /* 3170 * If we failed to allocate a buffer, fail. 3171 * 3172 * We shouldn't fail normally, due to the check 3173 * above. 3174 */ 3175 sc->sc_stats.ast_tx_nobuf++; 3176 #if 0 3177 /* remove, DragonFly uses OACTIVE to control if_start calls */ 3178 IF_LOCK(&ifp->if_snd); 3179 ifq_set_oactive(&ifp->if_snd); 3180 IF_UNLOCK(&ifp->if_snd); 3181 #endif 3182 m_freem(m); 3183 m = NULL; 3184 retval = ENOBUFS; 3185 goto finish; 3186 } 3187 3188 /* 3189 * At this point we have a buffer; so we need to free it 3190 * if we hit any error conditions. 3191 */ 3192 3193 /* 3194 * Check for fragmentation. If this frame 3195 * has been broken up verify we have enough 3196 * buffers to send all the fragments so all 3197 * go out or none... 3198 */ 3199 TAILQ_INIT(&frags); 3200 if ((m->m_flags & M_FRAG) && 3201 !ath_txfrag_setup(sc, &frags, m, ni)) { 3202 DPRINTF(sc, ATH_DEBUG_XMIT, 3203 "%s: out of txfrag buffers\n", __func__); 3204 sc->sc_stats.ast_tx_nofrag++; 3205 ifp->if_oerrors++; 3206 ath_freetx(m); 3207 goto bad; 3208 } 3209 3210 /* 3211 * At this point if we have any TX fragments, then we will 3212 * have bumped the node reference once for each of those. 3213 */ 3214 3215 /* 3216 * XXX Is there anything actually _enforcing_ that the 3217 * fragments are being transmitted in one hit, rather than 3218 * being interleaved with other transmissions on that 3219 * hardware queue? 3220 * 3221 * The ATH TX output lock is the only thing serialising this 3222 * right now. 3223 */ 3224 3225 /* 3226 * Calculate the "next fragment" length field in ath_buf 3227 * in order to let the transmit path know enough about 3228 * what to next write to the hardware. 3229 */ 3230 if (m->m_flags & M_FRAG) { 3231 struct ath_buf *fbf = bf; 3232 struct ath_buf *n_fbf = NULL; 3233 struct mbuf *fm = m->m_nextpkt; 3234 3235 /* 3236 * We need to walk the list of fragments and set 3237 * the next size to the following buffer. 3238 * However, the first buffer isn't in the frag 3239 * list, so we have to do some gymnastics here. 3240 */ 3241 TAILQ_FOREACH(n_fbf, &frags, bf_list) { 3242 fbf->bf_nextfraglen = fm->m_pkthdr.len; 3243 fbf = n_fbf; 3244 fm = fm->m_nextpkt; 3245 } 3246 } 3247 3248 /* 3249 * Bump the ifp output counter. 3250 * 3251 * XXX should use atomics? 3252 */ 3253 ifp->if_opackets++; 3254 nextfrag: 3255 /* 3256 * Pass the frame to the h/w for transmission. 3257 * Fragmented frames have each frag chained together 3258 * with m_nextpkt. We know there are sufficient ath_buf's 3259 * to send all the frags because of work done by 3260 * ath_txfrag_setup. 
We leave m_nextpkt set while 3261 * calling ath_tx_start so it can use it to extend the 3262 * the tx duration to cover the subsequent frag and 3263 * so it can reclaim all the mbufs in case of an error; 3264 * ath_tx_start clears m_nextpkt once it commits to 3265 * handing the frame to the hardware. 3266 * 3267 * Note: if this fails, then the mbufs are freed but 3268 * not the node reference. 3269 */ 3270 next = m->m_nextpkt; 3271 if (ath_tx_start(sc, ni, bf, m)) { 3272 bad: 3273 ifp->if_oerrors++; 3274 reclaim: 3275 bf->bf_m = NULL; 3276 bf->bf_node = NULL; 3277 ATH_TXBUF_LOCK(sc); 3278 ath_returnbuf_head(sc, bf); 3279 /* 3280 * Free the rest of the node references and 3281 * buffers for the fragment list. 3282 */ 3283 ath_txfrag_cleanup(sc, &frags, ni); 3284 ATH_TXBUF_UNLOCK(sc); 3285 retval = ENOBUFS; 3286 goto finish; 3287 } 3288 3289 /* 3290 * Check here if the node is in power save state. 3291 */ 3292 ath_tx_update_tim(sc, ni, 1); 3293 3294 if (next != NULL) { 3295 /* 3296 * Beware of state changing between frags. 3297 * XXX check sta power-save state? 3298 */ 3299 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 3300 DPRINTF(sc, ATH_DEBUG_XMIT, 3301 "%s: flush fragmented packet, state %s\n", 3302 __func__, 3303 ieee80211_state_name[ni->ni_vap->iv_state]); 3304 /* XXX dmamap */ 3305 ath_freetx(next); 3306 goto reclaim; 3307 } 3308 m = next; 3309 bf = TAILQ_FIRST(&frags); 3310 KASSERT(bf != NULL, ("no buf for txfrag")); 3311 TAILQ_REMOVE(&frags, bf, bf_list); 3312 goto nextfrag; 3313 } 3314 3315 /* 3316 * Bump watchdog timer. 3317 */ 3318 sc->sc_wd_timer = 5; 3319 3320 finish: 3321 ATH_TX_UNLOCK(sc); 3322 3323 /* 3324 * Finished transmitting! 3325 */ 3326 ATH_PCU_LOCK(sc); 3327 sc->sc_txstart_cnt--; 3328 ATH_PCU_UNLOCK(sc); 3329 3330 /* Sleep the hardware if required */ 3331 ath_power_restore_power_state(sc); 3332 3333 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished"); 3334 3335 return (retval); 3336 } 3337 3338 static int 3339 ath_media_change(struct ifnet *ifp) 3340 { 3341 int error = ieee80211_media_change(ifp); 3342 /* NB: only the fixed rate can change and that doesn't need a reset */ 3343 return (error == ENETRESET ? 0 : error); 3344 } 3345 3346 /* 3347 * Block/unblock tx+rx processing while a key change is done. 3348 * We assume the caller serializes key management operations 3349 * so we only need to worry about synchronization with other 3350 * uses that originate in the driver. 
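 * Blocking and unblocking the driver taskqueue is sufficient
 * here as the deferred TX/RX processing runs from sc_tq.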
3351 */ 3352 static void 3353 ath_key_update_begin(struct ieee80211vap *vap) 3354 { 3355 struct ifnet *ifp = vap->iv_ic->ic_ifp; 3356 struct ath_softc *sc = ifp->if_softc; 3357 3358 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 3359 taskqueue_block(sc->sc_tq); 3360 } 3361 3362 static void 3363 ath_key_update_end(struct ieee80211vap *vap) 3364 { 3365 struct ifnet *ifp = vap->iv_ic->ic_ifp; 3366 struct ath_softc *sc = ifp->if_softc; 3367 3368 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 3369 taskqueue_unblock(sc->sc_tq); 3370 } 3371 3372 static void 3373 ath_update_promisc(struct ifnet *ifp) 3374 { 3375 struct ath_softc *sc = ifp->if_softc; 3376 u_int32_t rfilt; 3377 3378 /* configure rx filter */ 3379 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3380 rfilt = ath_calcrxfilter(sc); 3381 ath_hal_setrxfilter(sc->sc_ah, rfilt); 3382 ath_power_restore_power_state(sc); 3383 3384 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 3385 } 3386 3387 static void 3388 ath_update_mcast(struct ifnet *ifp) 3389 { 3390 struct ath_softc *sc = ifp->if_softc; 3391 u_int32_t mfilt[2]; 3392 3393 /* calculate and install multicast filter */ 3394 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 3395 struct ifmultiaddr *ifma; 3396 /* 3397 * Merge multicast addresses to form the hardware filter. 3398 */ 3399 mfilt[0] = mfilt[1] = 0; 3400 #if 0 3401 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */ 3402 #endif 3403 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3404 caddr_t dl; 3405 u_int32_t val; 3406 u_int8_t pos; 3407 3408 /* calculate XOR of eight 6bit values */ 3409 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 3410 val = LE_READ_4(dl + 0); 3411 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 3412 val = LE_READ_4(dl + 3); 3413 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 3414 pos &= 0x3f; 3415 mfilt[pos / 32] |= (1 << (pos % 32)); 3416 } 3417 #if 0 3418 if_maddr_runlock(ifp); 3419 #endif 3420 } else 3421 mfilt[0] = mfilt[1] = ~0; 3422 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3423 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 3424 ath_power_restore_power_state(sc); 3425 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 3426 __func__, mfilt[0], mfilt[1]); 3427 } 3428 3429 void 3430 ath_mode_init(struct ath_softc *sc) 3431 { 3432 struct ifnet *ifp = sc->sc_ifp; 3433 struct ath_hal *ah = sc->sc_ah; 3434 u_int32_t rfilt; 3435 3436 /* configure rx filter */ 3437 rfilt = ath_calcrxfilter(sc); 3438 ath_hal_setrxfilter(ah, rfilt); 3439 3440 /* configure operational mode */ 3441 ath_hal_setopmode(ah); 3442 3443 #if 0 3444 DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE, 3445 "%s: ah=%p, ifp=%p, if_addr=%p\n", 3446 __func__, 3447 ah, 3448 ifp, 3449 (ifp == NULL) ? NULL : ifp->if_addr); 3450 #endif 3451 3452 /* handle any link-level address change */ 3453 ath_hal_setmac(ah, IF_LLADDR(ifp)); 3454 3455 /* calculate and install multicast filter */ 3456 ath_update_mcast(ifp); 3457 } 3458 3459 /* 3460 * Set the slot time based on the current setting. 
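 * Half- and quarter-rate channels use fixed 13us/21us slot
 * times; 11g channels honor the short/long slot flag; everything
 * else uses the 9us short slot time.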
3461 */ 3462 void 3463 ath_setslottime(struct ath_softc *sc) 3464 { 3465 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3466 struct ath_hal *ah = sc->sc_ah; 3467 u_int usec; 3468 3469 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 3470 usec = 13; 3471 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 3472 usec = 21; 3473 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 3474 /* honor short/long slot time only in 11g */ 3475 /* XXX shouldn't honor on pure g or turbo g channel */ 3476 if (ic->ic_flags & IEEE80211_F_SHSLOT) 3477 usec = HAL_SLOT_TIME_9; 3478 else 3479 usec = HAL_SLOT_TIME_20; 3480 } else 3481 usec = HAL_SLOT_TIME_9; 3482 3483 DPRINTF(sc, ATH_DEBUG_RESET, 3484 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 3485 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 3486 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 3487 3488 /* Wake up the hardware first before updating the slot time */ 3489 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3490 ath_hal_setslottime(ah, usec); 3491 ath_power_restore_power_state(sc); 3492 sc->sc_updateslot = OK; 3493 } 3494 3495 /* 3496 * Callback from the 802.11 layer to update the 3497 * slot time based on the current setting. 3498 */ 3499 static void 3500 ath_updateslot(struct ifnet *ifp) 3501 { 3502 struct ath_softc *sc = ifp->if_softc; 3503 struct ieee80211com *ic = ifp->if_l2com; 3504 3505 /* 3506 * When not coordinating the BSS, change the hardware 3507 * immediately. For other operation we defer the change 3508 * until beacon updates have propagated to the stations. 3509 * 3510 * XXX sc_updateslot isn't changed behind a lock? 3511 */ 3512 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 3513 ic->ic_opmode == IEEE80211_M_MBSS) 3514 sc->sc_updateslot = UPDATE; 3515 else 3516 ath_setslottime(sc); 3517 } 3518 3519 /* 3520 * Append the contents of src to dst; both queues 3521 * are assumed to be locked. 3522 */ 3523 void 3524 ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 3525 { 3526 3527 ATH_TXQ_LOCK_ASSERT(src); 3528 ATH_TXQ_LOCK_ASSERT(dst); 3529 3530 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 3531 dst->axq_link = src->axq_link; 3532 src->axq_link = NULL; 3533 dst->axq_depth += src->axq_depth; 3534 dst->axq_aggr_depth += src->axq_aggr_depth; 3535 src->axq_depth = 0; 3536 src->axq_aggr_depth = 0; 3537 } 3538 3539 /* 3540 * Reset the hardware, with no loss. 3541 * 3542 * This can't be used for a general case reset. 3543 */ 3544 static void 3545 ath_reset_proc(void *arg, int pending) 3546 { 3547 struct ath_softc *sc = arg; 3548 struct ifnet *ifp = sc->sc_ifp; 3549 3550 #if 0 3551 if_printf(ifp, "%s: resetting\n", __func__); 3552 #endif 3553 wlan_serialize_enter(); 3554 ath_reset(ifp, ATH_RESET_NOLOSS); 3555 wlan_serialize_exit(); 3556 } 3557 3558 /* 3559 * Reset the hardware after detecting beacons have stopped. 
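 * A baseband hang check is done first so the log shows whether
 * the stuck beacon coincided with a detectable BB hang.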
3560 */
3561 static void
3562 ath_bstuck_proc(void *arg, int pending)
3563 {
3564 struct ath_softc *sc = arg;
3565 struct ifnet *ifp = sc->sc_ifp;
3566 uint32_t hangs = 0;
3567
3568 wlan_serialize_enter();
3569 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
3570 if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
3571
3572 #ifdef ATH_DEBUG_ALQ
3573 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON))
3574 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL);
3575 #endif
3576
3577 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3578 sc->sc_bmisscount);
3579 sc->sc_stats.ast_bstuck++;
3580 /*
3581 * This assumes that there's no simultaneous channel mode change
3582 * occurring.
3583 */
3584 ath_reset(ifp, ATH_RESET_NOLOSS);
3585 wlan_serialize_exit();
3586 }
3587
3588 static void
3589 ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3590 {
3591 bus_addr_t *paddr = (bus_addr_t*) arg;
3592 KASSERT(error == 0, ("error %u on bus_dma callback", error));
3593 *paddr = segs->ds_addr;
3594 }
3595
3596 /*
3597 * Allocate the descriptors and appropriate DMA tag/setup.
3598 *
3599 * For some situations (eg EDMA TX completion), there isn't a requirement
3600 * for the ath_buf entries to be allocated.
3601 */
3602 int
3603 ath_descdma_alloc_desc(struct ath_softc *sc,
3604 struct ath_descdma *dd, ath_bufhead *head,
3605 const char *name, int ds_size, int ndesc)
3606 {
3607 #define DS2PHYS(_dd, _ds) \
3608 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3609 #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
3610 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
3611 struct ifnet *ifp = sc->sc_ifp;
3612 int error;
3613
3614 dd->dd_descsize = ds_size;
3615
3616 DPRINTF(sc, ATH_DEBUG_RESET,
3617 "%s: %s DMA: %u desc, %d bytes per descriptor\n",
3618 __func__, name, ndesc, dd->dd_descsize);
3619
3620 dd->dd_name = name;
3621 dd->dd_desc_len = dd->dd_descsize * ndesc;
3622
3623 /*
3624 * Merlin work-around:
3625 * Descriptors that cross the 4KB boundary can't be used.
3626 * Assume one skipped descriptor per 4KB page.
3627 */
3628 if (! ath_hal_split4ktrans(sc->sc_ah)) {
3629 int numpages = dd->dd_desc_len / 4096;
3630 dd->dd_desc_len += ds_size * numpages;
3631 }
3632
3633 /*
3634 * Setup DMA descriptor area.
3635 *
3636 * BUS_DMA_ALLOCNOW is not used; we never use bounce
3637 * buffers for the descriptors themselves.
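 * The tag created below restricts descriptor memory to 32-bit
 * physical addresses and a single contiguous segment.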
3638 */ 3639 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 3640 PAGE_SIZE, 0, /* alignment, bounds */ 3641 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 3642 BUS_SPACE_MAXADDR, /* highaddr */ 3643 NULL, NULL, /* filter, filterarg */ 3644 dd->dd_desc_len, /* maxsize */ 3645 1, /* nsegments */ 3646 dd->dd_desc_len, /* maxsegsize */ 3647 0, /* flags */ 3648 &dd->dd_dmat); 3649 if (error != 0) { 3650 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 3651 return error; 3652 } 3653 3654 /* allocate descriptors */ 3655 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 3656 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 3657 &dd->dd_dmamap); 3658 if (error != 0) { 3659 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 3660 "error %u\n", ndesc, dd->dd_name, error); 3661 goto fail1; 3662 } 3663 3664 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 3665 dd->dd_desc, dd->dd_desc_len, 3666 ath_load_cb, &dd->dd_desc_paddr, 3667 BUS_DMA_NOWAIT); 3668 if (error != 0) { 3669 if_printf(ifp, "unable to map %s descriptors, error %u\n", 3670 dd->dd_name, error); 3671 goto fail2; 3672 } 3673 3674 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 3675 __func__, dd->dd_name, (uint8_t *) dd->dd_desc, 3676 (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr, 3677 /*XXX*/ (u_long) dd->dd_desc_len); 3678 3679 return (0); 3680 3681 fail2: 3682 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3683 fail1: 3684 bus_dma_tag_destroy(dd->dd_dmat); 3685 memset(dd, 0, sizeof(*dd)); 3686 return error; 3687 #undef DS2PHYS 3688 #undef ATH_DESC_4KB_BOUND_CHECK 3689 } 3690 3691 int 3692 ath_descdma_setup(struct ath_softc *sc, 3693 struct ath_descdma *dd, ath_bufhead *head, 3694 const char *name, int ds_size, int nbuf, int ndesc) 3695 { 3696 #define DS2PHYS(_dd, _ds) \ 3697 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 3698 #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 3699 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 3700 struct ifnet *ifp = sc->sc_ifp; 3701 uint8_t *ds; 3702 struct ath_buf *bf; 3703 int i, bsize, error; 3704 3705 /* Allocate descriptors */ 3706 error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size, 3707 nbuf * ndesc); 3708 3709 /* Assume any errors during allocation were dealt with */ 3710 if (error != 0) { 3711 return (error); 3712 } 3713 3714 ds = (uint8_t *) dd->dd_desc; 3715 3716 /* allocate rx buffers */ 3717 bsize = sizeof(struct ath_buf) * nbuf; 3718 bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT|M_ZERO); 3719 if (bf == NULL) { 3720 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 3721 dd->dd_name, bsize); 3722 goto fail3; 3723 } 3724 dd->dd_bufptr = bf; 3725 3726 TAILQ_INIT(head); 3727 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) { 3728 bf->bf_desc = (struct ath_desc *) ds; 3729 bf->bf_daddr = DS2PHYS(dd, ds); 3730 if (! ath_hal_split4ktrans(sc->sc_ah)) { 3731 /* 3732 * Merlin WAR: Skip descriptor addresses which 3733 * cause 4KB boundary crossing along any point 3734 * in the descriptor. 
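 * If a descriptor would straddle a 4KB page, advance to the
 * start of the next page; the extra room for this was reserved
 * in ath_descdma_alloc_desc() (one skipped descriptor per page).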
3735 */ 3736 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, 3737 dd->dd_descsize)) { 3738 /* Start at the next page */ 3739 ds += 0x1000 - (bf->bf_daddr & 0xFFF); 3740 bf->bf_desc = (struct ath_desc *) ds; 3741 bf->bf_daddr = DS2PHYS(dd, ds); 3742 } 3743 } 3744 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 3745 &bf->bf_dmamap); 3746 if (error != 0) { 3747 if_printf(ifp, "unable to create dmamap for %s " 3748 "buffer %u, error %u\n", dd->dd_name, i, error); 3749 ath_descdma_cleanup(sc, dd, head); 3750 return error; 3751 } 3752 bf->bf_lastds = bf->bf_desc; /* Just an initial value */ 3753 TAILQ_INSERT_TAIL(head, bf, bf_list); 3754 } 3755 3756 /* 3757 * XXX TODO: ensure that ds doesn't overflow the descriptor 3758 * allocation otherwise weird stuff will occur and crash your 3759 * machine. 3760 */ 3761 return 0; 3762 /* XXX this should likely just call ath_descdma_cleanup() */ 3763 fail3: 3764 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3765 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3766 bus_dma_tag_destroy(dd->dd_dmat); 3767 memset(dd, 0, sizeof(*dd)); 3768 return error; 3769 #undef DS2PHYS 3770 #undef ATH_DESC_4KB_BOUND_CHECK 3771 } 3772 3773 /* 3774 * Allocate ath_buf entries but no descriptor contents. 3775 * 3776 * This is for RX EDMA where the descriptors are the header part of 3777 * the RX buffer. 3778 */ 3779 int 3780 ath_descdma_setup_rx_edma(struct ath_softc *sc, 3781 struct ath_descdma *dd, ath_bufhead *head, 3782 const char *name, int nbuf, int rx_status_len) 3783 { 3784 struct ifnet *ifp = sc->sc_ifp; 3785 struct ath_buf *bf; 3786 int i, bsize, error; 3787 3788 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n", 3789 __func__, name, nbuf); 3790 3791 dd->dd_name = name; 3792 /* 3793 * This is (mostly) purely for show. We're not allocating any actual 3794 * descriptors here as EDMA RX has the descriptor be part 3795 * of the RX buffer. 3796 * 3797 * However, dd_desc_len is used by ath_descdma_free() to determine 3798 * whether we have already freed this DMA mapping. 
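 * So a non-zero length is recorded below even though no
 * descriptor memory is actually allocated here.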
3799 */ 3800 dd->dd_desc_len = rx_status_len * nbuf; 3801 dd->dd_descsize = rx_status_len; 3802 3803 /* allocate rx buffers */ 3804 bsize = sizeof(struct ath_buf) * nbuf; 3805 bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT | M_ZERO); 3806 if (bf == NULL) { 3807 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 3808 dd->dd_name, bsize); 3809 error = ENOMEM; 3810 goto fail3; 3811 } 3812 dd->dd_bufptr = bf; 3813 3814 TAILQ_INIT(head); 3815 for (i = 0; i < nbuf; i++, bf++) { 3816 bf->bf_desc = NULL; 3817 bf->bf_daddr = 0; 3818 bf->bf_lastds = NULL; /* Just an initial value */ 3819 3820 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 3821 &bf->bf_dmamap); 3822 if (error != 0) { 3823 if_printf(ifp, "unable to create dmamap for %s " 3824 "buffer %u, error %u\n", dd->dd_name, i, error); 3825 ath_descdma_cleanup(sc, dd, head); 3826 return error; 3827 } 3828 TAILQ_INSERT_TAIL(head, bf, bf_list); 3829 } 3830 return 0; 3831 fail3: 3832 memset(dd, 0, sizeof(*dd)); 3833 return error; 3834 } 3835 3836 void 3837 ath_descdma_cleanup(struct ath_softc *sc, 3838 struct ath_descdma *dd, ath_bufhead *head) 3839 { 3840 struct ath_buf *bf; 3841 struct ieee80211_node *ni; 3842 int do_warning = 0; 3843 3844 if (dd->dd_dmamap != 0) { 3845 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3846 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3847 bus_dma_tag_destroy(dd->dd_dmat); 3848 } 3849 3850 if (head != NULL) { 3851 TAILQ_FOREACH(bf, head, bf_list) { 3852 if (bf->bf_m) { 3853 /* 3854 * XXX warn if there's buffers here. 3855 * XXX it should have been freed by the 3856 * owner! 3857 */ 3858 3859 if (do_warning == 0) { 3860 do_warning = 1; 3861 device_printf(sc->sc_dev, 3862 "%s: %s: mbuf should've been" 3863 " unmapped/freed!\n", 3864 __func__, 3865 dd->dd_name); 3866 } 3867 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3868 BUS_DMASYNC_POSTREAD); 3869 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3870 m_freem(bf->bf_m); 3871 bf->bf_m = NULL; 3872 } 3873 if (bf->bf_dmamap != NULL) { 3874 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 3875 bf->bf_dmamap = NULL; 3876 } 3877 ni = bf->bf_node; 3878 bf->bf_node = NULL; 3879 if (ni != NULL) { 3880 /* 3881 * Reclaim node reference. 3882 */ 3883 ieee80211_free_node(ni); 3884 } 3885 } 3886 } 3887 3888 if (head != NULL) 3889 TAILQ_INIT(head); 3890 3891 if (dd->dd_bufptr != NULL) 3892 kfree(dd->dd_bufptr, M_ATHDEV); 3893 memset(dd, 0, sizeof(*dd)); 3894 } 3895 3896 static int 3897 ath_desc_alloc(struct ath_softc *sc) 3898 { 3899 int error; 3900 3901 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3902 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER); 3903 if (error != 0) { 3904 return error; 3905 } 3906 sc->sc_txbuf_cnt = ath_txbuf; 3907 3908 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, 3909 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt, 3910 ATH_TXDESC); 3911 if (error != 0) { 3912 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3913 return error; 3914 } 3915 3916 /* 3917 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the 3918 * flag doesn't have to be set in ath_getbuf_locked(). 
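 *
 * Note that the beacon ring allocated below uses a single
 * descriptor per buffer (ndesc = 1), presumably because beacon
 * frames are built into one contiguous buffer and never need a
 * scatter/gather descriptor chain.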
3919 */ 3920 3921 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3922 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1); 3923 if (error != 0) { 3924 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3925 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3926 &sc->sc_txbuf_mgmt); 3927 return error; 3928 } 3929 return 0; 3930 } 3931 3932 static void 3933 ath_desc_free(struct ath_softc *sc) 3934 { 3935 3936 if (sc->sc_bdma.dd_desc_len != 0) 3937 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3938 if (sc->sc_txdma.dd_desc_len != 0) 3939 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3940 if (sc->sc_txdma_mgmt.dd_desc_len != 0) 3941 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3942 &sc->sc_txbuf_mgmt); 3943 } 3944 3945 static struct ieee80211_node * 3946 ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 3947 { 3948 struct ieee80211com *ic = vap->iv_ic; 3949 struct ath_softc *sc = ic->ic_ifp->if_softc; 3950 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3951 struct ath_node *an; 3952 3953 an = kmalloc(space, M_80211_NODE, M_INTWAIT|M_ZERO); 3954 if (an == NULL) { 3955 /* XXX stat+msg */ 3956 return NULL; 3957 } 3958 ath_rate_node_init(sc, an); 3959 3960 /* Setup the mutex - there's no associd yet so set the name to NULL */ 3961 ksnprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 3962 device_get_nameunit(sc->sc_dev), an); 3963 #if 0 3964 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 3965 #endif 3966 3967 /* XXX setup ath_tid */ 3968 ath_tx_tid_init(sc, an); 3969 3970 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__, 3971 ath_hal_ether_sprintf(mac), an); 3972 return &an->an_node; 3973 } 3974 3975 static void 3976 ath_node_cleanup(struct ieee80211_node *ni) 3977 { 3978 struct ieee80211com *ic = ni->ni_ic; 3979 struct ath_softc *sc = ic->ic_ifp->if_softc; 3980 3981 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__, 3982 ath_hal_ether_sprintf(ni->ni_macaddr), ATH_NODE(ni)); 3983 3984 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3985 ath_tx_node_flush(sc, ATH_NODE(ni)); 3986 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 3987 sc->sc_node_cleanup(ni); 3988 } 3989 3990 static void 3991 ath_node_free(struct ieee80211_node *ni) 3992 { 3993 struct ieee80211com *ic = ni->ni_ic; 3994 struct ath_softc *sc = ic->ic_ifp->if_softc; 3995 3996 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__, 3997 ath_hal_ether_sprintf(ni->ni_macaddr), ATH_NODE(ni)); 3998 #if 0 3999 mtx_destroy(&ATH_NODE(ni)->an_mtx); 4000 #endif 4001 sc->sc_node_free(ni); 4002 } 4003 4004 static void 4005 ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 4006 { 4007 struct ieee80211com *ic = ni->ni_ic; 4008 struct ath_softc *sc = ic->ic_ifp->if_softc; 4009 struct ath_hal *ah = sc->sc_ah; 4010 4011 *rssi = ic->ic_node_getrssi(ni); 4012 if (ni->ni_chan != IEEE80211_CHAN_ANYC) 4013 *noise = ath_hal_getchannoise(ah, ni->ni_chan); 4014 else 4015 *noise = -95; /* nominally correct */ 4016 } 4017 4018 /* 4019 * Set the default antenna. 
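 * This programs the hardware receive antenna via the HAL, counts
 * the switch in ast_ant_defswitch if the antenna actually changed,
 * and resets sc_rxotherant, the counter the RX path uses to decide
 * when another switch is warranted.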
4020 */ 4021 void 4022 ath_setdefantenna(struct ath_softc *sc, u_int antenna) 4023 { 4024 struct ath_hal *ah = sc->sc_ah; 4025 4026 /* XXX block beacon interrupts */ 4027 ath_hal_setdefantenna(ah, antenna); 4028 if (sc->sc_defant != antenna) 4029 sc->sc_stats.ast_ant_defswitch++; 4030 sc->sc_defant = antenna; 4031 sc->sc_rxotherant = 0; 4032 } 4033 4034 static void 4035 ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 4036 { 4037 txq->axq_qnum = qnum; 4038 txq->axq_ac = 0; 4039 txq->axq_depth = 0; 4040 txq->axq_aggr_depth = 0; 4041 txq->axq_intrcnt = 0; 4042 txq->axq_link = NULL; 4043 txq->axq_softc = sc; 4044 TAILQ_INIT(&txq->axq_q); 4045 TAILQ_INIT(&txq->axq_tidq); 4046 TAILQ_INIT(&txq->fifo.axq_q); 4047 ATH_TXQ_LOCK_INIT(sc, txq); 4048 } 4049 4050 /* 4051 * Setup a h/w transmit queue. 4052 */ 4053 static struct ath_txq * 4054 ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 4055 { 4056 #define N(a) (sizeof(a)/sizeof(a[0])) 4057 struct ath_hal *ah = sc->sc_ah; 4058 HAL_TXQ_INFO qi; 4059 int qnum; 4060 4061 memset(&qi, 0, sizeof(qi)); 4062 qi.tqi_subtype = subtype; 4063 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 4064 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 4065 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 4066 /* 4067 * Enable interrupts only for EOL and DESC conditions. 4068 * We mark tx descriptors to receive a DESC interrupt 4069 * when a tx queue gets deep; otherwise waiting for the 4070 * EOL to reap descriptors. Note that this is done to 4071 * reduce interrupt load and this only defers reaping 4072 * descriptors, never transmitting frames. Aside from 4073 * reducing interrupts this also permits more concurrency. 4074 * The only potential downside is if the tx queue backs 4075 * up in which case the top half of the kernel may backup 4076 * due to a lack of tx descriptors. 4077 */ 4078 if (sc->sc_isedma) 4079 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | 4080 HAL_TXQ_TXOKINT_ENABLE; 4081 else 4082 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | 4083 HAL_TXQ_TXDESCINT_ENABLE; 4084 4085 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 4086 if (qnum == -1) { 4087 /* 4088 * NB: don't print a message, this happens 4089 * normally on parts with too few tx queues 4090 */ 4091 return NULL; 4092 } 4093 if (qnum >= N(sc->sc_txq)) { 4094 device_printf(sc->sc_dev, 4095 "hal qnum %u out of range, max %zu!\n", 4096 qnum, N(sc->sc_txq)); 4097 ath_hal_releasetxqueue(ah, qnum); 4098 return NULL; 4099 } 4100 if (!ATH_TXQ_SETUP(sc, qnum)) { 4101 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 4102 sc->sc_txqsetup |= 1<<qnum; 4103 } 4104 return &sc->sc_txq[qnum]; 4105 #undef N 4106 } 4107 4108 /* 4109 * Setup a hardware data transmit queue for the specified 4110 * access control. The hal may not support all requested 4111 * queues in which case it will return a reference to a 4112 * previously setup queue. We record the mapping from ac's 4113 * to h/w queues for use by ath_tx_start and also track 4114 * the set of h/w queues being used to optimize work in the 4115 * transmit interrupt handler and related routines. 
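 *
 * A minimal usage sketch (the exact attach-time call sites may
 * differ): ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO) records the
 * resulting queue in sc_ac2q[WME_AC_VO] and tags it with the AC so
 * later code can map traffic classes back to hardware queues.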
4116 */ 4117 static int 4118 ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 4119 { 4120 #define N(a) (sizeof(a)/sizeof(a[0])) 4121 struct ath_txq *txq; 4122 4123 if (ac >= N(sc->sc_ac2q)) { 4124 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 4125 ac, N(sc->sc_ac2q)); 4126 return 0; 4127 } 4128 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 4129 if (txq != NULL) { 4130 txq->axq_ac = ac; 4131 sc->sc_ac2q[ac] = txq; 4132 return 1; 4133 } else 4134 return 0; 4135 #undef N 4136 } 4137 4138 /* 4139 * Update WME parameters for a transmit queue. 4140 */ 4141 static int 4142 ath_txq_update(struct ath_softc *sc, int ac) 4143 { 4144 #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 4145 #define ATH_TXOP_TO_US(v) (v<<5) 4146 struct ifnet *ifp = sc->sc_ifp; 4147 struct ieee80211com *ic = ifp->if_l2com; 4148 struct ath_txq *txq = sc->sc_ac2q[ac]; 4149 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 4150 struct ath_hal *ah = sc->sc_ah; 4151 HAL_TXQ_INFO qi; 4152 4153 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 4154 #ifdef IEEE80211_SUPPORT_TDMA 4155 if (sc->sc_tdma) { 4156 /* 4157 * AIFS is zero so there's no pre-transmit wait. The 4158 * burst time defines the slot duration and is configured 4159 * through net80211. The QCU is setup to not do post-xmit 4160 * back off, lockout all lower-priority QCU's, and fire 4161 * off the DMA beacon alert timer which is setup based 4162 * on the slot configuration. 4163 */ 4164 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 4165 | HAL_TXQ_TXERRINT_ENABLE 4166 | HAL_TXQ_TXURNINT_ENABLE 4167 | HAL_TXQ_TXEOLINT_ENABLE 4168 | HAL_TXQ_DBA_GATED 4169 | HAL_TXQ_BACKOFF_DISABLE 4170 | HAL_TXQ_ARB_LOCKOUT_GLOBAL 4171 ; 4172 qi.tqi_aifs = 0; 4173 /* XXX +dbaprep? */ 4174 qi.tqi_readyTime = sc->sc_tdmaslotlen; 4175 qi.tqi_burstTime = qi.tqi_readyTime; 4176 } else { 4177 #endif 4178 /* 4179 * XXX shouldn't this just use the default flags 4180 * used in the previous queue setup? 4181 */ 4182 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 4183 | HAL_TXQ_TXERRINT_ENABLE 4184 | HAL_TXQ_TXDESCINT_ENABLE 4185 | HAL_TXQ_TXURNINT_ENABLE 4186 | HAL_TXQ_TXEOLINT_ENABLE 4187 ; 4188 qi.tqi_aifs = wmep->wmep_aifsn; 4189 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 4190 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 4191 qi.tqi_readyTime = 0; 4192 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 4193 #ifdef IEEE80211_SUPPORT_TDMA 4194 } 4195 #endif 4196 4197 DPRINTF(sc, ATH_DEBUG_RESET, 4198 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 4199 __func__, txq->axq_qnum, qi.tqi_qflags, 4200 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 4201 4202 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 4203 if_printf(ifp, "unable to update hardware queue " 4204 "parameters for %s traffic!\n", 4205 ieee80211_wme_acnames[ac]); 4206 return 0; 4207 } else { 4208 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 4209 return 1; 4210 } 4211 #undef ATH_TXOP_TO_US 4212 #undef ATH_EXPONENT_TO_VALUE 4213 } 4214 4215 /* 4216 * Callback from the 802.11 layer to update WME parameters. 4217 */ 4218 int 4219 ath_wme_update(struct ieee80211com *ic) 4220 { 4221 struct ath_softc *sc = ic->ic_ifp->if_softc; 4222 4223 return !ath_txq_update(sc, WME_AC_BE) || 4224 !ath_txq_update(sc, WME_AC_BK) || 4225 !ath_txq_update(sc, WME_AC_VI) || 4226 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 4227 } 4228 4229 /* 4230 * Reclaim resources for a setup queue. 
4231 */ 4232 static void 4233 ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 4234 { 4235 4236 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 4237 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 4238 ATH_TXQ_LOCK_DESTROY(txq); 4239 } 4240 4241 /* 4242 * Reclaim all tx queue resources. 4243 */ 4244 static void 4245 ath_tx_cleanup(struct ath_softc *sc) 4246 { 4247 int i; 4248 4249 ATH_TXBUF_LOCK_DESTROY(sc); 4250 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4251 if (ATH_TXQ_SETUP(sc, i)) 4252 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 4253 } 4254 4255 /* 4256 * Return h/w rate index for an IEEE rate (w/o basic rate bit) 4257 * using the current rates in sc_rixmap. 4258 */ 4259 int 4260 ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 4261 { 4262 int rix = sc->sc_rixmap[rate]; 4263 /* NB: return lowest rix for invalid rate */ 4264 return (rix == 0xff ? 0 : rix); 4265 } 4266 4267 static void 4268 ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 4269 struct ath_buf *bf) 4270 { 4271 struct ieee80211_node *ni = bf->bf_node; 4272 struct ifnet *ifp = sc->sc_ifp; 4273 struct ieee80211com *ic = ifp->if_l2com; 4274 int sr, lr, pri; 4275 4276 if (ts->ts_status == 0) { 4277 u_int8_t txant = ts->ts_antenna; 4278 sc->sc_stats.ast_ant_tx[txant]++; 4279 sc->sc_ant_tx[txant]++; 4280 if (ts->ts_finaltsi != 0) 4281 sc->sc_stats.ast_tx_altrate++; 4282 pri = M_WME_GETAC(bf->bf_m); 4283 if (pri >= WME_AC_VO) 4284 ic->ic_wme.wme_hipri_traffic++; 4285 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) 4286 ni->ni_inact = ni->ni_inact_reload; 4287 } else { 4288 if (ts->ts_status & HAL_TXERR_XRETRY) 4289 sc->sc_stats.ast_tx_xretries++; 4290 if (ts->ts_status & HAL_TXERR_FIFO) 4291 sc->sc_stats.ast_tx_fifoerr++; 4292 if (ts->ts_status & HAL_TXERR_FILT) 4293 sc->sc_stats.ast_tx_filtered++; 4294 if (ts->ts_status & HAL_TXERR_XTXOP) 4295 sc->sc_stats.ast_tx_xtxop++; 4296 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 4297 sc->sc_stats.ast_tx_timerexpired++; 4298 4299 if (bf->bf_m->m_flags & M_FF) 4300 sc->sc_stats.ast_ff_txerr++; 4301 } 4302 /* XXX when is this valid? */ 4303 if (ts->ts_flags & HAL_TX_DESC_CFG_ERR) 4304 sc->sc_stats.ast_tx_desccfgerr++; 4305 /* 4306 * This can be valid for successful frame transmission! 4307 * If there's a TX FIFO underrun during aggregate transmission, 4308 * the MAC will pad the rest of the aggregate with delimiters. 4309 * If a BA is returned, the frame is marked as "OK" and it's up 4310 * to the TX completion code to notice which frames weren't 4311 * successfully transmitted. 4312 */ 4313 if (ts->ts_flags & HAL_TX_DATA_UNDERRUN) 4314 sc->sc_stats.ast_tx_data_underrun++; 4315 if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN) 4316 sc->sc_stats.ast_tx_delim_underrun++; 4317 4318 sr = ts->ts_shortretry; 4319 lr = ts->ts_longretry; 4320 sc->sc_stats.ast_tx_shortretry += sr; 4321 sc->sc_stats.ast_tx_longretry += lr; 4322 4323 } 4324 4325 /* 4326 * The default completion. If fail is 1, this means 4327 * "please don't retry the frame, and just return -1 status 4328 * to the net80211 stack. 4329 */ 4330 void 4331 ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4332 { 4333 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4334 int st; 4335 4336 if (fail == 1) 4337 st = -1; 4338 else 4339 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ? 
4340 ts->ts_status : HAL_TXERR_XRETRY; 4341 4342 #if 0 4343 if (bf->bf_state.bfs_dobaw) 4344 device_printf(sc->sc_dev, 4345 "%s: bf %p: seqno %d: dobaw should've been cleared!\n", 4346 __func__, 4347 bf, 4348 SEQNO(bf->bf_state.bfs_seqno)); 4349 #endif 4350 if (bf->bf_next != NULL) 4351 device_printf(sc->sc_dev, 4352 "%s: bf %p: seqno %d: bf_next not NULL!\n", 4353 __func__, 4354 bf, 4355 SEQNO(bf->bf_state.bfs_seqno)); 4356 4357 /* 4358 * Check if the node software queue is empty; if so 4359 * then clear the TIM. 4360 * 4361 * This needs to be done before the buffer is freed as 4362 * otherwise the node reference will have been released 4363 * and the node may not actually exist any longer. 4364 * 4365 * XXX I don't like this belonging here, but it's cleaner 4366 * to do it here right now then all the other places 4367 * where ath_tx_default_comp() is called. 4368 * 4369 * XXX TODO: during drain, ensure that the callback is 4370 * being called so we get a chance to update the TIM. 4371 */ 4372 if (bf->bf_node) { 4373 ATH_TX_LOCK(sc); 4374 ath_tx_update_tim(sc, bf->bf_node, 0); 4375 ATH_TX_UNLOCK(sc); 4376 } 4377 4378 /* 4379 * Do any tx complete callback. Note this must 4380 * be done before releasing the node reference. 4381 * This will free the mbuf, release the net80211 4382 * node and recycle the ath_buf. 4383 */ 4384 ath_tx_freebuf(sc, bf, st); 4385 } 4386 4387 /* 4388 * Update rate control with the given completion status. 4389 */ 4390 void 4391 ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 4392 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 4393 int nframes, int nbad) 4394 { 4395 struct ath_node *an; 4396 4397 /* Only for unicast frames */ 4398 if (ni == NULL) 4399 return; 4400 4401 an = ATH_NODE(ni); 4402 ATH_NODE_UNLOCK_ASSERT(an); 4403 4404 if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 4405 ATH_NODE_LOCK(an); 4406 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 4407 ATH_NODE_UNLOCK(an); 4408 } 4409 } 4410 4411 /* 4412 * Process the completion of the given buffer. 4413 * 4414 * This calls the rate control update and then the buffer completion. 4415 * This will either free the buffer or requeue it. In any case, the 4416 * bf pointer should be treated as invalid after this function is called. 4417 */ 4418 void 4419 ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq, 4420 struct ath_tx_status *ts, struct ath_buf *bf) 4421 { 4422 struct ieee80211_node *ni = bf->bf_node; 4423 struct ath_node *an = NULL; 4424 4425 ATH_TX_UNLOCK_ASSERT(sc); 4426 ATH_TXQ_UNLOCK_ASSERT(txq); 4427 4428 /* If unicast frame, update general statistics */ 4429 if (ni != NULL) { 4430 an = ATH_NODE(ni); 4431 /* update statistics */ 4432 ath_tx_update_stats(sc, ts, bf); 4433 } 4434 4435 /* 4436 * Call the completion handler. 4437 * The completion handler is responsible for 4438 * calling the rate control code. 4439 * 4440 * Frames with no completion handler get the 4441 * rate control code called here. 4442 */ 4443 if (bf->bf_comp == NULL) { 4444 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 4445 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 4446 /* 4447 * XXX assume this isn't an aggregate 4448 * frame. 4449 */ 4450 ath_tx_update_ratectrl(sc, ni, 4451 bf->bf_state.bfs_rc, ts, 4452 bf->bf_state.bfs_pktlen, 1, 4453 (ts->ts_status == 0 ? 0 : 1)); 4454 } 4455 ath_tx_default_comp(sc, bf, 0); 4456 } else 4457 bf->bf_comp(sc, bf, 0); 4458 } 4459 4460 4461 4462 /* 4463 * Process completed xmit descriptors from the specified queue. 
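 * Each buffer's last descriptor is checked with ath_hal_txprocdesc();
 * a HAL_EINPROGRESS status means the hardware is still working on
 * that frame and processing of the queue stops there.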
4464 * Kick the packet scheduler if needed. This can occur from this 4465 * particular task. 4466 */ 4467 static int 4468 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 4469 { 4470 struct ath_hal *ah = sc->sc_ah; 4471 struct ath_buf *bf; 4472 struct ath_desc *ds; 4473 struct ath_tx_status *ts; 4474 struct ieee80211_node *ni; 4475 #ifdef IEEE80211_SUPPORT_SUPERG 4476 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 4477 #endif /* IEEE80211_SUPPORT_SUPERG */ 4478 int nacked; 4479 HAL_STATUS status; 4480 4481 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 4482 __func__, txq->axq_qnum, 4483 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4484 txq->axq_link); 4485 4486 ATH_KTR(sc, ATH_KTR_TXCOMP, 4, 4487 "ath_tx_processq: txq=%u head %p link %p depth %p", 4488 txq->axq_qnum, 4489 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4490 txq->axq_link, 4491 txq->axq_depth); 4492 4493 nacked = 0; 4494 for (;;) { 4495 ATH_TXQ_LOCK(txq); 4496 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 4497 bf = TAILQ_FIRST(&txq->axq_q); 4498 if (bf == NULL) { 4499 ATH_TXQ_UNLOCK(txq); 4500 break; 4501 } 4502 ds = bf->bf_lastds; /* XXX must be setup correctly! */ 4503 ts = &bf->bf_status.ds_txstat; 4504 4505 status = ath_hal_txprocdesc(ah, ds, ts); 4506 #ifdef ATH_DEBUG 4507 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 4508 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 4509 status == HAL_OK); 4510 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) 4511 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 4512 status == HAL_OK); 4513 #endif 4514 #ifdef ATH_DEBUG_ALQ 4515 if (if_ath_alq_checkdebug(&sc->sc_alq, 4516 ATH_ALQ_EDMA_TXSTATUS)) { 4517 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS, 4518 sc->sc_tx_statuslen, 4519 (char *) ds); 4520 } 4521 #endif 4522 4523 if (status == HAL_EINPROGRESS) { 4524 ATH_KTR(sc, ATH_KTR_TXCOMP, 3, 4525 "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS", 4526 txq->axq_qnum, bf, ds); 4527 ATH_TXQ_UNLOCK(txq); 4528 break; 4529 } 4530 ATH_TXQ_REMOVE(txq, bf, bf_list); 4531 4532 /* 4533 * Sanity check. 4534 */ 4535 if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) { 4536 device_printf(sc->sc_dev, 4537 "%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n", 4538 __func__, 4539 txq->axq_qnum, 4540 bf, 4541 bf->bf_state.bfs_tx_queue); 4542 } 4543 if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) { 4544 device_printf(sc->sc_dev, 4545 "%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n", 4546 __func__, 4547 txq->axq_qnum, 4548 bf->bf_last, 4549 bf->bf_last->bf_state.bfs_tx_queue); 4550 } 4551 4552 #if 0 4553 if (txq->axq_depth > 0) { 4554 /* 4555 * More frames follow. Mark the buffer busy 4556 * so it's not re-used while the hardware may 4557 * still re-read the link field in the descriptor. 4558 * 4559 * Use the last buffer in an aggregate as that 4560 * is where the hardware may be - intermediate 4561 * descriptors won't be "busy". 4562 */ 4563 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 4564 } else 4565 txq->axq_link = NULL; 4566 #else 4567 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 4568 #endif 4569 if (bf->bf_state.bfs_aggr) 4570 txq->axq_aggr_depth--; 4571 4572 ni = bf->bf_node; 4573 4574 ATH_KTR(sc, ATH_KTR_TXCOMP, 5, 4575 "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x", 4576 txq->axq_qnum, bf, ds, ni, ts->ts_status); 4577 /* 4578 * If unicast frame was ack'd update RSSI, 4579 * including the last rx time used to 4580 * workaround phantom bmiss interrupts. 
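 * The running average is maintained via ATH_RSSI_LPF(), a simple
 * low-pass filter over the per-frame ts_rssi values.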
4581 */ 4582 if (ni != NULL && ts->ts_status == 0 && 4583 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 4584 nacked++; 4585 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 4586 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 4587 ts->ts_rssi); 4588 } 4589 ATH_TXQ_UNLOCK(txq); 4590 4591 /* 4592 * Update statistics and call completion 4593 */ 4594 ath_tx_process_buf_completion(sc, txq, ts, bf); 4595 4596 /* XXX at this point, bf and ni may be totally invalid */ 4597 } 4598 #ifdef IEEE80211_SUPPORT_SUPERG 4599 /* 4600 * Flush fast-frame staging queue when traffic slows. 4601 */ 4602 if (txq->axq_depth <= 1) 4603 ieee80211_ff_flush(ic, txq->axq_ac); 4604 #endif 4605 4606 /* Kick the software TXQ scheduler */ 4607 if (dosched) { 4608 ATH_TX_LOCK(sc); 4609 ath_txq_sched(sc, txq); 4610 ATH_TX_UNLOCK(sc); 4611 } 4612 4613 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4614 "ath_tx_processq: txq=%u: done", 4615 txq->axq_qnum); 4616 4617 return nacked; 4618 } 4619 4620 #define TXQACTIVE(t, q) ( (t) & (1 << (q))) 4621 4622 /* 4623 * Deferred processing of transmit interrupt; special-cased 4624 * for a single hardware transmit queue (e.g. 5210 and 5211). 4625 */ 4626 static void 4627 ath_tx_proc_q0(void *arg, int npending) 4628 { 4629 struct ath_softc *sc = arg; 4630 #if 0 4631 struct ifnet *ifp = sc->sc_ifp; 4632 #endif 4633 uint32_t txqs; 4634 4635 wlan_serialize_enter(); 4636 ATH_PCU_LOCK(sc); 4637 sc->sc_txproc_cnt++; 4638 txqs = sc->sc_txq_active; 4639 sc->sc_txq_active &= ~txqs; 4640 ATH_PCU_UNLOCK(sc); 4641 4642 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4643 4644 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4645 "ath_tx_proc_q0: txqs=0x%08x", txqs); 4646 4647 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 4648 /* XXX why is lastrx updated in tx code? */ 4649 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4650 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 4651 ath_tx_processq(sc, sc->sc_cabq, 1); 4652 #if 0 4653 /* remove, DragonFly uses OACTIVE to control if_start calls */ 4654 IF_LOCK(&ifp->if_snd); 4655 ifq_clr_oactive(&ifp->if_snd); 4656 IF_UNLOCK(&ifp->if_snd); 4657 #endif 4658 sc->sc_wd_timer = 0; 4659 4660 if (sc->sc_softled) 4661 ath_led_event(sc, sc->sc_txrix); 4662 4663 ATH_PCU_LOCK(sc); 4664 sc->sc_txproc_cnt--; 4665 ATH_PCU_UNLOCK(sc); 4666 4667 ath_power_restore_power_state(sc); 4668 4669 ath_tx_kick(sc); 4670 wlan_serialize_exit(); 4671 } 4672 4673 /* 4674 * Deferred processing of transmit interrupt; special-cased 4675 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 4676 */ 4677 static void 4678 ath_tx_proc_q0123(void *arg, int npending) 4679 { 4680 struct ath_softc *sc = arg; 4681 #if 0 4682 struct ifnet *ifp = sc->sc_ifp; 4683 #endif 4684 int nacked; 4685 uint32_t txqs; 4686 4687 wlan_serialize_enter(); 4688 ATH_PCU_LOCK(sc); 4689 sc->sc_txproc_cnt++; 4690 txqs = sc->sc_txq_active; 4691 sc->sc_txq_active &= ~txqs; 4692 ATH_PCU_UNLOCK(sc); 4693 4694 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4695 4696 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4697 "ath_tx_proc_q0123: txqs=0x%08x", txqs); 4698 4699 /* 4700 * Process each active queue. 
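 * txqs is the snapshot of sc_txq_active taken above under the PCU
 * lock; TXQACTIVE() just tests the bit for a given hardware queue.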
4701 */ 4702 nacked = 0; 4703 if (TXQACTIVE(txqs, 0)) 4704 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 4705 if (TXQACTIVE(txqs, 1)) 4706 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 4707 if (TXQACTIVE(txqs, 2)) 4708 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 4709 if (TXQACTIVE(txqs, 3)) 4710 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 4711 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 4712 ath_tx_processq(sc, sc->sc_cabq, 1); 4713 if (nacked) 4714 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4715 4716 #if 0 4717 /* remove, DragonFly uses OACTIVE to control if_start calls */ 4718 IF_LOCK(&ifp->if_snd); 4719 ifq_clr_oactive(&ifp->if_snd); 4720 IF_UNLOCK(&ifp->if_snd); 4721 #endif 4722 sc->sc_wd_timer = 0; 4723 4724 if (sc->sc_softled) 4725 ath_led_event(sc, sc->sc_txrix); 4726 4727 ATH_PCU_LOCK(sc); 4728 sc->sc_txproc_cnt--; 4729 ATH_PCU_UNLOCK(sc); 4730 4731 ath_power_restore_power_state(sc); 4732 4733 ath_tx_kick(sc); 4734 wlan_serialize_exit(); 4735 } 4736 4737 /* 4738 * Deferred processing of transmit interrupt. 4739 */ 4740 static void 4741 ath_tx_proc(void *arg, int npending) 4742 { 4743 struct ath_softc *sc = arg; 4744 #if 0 4745 struct ifnet *ifp = sc->sc_ifp; 4746 #endif 4747 int i, nacked; 4748 uint32_t txqs; 4749 4750 wlan_serialize_enter(); 4751 ATH_PCU_LOCK(sc); 4752 sc->sc_txproc_cnt++; 4753 txqs = sc->sc_txq_active; 4754 sc->sc_txq_active &= ~txqs; 4755 ATH_PCU_UNLOCK(sc); 4756 4757 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4758 4759 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs); 4760 4761 /* 4762 * Process each active queue. 4763 */ 4764 nacked = 0; 4765 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4766 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 4767 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 4768 if (nacked) 4769 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4770 4771 #if 0 4772 /* remove, DragonFly uses OACTIVE to control if_start calls */ 4773 /* XXX check this inside of IF_LOCK? */ 4774 IF_LOCK(&ifp->if_snd); 4775 ifq_clr_oactive(&ifp->if_snd); 4776 IF_UNLOCK(&ifp->if_snd); 4777 #endif 4778 sc->sc_wd_timer = 0; 4779 4780 if (sc->sc_softled) 4781 ath_led_event(sc, sc->sc_txrix); 4782 4783 ATH_PCU_LOCK(sc); 4784 sc->sc_txproc_cnt--; 4785 ATH_PCU_UNLOCK(sc); 4786 4787 ath_power_restore_power_state(sc); 4788 4789 ath_tx_kick(sc); 4790 wlan_serialize_exit(); 4791 } 4792 #undef TXQACTIVE 4793 4794 /* 4795 * Deferred processing of TXQ rescheduling. 4796 */ 4797 static void 4798 ath_txq_sched_tasklet(void *arg, int npending) 4799 { 4800 struct ath_softc *sc = arg; 4801 int i; 4802 4803 wlan_serialize_enter(); 4804 4805 /* XXX is skipping ok? 
*/ 4806 ATH_PCU_LOCK(sc); 4807 #if 0 4808 if (sc->sc_inreset_cnt > 0) { 4809 device_printf(sc->sc_dev, 4810 "%s: sc_inreset_cnt > 0; skipping\n", __func__); 4811 ATH_PCU_UNLOCK(sc); 4812 wlan_serialize_exit(); 4813 return; 4814 } 4815 #endif 4816 sc->sc_txproc_cnt++; 4817 ATH_PCU_UNLOCK(sc); 4818 4819 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4820 4821 ATH_TX_LOCK(sc); 4822 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 4823 if (ATH_TXQ_SETUP(sc, i)) { 4824 ath_txq_sched(sc, &sc->sc_txq[i]); 4825 } 4826 } 4827 ATH_TX_UNLOCK(sc); 4828 4829 ath_power_restore_power_state(sc); 4830 4831 ATH_PCU_LOCK(sc); 4832 sc->sc_txproc_cnt--; 4833 ATH_PCU_UNLOCK(sc); 4834 wlan_serialize_exit(); 4835 } 4836 4837 void 4838 ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) 4839 { 4840 4841 ATH_TXBUF_LOCK_ASSERT(sc); 4842 4843 if (bf->bf_flags & ATH_BUF_MGMT) 4844 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); 4845 else { 4846 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 4847 sc->sc_txbuf_cnt++; 4848 if (sc->sc_txbuf_cnt > ath_txbuf) { 4849 device_printf(sc->sc_dev, 4850 "%s: sc_txbuf_cnt > %d?\n", 4851 __func__, 4852 ath_txbuf); 4853 sc->sc_txbuf_cnt = ath_txbuf; 4854 } 4855 } 4856 } 4857 4858 void 4859 ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) 4860 { 4861 4862 ATH_TXBUF_LOCK_ASSERT(sc); 4863 4864 if (bf->bf_flags & ATH_BUF_MGMT) 4865 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); 4866 else { 4867 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 4868 sc->sc_txbuf_cnt++; 4869 if (sc->sc_txbuf_cnt > ATH_TXBUF) { 4870 device_printf(sc->sc_dev, 4871 "%s: sc_txbuf_cnt > %d?\n", 4872 __func__, 4873 ATH_TXBUF); 4874 sc->sc_txbuf_cnt = ATH_TXBUF; 4875 } 4876 } 4877 } 4878 4879 /* 4880 * Free the holding buffer if it exists 4881 */ 4882 void 4883 ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq) 4884 { 4885 ATH_TXBUF_UNLOCK_ASSERT(sc); 4886 ATH_TXQ_LOCK_ASSERT(txq); 4887 4888 if (txq->axq_holdingbf == NULL) 4889 return; 4890 4891 txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY; 4892 4893 ATH_TXBUF_LOCK(sc); 4894 ath_returnbuf_tail(sc, txq->axq_holdingbf); 4895 ATH_TXBUF_UNLOCK(sc); 4896 4897 txq->axq_holdingbf = NULL; 4898 } 4899 4900 /* 4901 * Add this buffer to the holding queue, freeing the previous 4902 * one if it exists. 4903 */ 4904 static void 4905 ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf) 4906 { 4907 struct ath_txq *txq; 4908 4909 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; 4910 4911 ATH_TXBUF_UNLOCK_ASSERT(sc); 4912 ATH_TXQ_LOCK_ASSERT(txq); 4913 4914 /* XXX assert ATH_BUF_BUSY is set */ 4915 4916 /* XXX assert the tx queue is under the max number */ 4917 if (bf->bf_state.bfs_tx_queue > HAL_NUM_TX_QUEUES) { 4918 device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n", 4919 __func__, 4920 bf, 4921 bf->bf_state.bfs_tx_queue); 4922 bf->bf_flags &= ~ATH_BUF_BUSY; 4923 ath_returnbuf_tail(sc, bf); 4924 return; 4925 } 4926 ath_txq_freeholdingbuf(sc, txq); 4927 txq->axq_holdingbf = bf; 4928 } 4929 4930 /* 4931 * Return a buffer to the pool and update the 'busy' flag on the 4932 * previous 'tail' entry. 4933 * 4934 * This _must_ only be called when the buffer is involved in a completed 4935 * TX. The logic is that if it was part of an active TX, the previous 4936 * buffer on the list is now not involved in a halted TX DMA queue, waiting 4937 * for restart (eg for TDMA.) 4938 * 4939 * The caller must free the mbuf and recycle the node reference. 4940 * 4941 * XXX This method of handling busy / holding buffers is insanely stupid. 
4942 * It requires bf_state.bfs_tx_queue to be correctly assigned. It would 4943 * be much nicer if buffers in the processq() methods would instead be 4944 * always completed there (pushed onto a txq or ath_bufhead) so we knew 4945 * exactly what hardware queue they came from in the first place. 4946 */ 4947 void 4948 ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 4949 { 4950 struct ath_txq *txq; 4951 4952 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; 4953 4954 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 4955 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 4956 4957 /* 4958 * If this buffer is busy, push it onto the holding queue. 4959 */ 4960 if (bf->bf_flags & ATH_BUF_BUSY) { 4961 ATH_TXQ_LOCK(txq); 4962 ath_txq_addholdingbuf(sc, bf); 4963 ATH_TXQ_UNLOCK(txq); 4964 return; 4965 } 4966 4967 /* 4968 * Not a busy buffer, so free normally 4969 */ 4970 ATH_TXBUF_LOCK(sc); 4971 ath_returnbuf_tail(sc, bf); 4972 ATH_TXBUF_UNLOCK(sc); 4973 } 4974 4975 /* 4976 * This is currently used by ath_tx_draintxq() and 4977 * ath_tx_tid_free_pkts(). 4978 * 4979 * It recycles a single ath_buf. 4980 */ 4981 void 4982 ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 4983 { 4984 struct ieee80211_node *ni = bf->bf_node; 4985 struct mbuf *m0 = bf->bf_m; 4986 4987 /* 4988 * Make sure that we only sync/unload if there's an mbuf. 4989 * If not (eg we cloned a buffer), the unload will have already 4990 * occured. 4991 */ 4992 if (bf->bf_m != NULL) { 4993 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4994 BUS_DMASYNC_POSTWRITE); 4995 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 4996 } 4997 4998 bf->bf_node = NULL; 4999 bf->bf_m = NULL; 5000 5001 /* Free the buffer, it's not needed any longer */ 5002 ath_freebuf(sc, bf); 5003 5004 /* Pass the buffer back to net80211 - completing it */ 5005 ieee80211_tx_complete(ni, m0, status); 5006 } 5007 5008 static struct ath_buf * 5009 ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq) 5010 { 5011 struct ath_buf *bf; 5012 5013 ATH_TXQ_LOCK_ASSERT(txq); 5014 5015 /* 5016 * Drain the FIFO queue first, then if it's 5017 * empty, move to the normal frame queue. 5018 */ 5019 bf = TAILQ_FIRST(&txq->fifo.axq_q); 5020 if (bf != NULL) { 5021 /* 5022 * Is it the last buffer in this set? 5023 * Decrement the FIFO counter. 5024 */ 5025 if (bf->bf_flags & ATH_BUF_FIFOEND) { 5026 if (txq->axq_fifo_depth == 0) { 5027 device_printf(sc->sc_dev, 5028 "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n", 5029 __func__, 5030 txq->axq_qnum, 5031 txq->fifo.axq_depth); 5032 } else 5033 txq->axq_fifo_depth--; 5034 } 5035 ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list); 5036 return (bf); 5037 } 5038 5039 /* 5040 * Debugging! 5041 */ 5042 if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) { 5043 device_printf(sc->sc_dev, 5044 "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n", 5045 __func__, 5046 txq->axq_qnum, 5047 txq->axq_fifo_depth, 5048 txq->fifo.axq_depth); 5049 } 5050 5051 /* 5052 * Now drain the pending queue. 
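 * (the legacy axq_q list; once it is empty axq_link is also cleared
 * so nothing later chains off a stale descriptor pointer.)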
5053 */ 5054 bf = TAILQ_FIRST(&txq->axq_q); 5055 if (bf == NULL) { 5056 txq->axq_link = NULL; 5057 return (NULL); 5058 } 5059 ATH_TXQ_REMOVE(txq, bf, bf_list); 5060 return (bf); 5061 } 5062 5063 void 5064 ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 5065 { 5066 #ifdef ATH_DEBUG 5067 struct ath_hal *ah = sc->sc_ah; 5068 #endif 5069 struct ath_buf *bf; 5070 u_int ix; 5071 5072 /* 5073 * NB: this assumes output has been stopped and 5074 * we do not need to block ath_tx_proc 5075 */ 5076 for (ix = 0;; ix++) { 5077 ATH_TXQ_LOCK(txq); 5078 bf = ath_tx_draintxq_get_one(sc, txq); 5079 if (bf == NULL) { 5080 ATH_TXQ_UNLOCK(txq); 5081 break; 5082 } 5083 if (bf->bf_state.bfs_aggr) 5084 txq->axq_aggr_depth--; 5085 #ifdef ATH_DEBUG 5086 if (sc->sc_debug & ATH_DEBUG_RESET) { 5087 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 5088 int status = 0; 5089 5090 /* 5091 * EDMA operation has a TX completion FIFO 5092 * separate from the TX descriptor, so this 5093 * method of checking the "completion" status 5094 * is wrong. 5095 */ 5096 if (! sc->sc_isedma) { 5097 status = (ath_hal_txprocdesc(ah, 5098 bf->bf_lastds, 5099 &bf->bf_status.ds_txstat) == HAL_OK); 5100 } 5101 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status); 5102 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 5103 bf->bf_m->m_len, 0, -1); 5104 } 5105 #endif /* ATH_DEBUG */ 5106 /* 5107 * Since we're now doing magic in the completion 5108 * functions, we -must- call it for aggregation 5109 * destinations or BAW tracking will get upset. 5110 */ 5111 /* 5112 * Clear ATH_BUF_BUSY; the completion handler 5113 * will free the buffer. 5114 */ 5115 ATH_TXQ_UNLOCK(txq); 5116 bf->bf_flags &= ~ATH_BUF_BUSY; 5117 if (bf->bf_comp) 5118 bf->bf_comp(sc, bf, 1); 5119 else 5120 ath_tx_default_comp(sc, bf, 1); 5121 } 5122 5123 /* 5124 * Free the holding buffer if it exists 5125 */ 5126 ATH_TXQ_LOCK(txq); 5127 ath_txq_freeholdingbuf(sc, txq); 5128 ATH_TXQ_UNLOCK(txq); 5129 5130 /* 5131 * Drain software queued frames which are on 5132 * active TIDs. 5133 */ 5134 ath_tx_txq_drain(sc, txq); 5135 } 5136 5137 static void 5138 ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 5139 { 5140 struct ath_hal *ah = sc->sc_ah; 5141 5142 ATH_TXQ_LOCK_ASSERT(txq); 5143 5144 DPRINTF(sc, ATH_DEBUG_RESET, 5145 "%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, " 5146 "link %p, holdingbf=%p\n", 5147 __func__, 5148 txq->axq_qnum, 5149 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 5150 (int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)), 5151 (int) ath_hal_numtxpending(ah, txq->axq_qnum), 5152 txq->axq_flags, 5153 txq->axq_link, 5154 txq->axq_holdingbf); 5155 5156 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 5157 /* We've stopped TX DMA, so mark this as stopped. 
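 * Clearing ATH_TXQ_PUTRUNNING below means the next handoff to this
 * queue is expected to re-program the TXDP/FIFO pointer rather than
 * chain a new frame off axq_link.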
*/ 5158 txq->axq_flags &= ~ATH_TXQ_PUTRUNNING; 5159 5160 #ifdef ATH_DEBUG 5161 if ((sc->sc_debug & ATH_DEBUG_RESET) 5162 && (txq->axq_holdingbf != NULL)) { 5163 ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0); 5164 } 5165 #endif 5166 } 5167 5168 int 5169 ath_stoptxdma(struct ath_softc *sc) 5170 { 5171 struct ath_hal *ah = sc->sc_ah; 5172 int i; 5173 5174 /* XXX return value */ 5175 if (sc->sc_invalid) 5176 return 0; 5177 5178 if (!sc->sc_invalid) { 5179 /* don't touch the hardware if marked invalid */ 5180 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 5181 __func__, sc->sc_bhalq, 5182 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 5183 NULL); 5184 5185 /* stop the beacon queue */ 5186 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 5187 5188 /* Stop the data queues */ 5189 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5190 if (ATH_TXQ_SETUP(sc, i)) { 5191 ATH_TXQ_LOCK(&sc->sc_txq[i]); 5192 ath_tx_stopdma(sc, &sc->sc_txq[i]); 5193 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 5194 } 5195 } 5196 } 5197 5198 return 1; 5199 } 5200 5201 #ifdef ATH_DEBUG 5202 void 5203 ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq) 5204 { 5205 struct ath_hal *ah = sc->sc_ah; 5206 struct ath_buf *bf; 5207 int i = 0; 5208 5209 if (! (sc->sc_debug & ATH_DEBUG_RESET)) 5210 return; 5211 5212 device_printf(sc->sc_dev, "%s: Q%d: begin\n", 5213 __func__, txq->axq_qnum); 5214 TAILQ_FOREACH(bf, &txq->axq_q, bf_list) { 5215 ath_printtxbuf(sc, bf, txq->axq_qnum, i, 5216 ath_hal_txprocdesc(ah, bf->bf_lastds, 5217 &bf->bf_status.ds_txstat) == HAL_OK); 5218 i++; 5219 } 5220 device_printf(sc->sc_dev, "%s: Q%d: end\n", 5221 __func__, txq->axq_qnum); 5222 } 5223 #endif /* ATH_DEBUG */ 5224 5225 /* 5226 * Drain the transmit queues and reclaim resources. 5227 */ 5228 void 5229 ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 5230 { 5231 struct ath_hal *ah = sc->sc_ah; 5232 #ifdef ATH_DEBUG 5233 struct ifnet *ifp = sc->sc_ifp; 5234 #endif 5235 int i; 5236 struct ath_buf *bf_last; 5237 5238 (void) ath_stoptxdma(sc); 5239 5240 /* 5241 * Dump the queue contents 5242 */ 5243 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5244 /* 5245 * XXX TODO: should we just handle the completed TX frames 5246 * here, whether or not the reset is a full one or not? 5247 */ 5248 if (ATH_TXQ_SETUP(sc, i)) { 5249 #ifdef ATH_DEBUG 5250 if (sc->sc_debug & ATH_DEBUG_RESET) 5251 ath_tx_dump(sc, &sc->sc_txq[i]); 5252 #endif /* ATH_DEBUG */ 5253 if (reset_type == ATH_RESET_NOLOSS) { 5254 ath_tx_processq(sc, &sc->sc_txq[i], 0); 5255 ATH_TXQ_LOCK(&sc->sc_txq[i]); 5256 /* 5257 * Free the holding buffer; DMA is now 5258 * stopped. 5259 */ 5260 ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]); 5261 /* 5262 * Setup the link pointer to be the 5263 * _last_ buffer/descriptor in the list. 5264 * If there's nothing in the list, set it 5265 * to NULL. 
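 * Re-deriving axq_link here lets frames queued after this no-loss
 * reset chain onto the last descriptor the hardware saw, instead of
 * onto a pointer that may no longer be valid.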
5266 */ 5267 bf_last = ATH_TXQ_LAST(&sc->sc_txq[i], 5268 axq_q_s); 5269 if (bf_last != NULL) { 5270 ath_hal_gettxdesclinkptr(ah, 5271 bf_last->bf_lastds, 5272 &sc->sc_txq[i].axq_link); 5273 } else { 5274 sc->sc_txq[i].axq_link = NULL; 5275 } 5276 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 5277 } else 5278 ath_tx_draintxq(sc, &sc->sc_txq[i]); 5279 } 5280 } 5281 #ifdef ATH_DEBUG 5282 if (sc->sc_debug & ATH_DEBUG_RESET) { 5283 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); 5284 if (bf != NULL && bf->bf_m != NULL) { 5285 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, 5286 ath_hal_txprocdesc(ah, bf->bf_lastds, 5287 &bf->bf_status.ds_txstat) == HAL_OK); 5288 ieee80211_dump_pkt(ifp->if_l2com, 5289 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 5290 0, -1); 5291 } 5292 } 5293 #endif /* ATH_DEBUG */ 5294 #if 0 5295 /* remove, DragonFly uses OACTIVE to control if_start calls */ 5296 IF_LOCK(&ifp->if_snd); 5297 ifq_clr_oactive(&ifp->if_snd); 5298 IF_UNLOCK(&ifp->if_snd); 5299 #endif 5300 sc->sc_wd_timer = 0; 5301 } 5302 5303 /* 5304 * Update internal state after a channel change. 5305 */ 5306 static void 5307 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 5308 { 5309 enum ieee80211_phymode mode; 5310 5311 /* 5312 * Change channels and update the h/w rate map 5313 * if we're switching; e.g. 11a to 11b/g. 5314 */ 5315 mode = ieee80211_chan2mode(chan); 5316 if (mode != sc->sc_curmode) 5317 ath_setcurmode(sc, mode); 5318 sc->sc_curchan = chan; 5319 } 5320 5321 /* 5322 * Set/change channels. If the channel is really being changed, 5323 * it's done by resetting the chip. To accomplish this we must 5324 * first cleanup any pending DMA, then restart stuff after a la 5325 * ath_init. 5326 */ 5327 static int 5328 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 5329 { 5330 struct ifnet *ifp = sc->sc_ifp; 5331 struct ieee80211com *ic = ifp->if_l2com; 5332 struct ath_hal *ah = sc->sc_ah; 5333 int ret = 0; 5334 5335 /* Treat this as an interface reset */ 5336 ATH_PCU_UNLOCK_ASSERT(sc); 5337 ATH_UNLOCK_ASSERT(sc); 5338 5339 /* (Try to) stop TX/RX from occuring */ 5340 taskqueue_block(sc->sc_tq); 5341 5342 ATH_PCU_LOCK(sc); 5343 5344 /* Stop new RX/TX/interrupt completion */ 5345 if (ath_reset_grablock(sc, 1) == 0) { 5346 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 5347 __func__); 5348 } 5349 5350 ath_hal_intrset(ah, 0); 5351 5352 /* Stop pending RX/TX completion */ 5353 ath_txrx_stop_locked(sc); 5354 5355 ATH_PCU_UNLOCK(sc); 5356 5357 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", 5358 __func__, ieee80211_chan2ieee(ic, chan), 5359 chan->ic_freq, chan->ic_flags); 5360 if (chan != sc->sc_curchan) { 5361 HAL_STATUS status; 5362 /* 5363 * To switch channels clear any pending DMA operations; 5364 * wait long enough for the RX fifo to drain, reset the 5365 * hardware at the new frequency, and then re-enable 5366 * the relevant bits of the h/w. 5367 */ 5368 #if 0 5369 ath_hal_intrset(ah, 0); /* disable interrupts */ 5370 #endif 5371 ath_stoprecv(sc, 1); /* turn off frame recv */ 5372 /* 5373 * First, handle completed TX/RX frames. 5374 */ 5375 ath_rx_flush(sc); 5376 ath_draintxq(sc, ATH_RESET_NOLOSS); 5377 /* 5378 * Next, flush the non-scheduled frames. 
5379 */ 5380 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ 5381 5382 ath_update_chainmasks(sc, chan); 5383 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 5384 sc->sc_cur_rxchainmask); 5385 sc->sc_rxfifo_state = ATH_RXFIFO_RESET; 5386 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) { 5387 if_printf(ifp, "%s: unable to reset " 5388 "channel %u (%u MHz, flags 0x%x), hal status %u\n", 5389 __func__, ieee80211_chan2ieee(ic, chan), 5390 chan->ic_freq, chan->ic_flags, status); 5391 ret = EIO; 5392 goto finish; 5393 } 5394 sc->sc_diversity = ath_hal_getdiversity(ah); 5395 5396 /* Let DFS at it in case it's a DFS channel */ 5397 ath_dfs_radar_enable(sc, chan); 5398 5399 /* Let spectral at in case spectral is enabled */ 5400 ath_spectral_enable(sc, chan); 5401 5402 /* 5403 * Let bluetooth coexistence at in case it's needed for this 5404 * channel 5405 */ 5406 ath_btcoex_enable(sc, ic->ic_curchan); 5407 5408 /* 5409 * If we're doing TDMA, enforce the TXOP limitation for chips 5410 * that support it. 5411 */ 5412 if (sc->sc_hasenforcetxop && sc->sc_tdma) 5413 ath_hal_setenforcetxop(sc->sc_ah, 1); 5414 else 5415 ath_hal_setenforcetxop(sc->sc_ah, 0); 5416 5417 /* 5418 * Re-enable rx framework. 5419 */ 5420 if (ath_startrecv(sc) != 0) { 5421 if_printf(ifp, "%s: unable to restart recv logic\n", 5422 __func__); 5423 ret = EIO; 5424 goto finish; 5425 } 5426 5427 /* 5428 * Change channels and update the h/w rate map 5429 * if we're switching; e.g. 11a to 11b/g. 5430 */ 5431 ath_chan_change(sc, chan); 5432 5433 /* 5434 * Reset clears the beacon timers; reset them 5435 * here if needed. 5436 */ 5437 if (sc->sc_beacons) { /* restart beacons */ 5438 #ifdef IEEE80211_SUPPORT_TDMA 5439 if (sc->sc_tdma) 5440 ath_tdma_config(sc, NULL); 5441 else 5442 #endif 5443 ath_beacon_config(sc, NULL); 5444 } 5445 5446 /* 5447 * Re-enable interrupts. 5448 */ 5449 #if 0 5450 ath_hal_intrset(ah, sc->sc_imask); 5451 #endif 5452 } 5453 5454 finish: 5455 ATH_PCU_LOCK(sc); 5456 sc->sc_inreset_cnt--; 5457 /* XXX only do this if sc_inreset_cnt == 0? */ 5458 ath_hal_intrset(ah, sc->sc_imask); 5459 ATH_PCU_UNLOCK(sc); 5460 5461 #if 0 5462 /* remove, DragonFly uses OACTIVE to control if_start calls */ 5463 IF_LOCK(&ifp->if_snd); 5464 ifq_clr_oactive(&ifp->if_snd); 5465 IF_UNLOCK(&ifp->if_snd); 5466 #endif 5467 ath_txrx_start(sc); 5468 /* XXX ath_start? */ 5469 5470 return ret; 5471 } 5472 5473 /* 5474 * Periodically recalibrate the PHY to account 5475 * for temperature/environment changes. 5476 */ 5477 static void 5478 ath_calibrate(void *arg) 5479 { 5480 struct ath_softc *sc = arg; 5481 struct ath_hal *ah = sc->sc_ah; 5482 struct ifnet *ifp = sc->sc_ifp; 5483 struct ieee80211com *ic = ifp->if_l2com; 5484 HAL_BOOL longCal, isCalDone = AH_TRUE; 5485 HAL_BOOL aniCal, shortCal = AH_FALSE; 5486 int nextcal; 5487 5488 /* 5489 * Force the hardware awake for ANI work. 
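 * ANI and the periodic calibration below poke baseband/RF registers,
 * which is only reliable while the chip is fully awake.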
5490 */ 5491 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5492 5493 wlan_serialize_enter(); 5494 5495 /* Skip trying to do this if we're in reset */ 5496 if (sc->sc_inreset_cnt) 5497 goto restart; 5498 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 5499 goto restart; 5500 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 5501 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 5502 if (sc->sc_doresetcal) 5503 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 5504 5505 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 5506 if (aniCal) { 5507 sc->sc_stats.ast_ani_cal++; 5508 sc->sc_lastani = ticks; 5509 ath_hal_ani_poll(ah, sc->sc_curchan); 5510 } 5511 5512 if (longCal) { 5513 sc->sc_stats.ast_per_cal++; 5514 sc->sc_lastlongcal = ticks; 5515 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 5516 /* 5517 * Rfgain is out of bounds, reset the chip 5518 * to load new gain values. 5519 */ 5520 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5521 "%s: rfgain change\n", __func__); 5522 sc->sc_stats.ast_per_rfgain++; 5523 sc->sc_resetcal = 0; 5524 sc->sc_doresetcal = AH_TRUE; 5525 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 5526 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5527 goto done; 5528 } 5529 /* 5530 * If this long cal is after an idle period, then 5531 * reset the data collection state so we start fresh. 5532 */ 5533 if (sc->sc_resetcal) { 5534 (void) ath_hal_calreset(ah, sc->sc_curchan); 5535 sc->sc_lastcalreset = ticks; 5536 sc->sc_lastshortcal = ticks; 5537 sc->sc_resetcal = 0; 5538 sc->sc_doresetcal = AH_TRUE; 5539 } 5540 } 5541 5542 /* Only call if we're doing a short/long cal, not for ANI calibration */ 5543 if (shortCal || longCal) { 5544 isCalDone = AH_FALSE; 5545 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 5546 if (longCal) { 5547 /* 5548 * Calibrate noise floor data again in case of change. 5549 */ 5550 ath_hal_process_noisefloor(ah); 5551 } 5552 } else { 5553 DPRINTF(sc, ATH_DEBUG_ANY, 5554 "%s: calibration of channel %u failed\n", 5555 __func__, sc->sc_curchan->ic_freq); 5556 sc->sc_stats.ast_per_calfail++; 5557 } 5558 if (shortCal) 5559 sc->sc_lastshortcal = ticks; 5560 } 5561 if (!isCalDone) { 5562 restart: 5563 /* 5564 * Use a shorter interval to potentially collect multiple 5565 * data samples required to complete calibration. Once 5566 * we're told the work is done we drop back to a longer 5567 * interval between requests. We're more aggressive doing 5568 * work when operating as an AP to improve operation right 5569 * after startup. 5570 */ 5571 sc->sc_lastshortcal = ticks; 5572 nextcal = ath_shortcalinterval*hz/1000; 5573 if (sc->sc_opmode != HAL_M_HOSTAP) 5574 nextcal *= 10; 5575 sc->sc_doresetcal = AH_TRUE; 5576 } else { 5577 /* nextcal should be the shortest time for next event */ 5578 nextcal = ath_longcalinterval*hz; 5579 if (sc->sc_lastcalreset == 0) 5580 sc->sc_lastcalreset = sc->sc_lastlongcal; 5581 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 5582 sc->sc_resetcal = 1; /* setup reset next trip */ 5583 sc->sc_doresetcal = AH_FALSE; 5584 } 5585 /* ANI calibration may occur more often than short/long/resetcal */ 5586 if (ath_anicalinterval > 0) 5587 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 5588 5589 if (nextcal != 0) { 5590 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 5591 __func__, nextcal, isCalDone ? 
"" : "!"); 5592 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 5593 } else { 5594 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 5595 __func__); 5596 /* NB: don't rearm timer */ 5597 } 5598 done: 5599 /* 5600 * Restore power state now that we're done. 5601 */ 5602 ath_power_restore_power_state(sc); 5603 wlan_serialize_exit(); 5604 } 5605 5606 static void 5607 ath_scan_start(struct ieee80211com *ic) 5608 { 5609 struct ifnet *ifp = ic->ic_ifp; 5610 struct ath_softc *sc = ifp->if_softc; 5611 struct ath_hal *ah = sc->sc_ah; 5612 u_int32_t rfilt; 5613 5614 /* XXX calibration timer? */ 5615 5616 ATH_LOCK(sc); 5617 sc->sc_scanning = 1; 5618 sc->sc_syncbeacon = 0; 5619 rfilt = ath_calcrxfilter(sc); 5620 ATH_UNLOCK(sc); 5621 5622 ATH_PCU_LOCK(sc); 5623 ath_hal_setrxfilter(ah, rfilt); 5624 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 5625 ATH_PCU_UNLOCK(sc); 5626 5627 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 5628 __func__, rfilt, ath_hal_ether_sprintf(ifp->if_broadcastaddr)); 5629 } 5630 5631 static void 5632 ath_scan_end(struct ieee80211com *ic) 5633 { 5634 struct ifnet *ifp = ic->ic_ifp; 5635 struct ath_softc *sc = ifp->if_softc; 5636 struct ath_hal *ah = sc->sc_ah; 5637 u_int32_t rfilt; 5638 5639 ATH_LOCK(sc); 5640 sc->sc_scanning = 0; 5641 rfilt = ath_calcrxfilter(sc); 5642 ATH_UNLOCK(sc); 5643 5644 ATH_PCU_LOCK(sc); 5645 ath_hal_setrxfilter(ah, rfilt); 5646 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5647 5648 ath_hal_process_noisefloor(ah); 5649 ATH_PCU_UNLOCK(sc); 5650 5651 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5652 __func__, rfilt, ath_hal_ether_sprintf(sc->sc_curbssid), 5653 sc->sc_curaid); 5654 } 5655 5656 #ifdef ATH_ENABLE_11N 5657 /* 5658 * For now, just do a channel change. 5659 * 5660 * Later, we'll go through the hard slog of suspending tx/rx, changing rate 5661 * control state and resetting the hardware without dropping frames out 5662 * of the queue. 5663 * 5664 * The unfortunate trouble here is making absolutely sure that the 5665 * channel width change has propagated enough so the hardware 5666 * absolutely isn't handed bogus frames for it's current operating 5667 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and 5668 * does occur in parallel, we need to make certain we've blocked 5669 * any further ongoing TX (and RX, that can cause raw TX) 5670 * before we do this. 5671 */ 5672 static void 5673 ath_update_chw(struct ieee80211com *ic) 5674 { 5675 struct ifnet *ifp = ic->ic_ifp; 5676 struct ath_softc *sc = ifp->if_softc; 5677 5678 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); 5679 ath_set_channel(ic); 5680 } 5681 #endif /* ATH_ENABLE_11N */ 5682 5683 static void 5684 ath_set_channel(struct ieee80211com *ic) 5685 { 5686 struct ifnet *ifp = ic->ic_ifp; 5687 struct ath_softc *sc = ifp->if_softc; 5688 5689 (void) ath_chan_set(sc, ic->ic_curchan); 5690 /* 5691 * If we are returning to our bss channel then mark state 5692 * so the next recv'd beacon's tsf will be used to sync the 5693 * beacon timers. Note that since we only hear beacons in 5694 * sta/ibss mode this has no effect in other operating modes. 5695 */ 5696 ATH_LOCK(sc); 5697 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5698 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 5699 sc->sc_syncbeacon = 1; 5700 ath_power_restore_power_state(sc); 5701 ATH_UNLOCK(sc); 5702 } 5703 5704 /* 5705 * Walk the vap list and check if there any vap's in RUN state. 
5706 */ 5707 static int 5708 ath_isanyrunningvaps(struct ieee80211vap *this) 5709 { 5710 struct ieee80211com *ic = this->iv_ic; 5711 struct ieee80211vap *vap; 5712 5713 IEEE80211_LOCK_ASSERT(ic); 5714 5715 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 5716 if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 5717 return 1; 5718 } 5719 return 0; 5720 } 5721 5722 static int 5723 ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 5724 { 5725 struct ieee80211com *ic = vap->iv_ic; 5726 struct ath_softc *sc = ic->ic_ifp->if_softc; 5727 struct ath_vap *avp = ATH_VAP(vap); 5728 struct ath_hal *ah = sc->sc_ah; 5729 struct ieee80211_node *ni = NULL; 5730 int i, error, stamode; 5731 u_int32_t rfilt; 5732 int csa_run_transition = 0; 5733 enum ieee80211_state ostate = vap->iv_state; 5734 5735 static const HAL_LED_STATE leds[] = { 5736 HAL_LED_INIT, /* IEEE80211_S_INIT */ 5737 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 5738 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 5739 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 5740 HAL_LED_RUN, /* IEEE80211_S_CAC */ 5741 HAL_LED_RUN, /* IEEE80211_S_RUN */ 5742 HAL_LED_RUN, /* IEEE80211_S_CSA */ 5743 HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 5744 }; 5745 5746 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 5747 ieee80211_state_name[ostate], 5748 ieee80211_state_name[nstate]); 5749 5750 /* 5751 * net80211 _should_ have the comlock asserted at this point. 5752 * There are some comments around the calls to vap->iv_newstate 5753 * which indicate that it (newstate) may end up dropping the 5754 * lock. This and the subsequent lock assert check after newstate 5755 * are an attempt to catch these and figure out how/why. 5756 */ 5757 IEEE80211_LOCK_ASSERT(ic); 5758 5759 /* Before we touch the hardware - wake it up */ 5760 /* 5761 * If the NIC is in anything other than SLEEP state, 5762 * we need to ensure that self-generated frames are 5763 * set for PWRMGT=0. Otherwise we may end up with 5764 * strange situations. 5765 * 5766 * XXX TODO: is this actually the case? :-) 5767 */ 5768 if (nstate != IEEE80211_S_SLEEP) 5769 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5770 5771 /* 5772 * Now, wake the thing up. 5773 */ 5774 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5775 5776 if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 5777 csa_run_transition = 1; 5778 5779 wlan_serialize_exit(); 5780 callout_stop_sync(&sc->sc_cal_ch); 5781 wlan_serialize_enter(); 5782 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 5783 5784 if (nstate == IEEE80211_S_SCAN) { 5785 /* 5786 * Scanning: turn off beacon miss and don't beacon. 5787 * Mark beacon state so when we reach RUN state we'll 5788 * [re]setup beacons. Unblock the task q thread so 5789 * deferred interrupt processing is done. 5790 */ 5791 5792 /* Ensure we stay awake during scan */ 5793 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5794 ath_power_setpower(sc, HAL_PM_AWAKE); 5795 5796 ath_hal_intrset(ah, 5797 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 5798 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5799 sc->sc_beacons = 0; 5800 taskqueue_unblock(sc->sc_tq); 5801 } 5802 5803 ni = ieee80211_ref_node(vap->iv_bss); 5804 rfilt = ath_calcrxfilter(sc); 5805 stamode = (vap->iv_opmode == IEEE80211_M_STA || 5806 vap->iv_opmode == IEEE80211_M_AHDEMO || 5807 vap->iv_opmode == IEEE80211_M_IBSS); 5808 5809 /* 5810 * XXX Dont need to do this (and others) if we've transitioned 5811 * from SLEEP->RUN. 
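 * ("this" being the BSSID/association id programming and RX filter
 * update just below, which should be unchanged across a SLEEP->RUN
 * transition.)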
5812 */ 5813 if (stamode && nstate == IEEE80211_S_RUN) { 5814 sc->sc_curaid = ni->ni_associd; 5815 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 5816 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5817 } 5818 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5819 __func__, rfilt, 5820 ath_hal_ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 5821 ath_hal_setrxfilter(ah, rfilt); 5822 5823 /* XXX is this to restore keycache on resume? */ 5824 if (vap->iv_opmode != IEEE80211_M_STA && 5825 (vap->iv_flags & IEEE80211_F_PRIVACY)) { 5826 for (i = 0; i < IEEE80211_WEP_NKID; i++) 5827 if (ath_hal_keyisvalid(ah, i)) 5828 ath_hal_keysetmac(ah, i, ni->ni_bssid); 5829 } 5830 5831 /* 5832 * Invoke the parent method to do net80211 work. 5833 */ 5834 error = avp->av_newstate(vap, nstate, arg); 5835 if (error != 0) 5836 goto bad; 5837 5838 /* 5839 * See above: ensure av_newstate() doesn't drop the lock 5840 * on us. 5841 */ 5842 IEEE80211_LOCK_ASSERT(ic); 5843 5844 if (nstate == IEEE80211_S_RUN) { 5845 /* NB: collect bss node again, it may have changed */ 5846 ieee80211_free_node(ni); 5847 ni = ieee80211_ref_node(vap->iv_bss); 5848 5849 DPRINTF(sc, ATH_DEBUG_STATE, 5850 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 5851 "capinfo 0x%04x chan %d\n", __func__, 5852 vap->iv_flags, ni->ni_intval, 5853 ath_hal_ether_sprintf(ni->ni_bssid), 5854 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 5855 5856 switch (vap->iv_opmode) { 5857 #ifdef IEEE80211_SUPPORT_TDMA 5858 case IEEE80211_M_AHDEMO: 5859 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 5860 break; 5861 /* fall thru... */ 5862 #endif 5863 case IEEE80211_M_HOSTAP: 5864 case IEEE80211_M_IBSS: 5865 case IEEE80211_M_MBSS: 5866 /* 5867 * Allocate and setup the beacon frame. 5868 * 5869 * Stop any previous beacon DMA. This may be 5870 * necessary, for example, when an ibss merge 5871 * causes reconfiguration; there will be a state 5872 * transition from RUN->RUN that means we may 5873 * be called with beacon transmission active. 5874 */ 5875 ath_hal_stoptxdma(ah, sc->sc_bhalq); 5876 5877 error = ath_beacon_alloc(sc, ni); 5878 if (error != 0) 5879 goto bad; 5880 /* 5881 * If joining an adhoc network defer beacon timer 5882 * configuration to the next beacon frame so we 5883 * have a current TSF to use. Otherwise we're 5884 * starting an ibss/bss so there's no need to delay; 5885 * if this is the first vap moving to RUN state, then 5886 * beacon state needs to be [re]configured. 5887 */ 5888 if (vap->iv_opmode == IEEE80211_M_IBSS && 5889 ni->ni_tstamp.tsf != 0) { 5890 sc->sc_syncbeacon = 1; 5891 } else if (!sc->sc_beacons) { 5892 #ifdef IEEE80211_SUPPORT_TDMA 5893 if (vap->iv_caps & IEEE80211_C_TDMA) 5894 ath_tdma_config(sc, vap); 5895 else 5896 #endif 5897 ath_beacon_config(sc, vap); 5898 sc->sc_beacons = 1; 5899 } 5900 break; 5901 case IEEE80211_M_STA: 5902 /* 5903 * Defer beacon timer configuration to the next 5904 * beacon frame so we have a current TSF to use 5905 * (any TSF collected when scanning is likely old). 5906 * However if it's due to a CSA -> RUN transition, 5907 * force a beacon update so we pick up a lack of 5908 * beacons from an AP in CAC and thus force a 5909 * scan. 5910 * 5911 * And, there's also corner cases here where 5912 * after a scan, the AP may have disappeared. 5913 * In that case, we may not receive an actual 5914 * beacon to update the beacon timer and thus we 5915 * won't get notified of the missing beacons. 
5916 */ 5917 if (ostate != IEEE80211_S_RUN && 5918 ostate != IEEE80211_S_SLEEP) { 5919 DPRINTF(sc, ATH_DEBUG_BEACON, 5920 "%s: STA; syncbeacon=1\n", __func__); 5921 sc->sc_syncbeacon = 1; 5922 5923 if (csa_run_transition) 5924 ath_beacon_config(sc, vap); 5925 5926 /* 5927 * PR: kern/175227 5928 * 5929 * Reconfigure beacons during reset; as 5930 * otherwise 5931 * we won't get the beacon timers reprogrammed 5932 * after a reset and thus we won't pick up a 5933 * beacon miss interrupt. 5934 * 5935 * Hopefully we'll see a beacon before the BMISS 5936 * timer fires (too often), leading to a STA 5937 * disassociation. 5938 */ 5939 sc->sc_beacons = 1; 5940 } 5941 break; 5942 case IEEE80211_M_MONITOR: 5943 /* 5944 * Monitor mode vaps have only INIT->RUN and RUN->RUN 5945 * transitions so we must re-enable interrupts here to 5946 * handle the case of a single monitor mode vap. 5947 */ 5948 ath_hal_intrset(ah, sc->sc_imask); 5949 break; 5950 case IEEE80211_M_WDS: 5951 break; 5952 default: 5953 break; 5954 } 5955 /* 5956 * Let the hal process statistics collected during a 5957 * scan so it can provide calibrated noise floor data. 5958 */ 5959 ath_hal_process_noisefloor(ah); 5960 /* 5961 * Reset rssi stats; maybe not the best place... 5962 */ 5963 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 5964 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 5965 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 5966 5967 /* 5968 * Force awake for RUN mode 5969 */ 5970 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5971 ath_power_setpower(sc, HAL_PM_AWAKE); 5972 5973 /* 5974 * Finally, start any timers and the task q thread 5975 * (in case we didn't go through SCAN state). 5976 */ 5977 if (ath_longcalinterval != 0) { 5978 /* start periodic recalibration timer */ 5979 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5980 } else { 5981 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5982 "%s: calibration disabled\n", __func__); 5983 } 5984 5985 taskqueue_unblock(sc->sc_tq); 5986 } else if (nstate == IEEE80211_S_INIT) { 5987 /* 5988 * If there are no vaps left in RUN state then 5989 * shutdown host/driver operation: 5990 * o disable interrupts 5991 * o disable the task queue thread 5992 * o mark beacon processing as stopped 5993 */ 5994 if (!ath_isanyrunningvaps(vap)) { 5995 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5996 /* disable interrupts */ 5997 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); 5998 taskqueue_block(sc->sc_tq); 5999 sc->sc_beacons = 0; 6000 } 6001 #ifdef IEEE80211_SUPPORT_TDMA 6002 ath_hal_setcca(ah, AH_TRUE); 6003 #endif 6004 } else if (nstate == IEEE80211_S_SLEEP) { 6005 /* We're going to sleep, so transition appropriately */ 6006 /* For now, only do this if we're a single STA vap */ 6007 if (sc->sc_nvaps == 1 && 6008 vap->iv_opmode == IEEE80211_M_STA) { 6009 DPRINTF(sc, ATH_DEBUG_BEACON, 6010 "%s: syncbeacon=%d\n", 6011 __func__, sc->sc_syncbeacon); 6012 /* 6013 * Always at least set the self-generated 6014 * frame config to set PWRMGT=1. 6015 */ 6016 ath_power_setselfgen(sc, HAL_PM_NETWORK_SLEEP); 6017 6018 /* 6019 * If we're not syncing beacons, transition 6020 * to NETWORK_SLEEP. 6021 * 6022 * We stay awake if syncbeacon > 0 in case 6023 * we need to listen for some beacons otherwise 6024 * our beacon timer config may be wrong. 
 */
6026             if (sc->sc_syncbeacon == 0) {
6027                 ath_power_setpower(sc, HAL_PM_NETWORK_SLEEP);
6028             }
6029         }
6030     }
6031 bad:
6032     ieee80211_free_node(ni);
6033
6034     /*
6035      * Restore the power state - either to what it was, or
6036      * to network_sleep if it's alright.
6037      */
6038     ath_power_restore_power_state(sc);
6039
6040     return error;
6041 }
6042
6043 /*
6044  * Allocate a key cache slot to the station so we can
6045  * setup a mapping from key index to node. The key cache
6046  * slot is needed for managing antenna state and for
6047  * compression when stations do not use crypto. We do
6048  * it unilaterally here; if crypto is employed this slot
6049  * will be reassigned.
6050  */
6051 static void
6052 ath_setup_stationkey(struct ieee80211_node *ni)
6053 {
6054     struct ieee80211vap *vap = ni->ni_vap;
6055     struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6056     ieee80211_keyix keyix, rxkeyix;
6057
6058     /* XXX should take a locked ref to vap->iv_bss */
6059     if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
6060         /*
6061          * Key cache is full; we'll fall back to doing
6062          * the more expensive lookup in software. Note
6063          * this also means no h/w compression.
6064          */
6065         /* XXX msg+statistic */
6066     } else {
6067         /* XXX locking? */
6068         ni->ni_ucastkey.wk_keyix = keyix;
6069         ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
6070         /* NB: must mark device key to get called back on delete */
6071         ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
6072         IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
6073         /* NB: this will create a pass-thru key entry */
6074         ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
6075     }
6076 }
6077
6078 /*
6079  * Setup driver-specific state for a newly associated node.
6080  * Note that we're also called on a re-associate; the isnew
6081  * param tells us if this is the first time or not.
6082  */
6083 static void
6084 ath_newassoc(struct ieee80211_node *ni, int isnew)
6085 {
6086     struct ath_node *an = ATH_NODE(ni);
6087     struct ieee80211vap *vap = ni->ni_vap;
6088     struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6089     const struct ieee80211_txparam *tp = ni->ni_txparms;
6090
6091     an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
6092     an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
6093
6094     DPRINTF(sc, ATH_DEBUG_NODE,
6095         "%s: %s: reassoc; isnew=%d, is_powersave=%d\n",
6096         __func__,
6097         ath_hal_ether_sprintf(ni->ni_macaddr),
6098         isnew,
6099         an->an_is_powersave);
6100
6101     ATH_NODE_LOCK(an);
6102     ath_rate_newassoc(sc, an, isnew);
6103     ATH_NODE_UNLOCK(an);
6104
6105     if (isnew &&
6106         (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
6107         ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
6108         ath_setup_stationkey(ni);
6109
6110     /*
6111      * If we're reassociating, make sure that any paused queues
6112      * get unpaused.
6113      *
6114      * Now, we may have frames in the hardware queue for this node.
6115      * So if we are reassociating and there are frames in the queue,
6116      * we need to go through the cleanup path to ensure that they're
6117      * marked as non-aggregate.
6118      */
6119     if (!
isnew) { 6120 DPRINTF(sc, ATH_DEBUG_NODE, 6121 "%s: %s: reassoc; is_powersave=%d\n", 6122 __func__, 6123 ath_hal_ether_sprintf(ni->ni_macaddr), 6124 an->an_is_powersave); 6125 6126 /* XXX for now, we can't hold the lock across assoc */ 6127 ath_tx_node_reassoc(sc, an); 6128 6129 /* XXX for now, we can't hold the lock across wakeup */ 6130 if (an->an_is_powersave) 6131 ath_tx_node_wakeup(sc, an); 6132 } 6133 } 6134 6135 static int 6136 ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, 6137 int nchans, struct ieee80211_channel chans[]) 6138 { 6139 struct ath_softc *sc = ic->ic_ifp->if_softc; 6140 struct ath_hal *ah = sc->sc_ah; 6141 HAL_STATUS status; 6142 6143 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 6144 "%s: rd %u cc %u location %c%s\n", 6145 __func__, reg->regdomain, reg->country, reg->location, 6146 reg->ecm ? " ecm" : ""); 6147 6148 status = ath_hal_set_channels(ah, chans, nchans, 6149 reg->country, reg->regdomain); 6150 if (status != HAL_OK) { 6151 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", 6152 __func__, status); 6153 return EINVAL; /* XXX */ 6154 } 6155 6156 return 0; 6157 } 6158 6159 static void 6160 ath_getradiocaps(struct ieee80211com *ic, 6161 int maxchans, int *nchans, struct ieee80211_channel chans[]) 6162 { 6163 struct ath_softc *sc = ic->ic_ifp->if_softc; 6164 struct ath_hal *ah = sc->sc_ah; 6165 6166 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", 6167 __func__, SKU_DEBUG, CTRY_DEFAULT); 6168 6169 /* XXX check return */ 6170 (void) ath_hal_getchannels(ah, chans, maxchans, nchans, 6171 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); 6172 6173 } 6174 6175 static int 6176 ath_getchannels(struct ath_softc *sc) 6177 { 6178 struct ifnet *ifp = sc->sc_ifp; 6179 struct ieee80211com *ic = ifp->if_l2com; 6180 struct ath_hal *ah = sc->sc_ah; 6181 HAL_STATUS status; 6182 6183 /* 6184 * Collect channel set based on EEPROM contents. 6185 */ 6186 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, 6187 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); 6188 if (status != HAL_OK) { 6189 if_printf(ifp, "%s: unable to collect channel list from hal, " 6190 "status %d\n", __func__, status); 6191 return EINVAL; 6192 } 6193 (void) ath_hal_getregdomain(ah, &sc->sc_eerd); 6194 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ 6195 /* XXX map Atheros sku's to net80211 SKU's */ 6196 /* XXX net80211 types too small */ 6197 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; 6198 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; 6199 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ 6200 ic->ic_regdomain.isocc[1] = ' '; 6201 6202 ic->ic_regdomain.ecm = 1; 6203 ic->ic_regdomain.location = 'I'; 6204 6205 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 6206 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", 6207 __func__, sc->sc_eerd, sc->sc_eecc, 6208 ic->ic_regdomain.regdomain, ic->ic_regdomain.country, 6209 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? 
" ecm" : ""); 6210 return 0; 6211 } 6212 6213 static int 6214 ath_rate_setup(struct ath_softc *sc, u_int mode) 6215 { 6216 struct ath_hal *ah = sc->sc_ah; 6217 const HAL_RATE_TABLE *rt; 6218 6219 switch (mode) { 6220 case IEEE80211_MODE_11A: 6221 rt = ath_hal_getratetable(ah, HAL_MODE_11A); 6222 break; 6223 case IEEE80211_MODE_HALF: 6224 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 6225 break; 6226 case IEEE80211_MODE_QUARTER: 6227 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 6228 break; 6229 case IEEE80211_MODE_11B: 6230 rt = ath_hal_getratetable(ah, HAL_MODE_11B); 6231 break; 6232 case IEEE80211_MODE_11G: 6233 rt = ath_hal_getratetable(ah, HAL_MODE_11G); 6234 break; 6235 case IEEE80211_MODE_TURBO_A: 6236 rt = ath_hal_getratetable(ah, HAL_MODE_108A); 6237 break; 6238 case IEEE80211_MODE_TURBO_G: 6239 rt = ath_hal_getratetable(ah, HAL_MODE_108G); 6240 break; 6241 case IEEE80211_MODE_STURBO_A: 6242 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 6243 break; 6244 case IEEE80211_MODE_11NA: 6245 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 6246 break; 6247 case IEEE80211_MODE_11NG: 6248 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 6249 break; 6250 default: 6251 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 6252 __func__, mode); 6253 return 0; 6254 } 6255 sc->sc_rates[mode] = rt; 6256 return (rt != NULL); 6257 } 6258 6259 static void 6260 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 6261 { 6262 #define N(a) (sizeof(a)/sizeof(a[0])) 6263 /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 6264 static const struct { 6265 u_int rate; /* tx/rx 802.11 rate */ 6266 u_int16_t timeOn; /* LED on time (ms) */ 6267 u_int16_t timeOff; /* LED off time (ms) */ 6268 } blinkrates[] = { 6269 { 108, 40, 10 }, 6270 { 96, 44, 11 }, 6271 { 72, 50, 13 }, 6272 { 48, 57, 14 }, 6273 { 36, 67, 16 }, 6274 { 24, 80, 20 }, 6275 { 22, 100, 25 }, 6276 { 18, 133, 34 }, 6277 { 12, 160, 40 }, 6278 { 10, 200, 50 }, 6279 { 6, 240, 58 }, 6280 { 4, 267, 66 }, 6281 { 2, 400, 100 }, 6282 { 0, 500, 130 }, 6283 /* XXX half/quarter rates */ 6284 }; 6285 const HAL_RATE_TABLE *rt; 6286 int i, j; 6287 6288 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 6289 rt = sc->sc_rates[mode]; 6290 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 6291 for (i = 0; i < rt->rateCount; i++) { 6292 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6293 if (rt->info[i].phy != IEEE80211_T_HT) 6294 sc->sc_rixmap[ieeerate] = i; 6295 else 6296 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; 6297 } 6298 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 6299 for (i = 0; i < N(sc->sc_hwmap); i++) { 6300 if (i >= rt->rateCount) { 6301 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 6302 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 6303 continue; 6304 } 6305 sc->sc_hwmap[i].ieeerate = 6306 rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6307 if (rt->info[i].phy == IEEE80211_T_HT) 6308 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; 6309 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 6310 if (rt->info[i].shortPreamble || 6311 rt->info[i].phy == IEEE80211_T_OFDM) 6312 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 6313 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; 6314 for (j = 0; j < N(blinkrates)-1; j++) 6315 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 6316 break; 6317 /* NB: this uses the last entry if the rate isn't found */ 6318 /* XXX beware of overlow */ 6319 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 6320 
        sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
6321     }
6322     sc->sc_currates = rt;
6323     sc->sc_curmode = mode;
6324     /*
6325      * All protection frames are transmitted at 2Mb/s for
6326      * 11g, otherwise at 1Mb/s.
6327      */
6328     if (mode == IEEE80211_MODE_11G)
6329         sc->sc_protrix = ath_tx_findrix(sc, 2*2);
6330     else
6331         sc->sc_protrix = ath_tx_findrix(sc, 2*1);
6332     /* NB: caller is responsible for resetting rate control state */
6333 #undef N
6334 }
6335
6336 static void
6337 ath_watchdog(void *arg)
6338 {
6339     struct ath_softc *sc = arg;
6340     int do_reset = 0;
6341
6342     wlan_serialize_enter();
6343     if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
6344         struct ifnet *ifp = sc->sc_ifp;
6345         uint32_t hangs;
6346
6347         ath_power_set_power_state(sc, HAL_PM_AWAKE);
6348
6349         if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
6350             hangs != 0) {
6351             if_printf(ifp, "%s hang detected (0x%x)\n",
6352                 hangs & 0xff ? "bb" : "mac", hangs);
6353         } else
6354             if_printf(ifp, "device timeout\n");
6355         do_reset = 1;
6356         ifp->if_oerrors++;
6357         sc->sc_stats.ast_watchdog++;
6358         ath_power_restore_power_state(sc);
6359     }
6360
6361     /*
6362      * We can't hold the lock across the ath_reset() call.
6363      *
6364      * And since this routine can't hold a lock and sleep,
6365      * do the reset deferred.
6366      */
6367     if (do_reset) {
6368         taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
6369     }
6370
6371     callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
6372     wlan_serialize_exit();
6373 }
6374
6375 /*
6376  * (DragonFly network start)
6377  */
6378 static void
6379 ath_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
6380 {
6381     struct ath_softc *sc = ifp->if_softc;
6382     struct mbuf *m;
6383
6384     wlan_assert_serialized();
6385     ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
6386
6387     if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) {
6388         ifq_purge(&ifp->if_snd);
6389         return;
6390     }
6391     ifq_set_oactive(&ifp->if_snd);
6392     for (;;) {
6393         m = ifq_dequeue(&ifp->if_snd);
6394         if (m == NULL)
6395             break;
6396         ath_transmit(ifp, m);
6397     }
6398     ifq_clr_oactive(&ifp->if_snd);
6399 }
6400
6401 /*
6402  * Fetch the rate control statistics for the given node.
6403  */
6404 static int
6405 ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs)
6406 {
6407     struct ath_node *an;
6408     struct ieee80211com *ic = sc->sc_ifp->if_l2com;
6409     struct ieee80211_node *ni;
6410     int error = 0;
6411
6412     /* Perform a lookup on the given node */
6413     ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr);
6414     if (ni == NULL) {
6415         error = EINVAL;
6416         goto bad;
6417     }
6418
6419     /* Lock the ath_node */
6420     an = ATH_NODE(ni);
6421     ATH_NODE_LOCK(an);
6422
6423     /* Fetch the rate control stats for this node */
6424     error = ath_rate_fetch_node_stats(sc, an, rs);
6425
6426     /* No matter what happens here, just drop through */
6427
6428     /* Unlock the ath_node */
6429     ATH_NODE_UNLOCK(an);
6430
6431     /* Unref the node */
6432     ieee80211_node_decref(ni);
6433
6434 bad:
6435     return (error);
6436 }
6437
6438 #ifdef ATH_DIAGAPI
6439 /*
6440  * Diagnostic interface to the HAL. This is used by various
6441  * tools to do things like retrieve register contents for
6442  * debugging. The mechanism is intentionally opaque so that
6443  * it can change frequently w/o concern for compatibility.
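 *
 * ATH_DIAG_IN requests carry a caller-supplied buffer that is copied
 * in before the HAL call; ATH_DIAG_DYN requests have the driver
 * allocate the result buffer and copy it back out afterwards.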
6444 */ 6445 static int 6446 ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) 6447 { 6448 struct ath_hal *ah = sc->sc_ah; 6449 u_int id = ad->ad_id & ATH_DIAG_ID; 6450 void *indata = NULL; 6451 void *outdata = NULL; 6452 u_int32_t insize = ad->ad_in_size; 6453 u_int32_t outsize = ad->ad_out_size; 6454 int error = 0; 6455 6456 if (ad->ad_id & ATH_DIAG_IN) { 6457 /* 6458 * Copy in data. 6459 */ 6460 indata = kmalloc(insize, M_TEMP, M_INTWAIT); 6461 if (indata == NULL) { 6462 error = ENOMEM; 6463 goto bad; 6464 } 6465 error = copyin(ad->ad_in_data, indata, insize); 6466 if (error) 6467 goto bad; 6468 } 6469 if (ad->ad_id & ATH_DIAG_DYN) { 6470 /* 6471 * Allocate a buffer for the results (otherwise the HAL 6472 * returns a pointer to a buffer where we can read the 6473 * results). Note that we depend on the HAL leaving this 6474 * pointer for us to use below in reclaiming the buffer; 6475 * may want to be more defensive. 6476 */ 6477 outdata = kmalloc(outsize, M_TEMP, M_INTWAIT); 6478 if (outdata == NULL) { 6479 error = ENOMEM; 6480 goto bad; 6481 } 6482 } 6483 6484 if (id != HAL_DIAG_REGS) 6485 ath_power_set_power_state(sc, HAL_PM_AWAKE); 6486 6487 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { 6488 if (outsize < ad->ad_out_size) 6489 ad->ad_out_size = outsize; 6490 if (outdata != NULL) 6491 error = copyout(outdata, ad->ad_out_data, 6492 ad->ad_out_size); 6493 } else { 6494 error = EINVAL; 6495 } 6496 if (id != HAL_DIAG_REGS) 6497 ath_power_restore_power_state(sc); 6498 bad: 6499 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) 6500 kfree(indata, M_TEMP); 6501 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) 6502 kfree(outdata, M_TEMP); 6503 return error; 6504 } 6505 #endif /* ATH_DIAGAPI */ 6506 6507 static int 6508 ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, 6509 struct ucred *cr __unused) 6510 { 6511 #define IS_RUNNING(ifp) \ 6512 ((ifp->if_flags & IFF_UP) && (ifp->if_flags & IFF_RUNNING)) 6513 struct ath_softc *sc = ifp->if_softc; 6514 struct ieee80211com *ic = ifp->if_l2com; 6515 struct ifreq *ifr = (struct ifreq *)data; 6516 const HAL_RATE_TABLE *rt; 6517 int error = 0; 6518 6519 switch (cmd) { 6520 case SIOCSIFFLAGS: 6521 ATH_LOCK(sc); 6522 if (IS_RUNNING(ifp)) { 6523 /* 6524 * To avoid rescanning another access point, 6525 * do not call ath_init() here. Instead, 6526 * only reflect promisc mode settings. 6527 */ 6528 ath_mode_init(sc); 6529 } else if (ifp->if_flags & IFF_UP) { 6530 /* 6531 * Beware of being called during attach/detach 6532 * to reset promiscuous mode. In that case we 6533 * will still be marked UP but not RUNNING. 6534 * However trying to re-init the interface 6535 * is the wrong thing to do as we've already 6536 * torn down much of our state. There's 6537 * probably a better way to deal with this. 
6538 */ 6539 if (!sc->sc_invalid) 6540 ath_init(sc); /* XXX lose error */ 6541 } else { 6542 ath_stop_locked(ifp); 6543 if (!sc->sc_invalid) 6544 ath_power_setpower(sc, HAL_PM_FULL_SLEEP); 6545 } 6546 ATH_UNLOCK(sc); 6547 break; 6548 case SIOCGIFMEDIA: 6549 case SIOCSIFMEDIA: 6550 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 6551 break; 6552 case SIOCGATHSTATS: 6553 /* NB: embed these numbers to get a consistent view */ 6554 sc->sc_stats.ast_tx_packets = ifp->if_opackets; 6555 sc->sc_stats.ast_rx_packets = ifp->if_ipackets; 6556 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi); 6557 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi); 6558 #ifdef IEEE80211_SUPPORT_TDMA 6559 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap); 6560 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam); 6561 #endif 6562 rt = sc->sc_currates; 6563 sc->sc_stats.ast_tx_rate = 6564 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC; 6565 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT) 6566 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS; 6567 return copyout(&sc->sc_stats, 6568 ifr->ifr_data, sizeof (sc->sc_stats)); 6569 case SIOCGATHAGSTATS: 6570 return copyout(&sc->sc_aggr_stats, 6571 ifr->ifr_data, sizeof (sc->sc_aggr_stats)); 6572 case SIOCZATHSTATS: 6573 error = priv_check(curthread, PRIV_DRIVER); 6574 if (error == 0) { 6575 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); 6576 memset(&sc->sc_aggr_stats, 0, 6577 sizeof(sc->sc_aggr_stats)); 6578 memset(&sc->sc_intr_stats, 0, 6579 sizeof(sc->sc_intr_stats)); 6580 } 6581 break; 6582 #ifdef ATH_DIAGAPI 6583 case SIOCGATHDIAG: 6584 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 6585 break; 6586 case SIOCGATHPHYERR: 6587 error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr); 6588 break; 6589 #endif 6590 case SIOCGATHSPECTRAL: 6591 error = ath_ioctl_spectral(sc,(struct ath_diag*) ifr); 6592 break; 6593 case SIOCGATHNODERATESTATS: 6594 error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr); 6595 break; 6596 case SIOCGIFADDR: 6597 error = ether_ioctl(ifp, cmd, data); 6598 break; 6599 default: 6600 error = EINVAL; 6601 break; 6602 } 6603 return error; 6604 #undef IS_RUNNING 6605 } 6606 6607 /* 6608 * Announce various information on device/driver attach. 
6609 */ 6610 static void 6611 ath_announce(struct ath_softc *sc) 6612 { 6613 struct ifnet *ifp = sc->sc_ifp; 6614 struct ath_hal *ah = sc->sc_ah; 6615 6616 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n", 6617 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, 6618 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 6619 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n", 6620 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev); 6621 if (bootverbose) { 6622 int i; 6623 for (i = 0; i <= WME_AC_VO; i++) { 6624 struct ath_txq *txq = sc->sc_ac2q[i]; 6625 if_printf(ifp, "Use hw queue %u for %s traffic\n", 6626 txq->axq_qnum, ieee80211_wme_acnames[i]); 6627 } 6628 if_printf(ifp, "Use hw queue %u for CAB traffic\n", 6629 sc->sc_cabq->axq_qnum); 6630 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 6631 } 6632 if (ath_rxbuf != ATH_RXBUF) 6633 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); 6634 if (ath_txbuf != ATH_TXBUF) 6635 if_printf(ifp, "using %u tx buffers\n", ath_txbuf); 6636 if (sc->sc_mcastkey && bootverbose) 6637 if_printf(ifp, "using multicast key search\n"); 6638 } 6639 6640 static void 6641 ath_dfs_tasklet(void *p, int npending) 6642 { 6643 struct ath_softc *sc = (struct ath_softc *) p; 6644 struct ifnet *ifp = sc->sc_ifp; 6645 struct ieee80211com *ic = ifp->if_l2com; 6646 6647 /* 6648 * If previous processing has found a radar event, 6649 * signal this to the net80211 layer to begin DFS 6650 * processing. 6651 */ 6652 wlan_serialize_enter(); 6653 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { 6654 /* DFS event found, initiate channel change */ 6655 /* 6656 * XXX doesn't currently tell us whether the event 6657 * XXX was found in the primary or extension 6658 * XXX channel! 6659 */ 6660 IEEE80211_LOCK(ic); 6661 ieee80211_dfs_notify_radar(ic, sc->sc_curchan); 6662 IEEE80211_UNLOCK(ic); 6663 } 6664 wlan_serialize_exit(); 6665 } 6666 6667 #if 0 6668 /* 6669 * Enable/disable power save. This must be called with 6670 * no TX driver locks currently held, so it should only 6671 * be called from the RX path (which doesn't hold any 6672 * TX driver locks.) 6673 */ 6674 static void 6675 ath_node_powersave(struct ieee80211_node *ni, int enable) 6676 { 6677 #ifdef ATH_SW_PSQ 6678 struct ath_node *an = ATH_NODE(ni); 6679 struct ieee80211com *ic = ni->ni_ic; 6680 struct ath_softc *sc = ic->ic_ifp->if_softc; 6681 struct ath_vap *avp = ATH_VAP(ni->ni_vap); 6682 6683 /* XXX and no TXQ locks should be held here */ 6684 6685 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6s: enable=%d\n", 6686 __func__, 6687 ath_hal_ether_sprintf(ni->ni_macaddr), 6688 !! enable); 6689 6690 /* Suspend or resume software queue handling */ 6691 if (enable) 6692 ath_tx_node_sleep(sc, an); 6693 else 6694 ath_tx_node_wakeup(sc, an); 6695 6696 /* Update net80211 state */ 6697 if (avp->av_node_ps) 6698 avp->av_node_ps(ni, enable); 6699 #else 6700 struct ath_vap *avp = ATH_VAP(ni->ni_vap); 6701 6702 /* Update net80211 state */ 6703 if (avp->av_node_ps) 6704 avp->av_node_ps(ni, enable); 6705 #endif/* ATH_SW_PSQ */ 6706 } 6707 6708 #endif 6709 6710 /* 6711 * Notification from net80211 that the powersave queue state has 6712 * changed. 6713 * 6714 * Since the software queue also may have some frames: 6715 * 6716 * + if the node software queue has frames and the TID state 6717 * is 0, we set the TIM; 6718 * + if the node and the stack are both empty, we clear the TIM bit. 6719 * + If the stack tries to set the bit, always set it. 
6720 * + If the stack tries to clear the bit, only clear it if the 6721 * software queue in question is also cleared. 6722 * 6723 * TODO: this is called during node teardown; so let's ensure this 6724 * is all correctly handled and that the TIM bit is cleared. 6725 * It may be that the node flush is called _AFTER_ the net80211 6726 * stack clears the TIM. 6727 * 6728 * Here is the racy part. Since it's possible >1 concurrent, 6729 * overlapping TXes will appear complete with a TX completion in 6730 * another thread, it's possible that the concurrent TIM calls will 6731 * clash. We can't hold the node lock here because setting the 6732 * TIM grabs the net80211 comlock and this may cause a LOR. 6733 * The solution is either to totally serialise _everything_ at 6734 * this point (ie, all TX, completion and any reset/flush go into 6735 * one taskqueue) or a new "ath TIM lock" needs to be created that 6736 * just wraps the driver state change and this call to avp->av_set_tim(). 6737 * 6738 * The same race exists in the net80211 power save queue handling 6739 * as well. Since multiple transmitting threads may queue frames 6740 * into the driver, as well as ps-poll and the driver transmitting 6741 * frames (and thus clearing the psq), it's quite possible that 6742 * a packet entering the PSQ and a ps-poll being handled will 6743 * race, causing the TIM to be cleared and not re-set. 6744 */ 6745 static int 6746 ath_node_set_tim(struct ieee80211_node *ni, int enable) 6747 { 6748 #ifdef ATH_SW_PSQ 6749 struct ieee80211com *ic = ni->ni_ic; 6750 struct ath_softc *sc = ic->ic_ifp->if_softc; 6751 struct ath_node *an = ATH_NODE(ni); 6752 struct ath_vap *avp = ATH_VAP(ni->ni_vap); 6753 int changed = 0; 6754 6755 ATH_TX_LOCK(sc); 6756 an->an_stack_psq = enable; 6757 6758 /* 6759 * This will get called for all operating modes, 6760 * even if avp->av_set_tim is unset. 6761 * It's currently set for hostap/ibss modes; but 6762 * the same infrastructure is used for both STA 6763 * and AP/IBSS node power save. 6764 */ 6765 if (avp->av_set_tim == NULL) { 6766 ATH_TX_UNLOCK(sc); 6767 return (0); 6768 } 6769 6770 /* 6771 * If setting the bit, always set it here. 6772 * If clearing the bit, only clear it if the 6773 * software queue is also empty. 6774 * 6775 * If the node has left power save, just clear the TIM 6776 * bit regardless of the state of the power save queue. 6777 * 6778 * XXX TODO: although atomics are used, it's quite possible 6779 * that a race will occur between this and setting/clearing 6780 * in another thread. TX completion will occur always in 6781 * one thread, however setting/clearing the TIM bit can come 6782 * from a variety of different process contexts! 
6783 */ 6784 if (enable && an->an_tim_set == 1) { 6785 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6786 "%s: %s: enable=%d, tim_set=1, ignoring\n", 6787 __func__, 6788 ath_hal_ether_sprintf(ni->ni_macaddr), 6789 enable); 6790 ATH_TX_UNLOCK(sc); 6791 } else if (enable) { 6792 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6793 "%s: %s: enable=%d, enabling TIM\n", 6794 __func__, 6795 ath_hal_ether_sprintf(ni->ni_macaddr), 6796 enable); 6797 an->an_tim_set = 1; 6798 ATH_TX_UNLOCK(sc); 6799 changed = avp->av_set_tim(ni, enable); 6800 } else if (an->an_swq_depth == 0) { 6801 /* disable */ 6802 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6803 "%s: %s: enable=%d, an_swq_depth == 0, disabling\n", 6804 __func__, 6805 ath_hal_ether_sprintf(ni->ni_macaddr), 6806 enable); 6807 an->an_tim_set = 0; 6808 ATH_TX_UNLOCK(sc); 6809 changed = avp->av_set_tim(ni, enable); 6810 } else if (! an->an_is_powersave) { 6811 /* 6812 * disable regardless; the node isn't in powersave now 6813 */ 6814 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6815 "%s: %s: enable=%d, an_pwrsave=0, disabling\n", 6816 __func__, 6817 ath_hal_ether_sprintf(ni->ni_macaddr), 6818 enable); 6819 an->an_tim_set = 0; 6820 ATH_TX_UNLOCK(sc); 6821 changed = avp->av_set_tim(ni, enable); 6822 } else { 6823 /* 6824 * psq disable, node is currently in powersave, node 6825 * software queue isn't empty, so don't clear the TIM bit 6826 * for now. 6827 */ 6828 ATH_TX_UNLOCK(sc); 6829 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6830 "%s: %s: enable=%d, an_swq_depth > 0, ignoring\n", 6831 __func__, 6832 ath_hal_ether_sprintf(ni->ni_macaddr), 6833 enable); 6834 changed = 0; 6835 } 6836 6837 return (changed); 6838 #else 6839 struct ath_vap *avp = ATH_VAP(ni->ni_vap); 6840 6841 /* 6842 * Some operating modes don't set av_set_tim(), so don't 6843 * update it here. 6844 */ 6845 if (avp->av_set_tim == NULL) 6846 return (0); 6847 6848 return (avp->av_set_tim(ni, enable)); 6849 #endif /* ATH_SW_PSQ */ 6850 } 6851 6852 /* 6853 * Set or update the TIM from the software queue. 6854 * 6855 * Check the software queue depth before attempting to do lock 6856 * anything; that avoids trying to obtain the lock. Then, 6857 * re-check afterwards to ensure nothing has changed in the 6858 * meantime. 6859 * 6860 * set: This is designed to be called from the TX path, after 6861 * a frame has been queued; to see if the swq > 0. 6862 * 6863 * clear: This is designed to be called from the buffer completion point 6864 * (right now it's ath_tx_default_comp()) where the state of 6865 * a software queue has changed. 6866 * 6867 * It makes sense to place it at buffer free / completion rather 6868 * than after each software queue operation, as there's no real 6869 * point in churning the TIM bit as the last frames in the software 6870 * queue are transmitted. If they fail and we retry them, we'd 6871 * just be setting the TIM bit again anyway. 6872 */ 6873 void 6874 ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni, 6875 int enable) 6876 { 6877 #ifdef ATH_SW_PSQ 6878 struct ath_node *an; 6879 struct ath_vap *avp; 6880 6881 /* Don't do this for broadcast/etc frames */ 6882 if (ni == NULL) 6883 return; 6884 6885 an = ATH_NODE(ni); 6886 avp = ATH_VAP(ni->ni_vap); 6887 6888 /* 6889 * And for operating modes without the TIM handler set, let's 6890 * just skip those. 
     */
6892     if (avp->av_set_tim == NULL)
6893         return;
6894
6895     ATH_TX_LOCK_ASSERT(sc);
6896
6897     if (enable) {
6898         if (an->an_is_powersave &&
6899             an->an_tim_set == 0 &&
6900             an->an_swq_depth != 0) {
6901             DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6902                 "%s: %s: swq_depth>0, tim_set=0, set!\n",
6903                 __func__,
6904                 ath_hal_ether_sprintf(ni->ni_macaddr));
6905             an->an_tim_set = 1;
6906             (void) avp->av_set_tim(ni, 1);
6907         }
6908     } else {
6909         /*
6910          * Don't bother clearing the TIM unless the queue is empty.
6911          */
6912         if (an->an_swq_depth != 0)
6913             return;
6914
6915         if (an->an_is_powersave &&
6916             an->an_stack_psq == 0 &&
6917             an->an_tim_set == 1 &&
6918             an->an_swq_depth == 0) {
6919             DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6920                 "%s: %s: swq_depth=0, tim_set=1, psq_set=0,"
6921                 " clear!\n",
6922                 __func__,
6923                 ath_hal_ether_sprintf(ni->ni_macaddr));
6924             an->an_tim_set = 0;
6925             (void) avp->av_set_tim(ni, 0);
6926         }
6927     }
6928 #else
6929     return;
6930 #endif    /* ATH_SW_PSQ */
6931 }
6932
6933 #if 0
6934 /*
6935  * Received a ps-poll frame from net80211.
6936  *
6937  * Here we get a chance to serve out a software-queued frame ourselves
6938  * before we punt it to net80211 to transmit us one itself - either
6939  * because there's traffic in the net80211 psq, or a NULL frame to
6940  * indicate there's nothing else.
6941  */
6942 static void
6943 ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m)
6944 {
6945 #ifdef ATH_SW_PSQ
6946     struct ath_node *an;
6947     struct ath_vap *avp;
6948     struct ieee80211com *ic = ni->ni_ic;
6949     struct ath_softc *sc = ic->ic_ifp->if_softc;
6950     int tid;
6951
6952     /* Just paranoia */
6953     if (ni == NULL)
6954         return;
6955
6956     /*
6957      * Unassociated (temporary node) station.
6958      */
6959     if (ni->ni_associd == 0)
6960         return;
6961
6962     /*
6963      * We do have an active node, so let's begin looking into it.
6964      */
6965     an = ATH_NODE(ni);
6966     avp = ATH_VAP(ni->ni_vap);
6967
6968     /*
6969      * For now, we just call the original ps-poll method.
6970      * Once we're ready to flip this on:
6971      *
6972      * + Set leak to 1, as no matter what we're going to have
6973      *   to send a frame;
6974      * + Check the software queue and if there's something in it,
6975      *   schedule the highest TID that has traffic from this node.
6976      *   Then make sure we schedule the software scheduler to
6977      *   run so it picks up said frame.
6978      *
6979      * That way whatever happens, we'll at least send _a_ frame
6980      * to the given node.
6981      *
6982      * Again, yes, it's crappy QoS if the node has multiple
6983      * TIDs worth of traffic - but let's get it working first
6984      * before we optimise it.
6985      *
6986      * Also yes, there's definitely latency here - we're not
6987      * direct dispatching to the hardware in this path (and
6988      * we're likely being called from the packet receive path,
6989      * so going back into TX may be a little hairy!) but again
6990      * I'd like to get this working first before optimising
6991      * turn-around time.
6992      */
6993
6994     ATH_TX_LOCK(sc);
6995
6996     /*
6997      * Legacy - we're called and the node isn't asleep.
6998      * Immediately punt.
6999      */
7000     if (! an->an_is_powersave) {
7001         DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
7002             "%s: %6D: not in powersave?\n",
7003             __func__,
7004             ni->ni_macaddr,
7005             ":");
7006         ATH_TX_UNLOCK(sc);
7007         if (avp->av_recv_pspoll)
7008             avp->av_recv_pspoll(ni, m);
7009         return;
7010     }
7011
7012     /*
7013      * We're in powersave.
7014      *
7015      * Leak a frame.
7016      */
7017     an->an_leak_count = 1;
7018
7019     /*
7020      * Now, if there are no frames in the node, just punt to
7021      * recv_pspoll.
7022 * 7023 * Don't bother checking if the TIM bit is set, we really 7024 * only care if there are any frames here! 7025 */ 7026 if (an->an_swq_depth == 0) { 7027 ATH_TX_UNLOCK(sc); 7028 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 7029 "%s: %6D: SWQ empty; punting to net80211\n", 7030 __func__, 7031 ni->ni_macaddr, 7032 ":"); 7033 if (avp->av_recv_pspoll) 7034 avp->av_recv_pspoll(ni, m); 7035 return; 7036 } 7037 7038 /* 7039 * Ok, let's schedule the highest TID that has traffic 7040 * and then schedule something. 7041 */ 7042 for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) { 7043 struct ath_tid *atid = &an->an_tid[tid]; 7044 /* 7045 * No frames? Skip. 7046 */ 7047 if (atid->axq_depth == 0) 7048 continue; 7049 ath_tx_tid_sched(sc, atid); 7050 /* 7051 * XXX we could do a direct call to the TXQ 7052 * scheduler code here to optimise latency 7053 * at the expense of a REALLY deep callstack. 7054 */ 7055 ATH_TX_UNLOCK(sc); 7056 taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask); 7057 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 7058 "%s: %6D: leaking frame to TID %d\n", 7059 __func__, 7060 ni->ni_macaddr, 7061 ":", 7062 tid); 7063 return; 7064 } 7065 7066 ATH_TX_UNLOCK(sc); 7067 7068 /* 7069 * XXX nothing in the TIDs at this point? Eek. 7070 */ 7071 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 7072 "%s: %6D: TIDs empty, but ath_node showed traffic?!\n", 7073 __func__, 7074 ni->ni_macaddr, 7075 ":"); 7076 if (avp->av_recv_pspoll) 7077 avp->av_recv_pspoll(ni, m); 7078 #else 7079 if (avp->av_recv_pspoll) 7080 avp->av_recv_pspoll(ni, m); 7081 #endif /* ATH_SW_PSQ */ 7082 } 7083 7084 #endif 7085 7086 MODULE_VERSION(if_ath, 1); 7087 MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 7088 #if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) 7089 MODULE_DEPEND(if_ath, alq, 1, 1, 1); 7090 #endif 7091