/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net/ifq_var.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>
#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tsf.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_sysctl.h>
#include <dev/netif/ath/ath/if_ath_led.h>
#include <dev/netif/ath/ath/if_ath_keycache.h>
#include <dev/netif/ath/ath/if_ath_rx.h>
#include <dev/netif/ath/ath/if_ath_rx_edma.h>
#include <dev/netif/ath/ath/if_ath_tx_edma.h>
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_ath_btcoex.h>
#include <dev/netif/ath/ath/if_ath_spectral.h>
#include <dev/netif/ath/ath/if_ath_lna_div.h>
#include <dev/netif/ath/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

/*
 * Only enable this if you're working on PS-POLL support.
 */
#define ATH_SW_PSQ

#ifdef __DragonFly__
#define CURVNET_SET(name)
#define CURVNET_RESTORE()
#endif

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
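 *
 * For example (using a hypothetical EEPROM address): with a base MAC
 * of 00:03:7f:aa:bb:cc, clone slot i (1 <= i < 8) yields
 * (0x02 | (i << 2)):03:7f:aa:bb:cc - the U/L bit marks the address as
 * locally administered and bits 2-4 of the first byte carry the slot
 * index (see assign_address() below).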
 */
CTASSERT(ATH_BCBUF <= 8);

static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
                    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
                    const uint8_t [IEEE80211_ADDR_LEN],
                    const uint8_t [IEEE80211_ADDR_LEN]);
static void     ath_vap_delete(struct ieee80211vap *);
static void     ath_init(void *);
static void     ath_stop_locked(struct ifnet *);
static void     ath_stop(struct ifnet *);
static int      ath_reset_vap(struct ieee80211vap *, u_long);
static int      ath_transmit(struct ifnet *ifp, struct mbuf *m);
#if 0
static void     ath_qflush(struct ifnet *ifp);
#endif
static int      ath_media_change(struct ifnet *);
static void     ath_watchdog(void *);
static void     ath_start(struct ifnet *, struct ifaltq_subque *);
static int      ath_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     ath_fatal_proc(void *, int);
static void     ath_bmiss_vap(struct ieee80211vap *);
static void     ath_bmiss_proc(void *, int);
static void     ath_key_update_begin(struct ieee80211vap *);
static void     ath_key_update_end(struct ieee80211vap *);
static void     ath_update_mcast(struct ifnet *);
static void     ath_update_promisc(struct ifnet *);
static void     ath_updateslot(struct ifnet *);
static void     ath_bstuck_proc(void *, int);
static void     ath_reset_proc(void *, int);
static int      ath_desc_alloc(struct ath_softc *);
static void     ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
                    const uint8_t [IEEE80211_ADDR_LEN]);
static void     ath_node_cleanup(struct ieee80211_node *);
static void     ath_node_free(struct ieee80211_node *);
static void     ath_node_getsignal(const struct ieee80211_node *,
                    int8_t *, int8_t *);
static void     ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc *, int qtype,
                    int subtype);
static int      ath_tx_setup(struct ath_softc *, int, int);
static void     ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void     ath_tx_cleanup(struct ath_softc *);
static int      ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq,
                    int dosched);
static void     ath_tx_proc_q0(void *, int);
static void     ath_tx_proc_q0123(void *, int);
static void     ath_tx_proc(void *, int);
static void     ath_txq_sched_tasklet(void *, int);
static int      ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void     ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void     ath_scan_start(struct ieee80211com *);
static void     ath_scan_end(struct ieee80211com *);
static void     ath_set_channel(struct ieee80211com *);
#ifdef ATH_ENABLE_11N
static void     ath_update_chw(struct ieee80211com *);
#endif /* ATH_ENABLE_11N */
static void     ath_calibrate(void *);
static int      ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void     ath_setup_stationkey(struct ieee80211_node *);
static void     ath_newassoc(struct ieee80211_node *, int);
static int      ath_setregdomain(struct ieee80211com *,
                    struct ieee80211_regdomain *, int,
                    struct ieee80211_channel []);
static void     ath_getradiocaps(struct ieee80211com *, int, int *,
                    struct ieee80211_channel []);
static int      ath_getchannels(struct ath_softc *);

static int      ath_rate_setup(struct ath_softc *, u_int mode);
static void     ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
static void     ath_announce(struct ath_softc *);

static void     ath_dfs_tasklet(void *, int);
#if 0
static void     ath_node_powersave(struct ieee80211_node *, int);
static void     ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *);
#endif
static int      ath_node_set_tim(struct ieee80211_node *, int);

#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/netif/ath/ath/if_ath_tdma.h>
#endif

extern const char *ath_hal_ether_sprintf(const u_int8_t *mac);

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static int ath_longcalinterval = 30;            /* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
            0, "long chip calibration interval (secs)");
static int ath_shortcalinterval = 100;          /* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
            0, "short chip calibration interval (msecs)");
static int ath_resetcalinterval = 20*60;        /* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
            0, "reset chip calibration results (secs)");
static int ath_anicalinterval = 100;            /* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
            0, "ANI calibration (msecs)");

int ath_rxbuf = ATH_RXBUF;              /* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
            0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
int ath_txbuf = ATH_TXBUF;              /* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
            0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
int ath_txbuf_mgmt = ATH_MGMT_TXBUF;    /* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
            0, "tx (mgmt) buffers allocated");
TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);

int ath_bstuck_threshold = 4;           /* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
            0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

void
ath_legacy_attach_comp_func(struct ath_softc *sc)
{

        /*
         * Special case certain configurations.  Note the
         * CAB queue is handled by these specially so don't
         * include them when checking the txq setup mask.
         */
        switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
        case 0x01:
                TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
                break;
        case 0x0f:
                TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
                break;
        default:
                TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
                break;
        }
}

/*
 * Set the target power mode.
 *
 * If this is called during a point in time where
 * the hardware is being programmed elsewhere, it will
 * simply store it away and update it when all current
 * uses of the hardware are completed.
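 *
 * For example, if a caller currently holds a power reference
 * (sc_powersave_refcnt > 0) and this is called with
 * HAL_PM_NETWORK_SLEEP, only sc_target_powerstate is updated here;
 * the chip actually transitions to network sleep when the last
 * reference is dropped in ath_power_restore_power_state().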
 */
void
_ath_power_setpower(struct ath_softc *sc, int power_state, const char *file,
    int line)
{
        ATH_LOCK_ASSERT(sc);

        sc->sc_target_powerstate = power_state;

        DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
            __func__,
            file,
            line,
            power_state,
            sc->sc_powersave_refcnt);

        if (sc->sc_powersave_refcnt == 0 &&
            power_state != sc->sc_cur_powerstate) {
                sc->sc_cur_powerstate = power_state;
                ath_hal_setpower(sc->sc_ah, power_state);

                /*
                 * If the NIC is force-awake, then set the
                 * self-gen frame state appropriately.
                 *
                 * If the NIC is in network sleep or full-sleep,
                 * we let the above call leave the self-gen
                 * state as "sleep".
                 */
                if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
                    sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
                        ath_hal_setselfgenpower(sc->sc_ah,
                            sc->sc_target_selfgen_state);
                }
        }
}

/*
 * Set the current self-generated frames state.
 *
 * This is separate from the target power mode.  The chip may be
 * awake but the desired state is "sleep", so frames sent to the
 * destination have PWRMGT=1 in the 802.11 header.  The NIC also
 * needs to know to set PWRMGT=1 in self-generated frames.
 */
void
_ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file,
    int line)
{

        ATH_LOCK_ASSERT(sc);

        DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
            __func__,
            file,
            line,
            power_state,
            sc->sc_target_selfgen_state);

        sc->sc_target_selfgen_state = power_state;

        /*
         * If the NIC is force-awake, then set the power state.
         * Network-sleep and full-sleep will already transition it to
         * mark self-gen frames as sleeping - and we can't
         * guarantee the NIC is awake to program the self-gen frame
         * setting anyway.
         */
        if (sc->sc_cur_powerstate == HAL_PM_AWAKE) {
                ath_hal_setselfgenpower(sc->sc_ah, power_state);
        }
}

/*
 * Set the hardware power mode and take a reference.
 *
 * This doesn't update the target power mode in the driver;
 * it just updates the hardware power state.
 *
 * XXX it should only ever force the hardware awake; it should
 * never be called to set it asleep.
 */
void
_ath_power_set_power_state(struct ath_softc *sc, int power_state,
    const char *file, int line)
{
        ATH_LOCK_ASSERT(sc);

        DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
            __func__,
            file,
            line,
            power_state,
            sc->sc_powersave_refcnt);

        sc->sc_powersave_refcnt++;

        if (power_state != sc->sc_cur_powerstate) {
                ath_hal_setpower(sc->sc_ah, power_state);
                sc->sc_cur_powerstate = power_state;

                /*
                 * Adjust the self-gen powerstate if appropriate.
                 */
                if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
                    sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
                        ath_hal_setselfgenpower(sc->sc_ah,
                            sc->sc_target_selfgen_state);
                }
        }
}

/*
 * Restore the power save mode to what it once was.
 *
 * This will decrement the reference counter and once it hits
 * zero, it'll restore the powersave state.
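 *
 * The usual pattern throughout this file is:
 *
 *      ath_power_set_power_state(sc, HAL_PM_AWAKE);
 *      ... program hardware registers ...
 *      ath_power_restore_power_state(sc);
 *
 * so the chip only drops back to the target power state once the
 * last such section has completed.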
 */
void
_ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line)
{

        ATH_LOCK_ASSERT(sc);

        DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) refcnt=%d, target state=%d\n",
            __func__,
            file,
            line,
            sc->sc_powersave_refcnt,
            sc->sc_target_powerstate);

        if (sc->sc_powersave_refcnt == 0)
                device_printf(sc->sc_dev, "%s: refcnt=0?\n", __func__);
        else
                sc->sc_powersave_refcnt--;

        if (sc->sc_powersave_refcnt == 0 &&
            sc->sc_target_powerstate != sc->sc_cur_powerstate) {
                sc->sc_cur_powerstate = sc->sc_target_powerstate;
                ath_hal_setpower(sc->sc_ah, sc->sc_target_powerstate);
        }

        /*
         * Adjust the self-gen powerstate if appropriate.
         */
        if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
            sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
                ath_hal_setselfgenpower(sc->sc_ah,
                    sc->sc_target_selfgen_state);
        }
}

#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define HAL_MODE_HT40 \
        (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
        HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
        struct ifnet *ifp;
        struct ieee80211com *ic;
        struct ath_hal *ah = NULL;
        HAL_STATUS status;
        int error = 0, i;
        u_int wmodes;
        uint8_t macaddr[IEEE80211_ADDR_LEN];
        int rx_chainmask, tx_chainmask;

        DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

        CURVNET_SET(vnet0);
        ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
        if (ifp == NULL) {
                device_printf(sc->sc_dev, "can not if_alloc()\n");
                error = ENOSPC;
                CURVNET_RESTORE();
                goto bad;
        }
        ic = ifp->if_l2com;

        /* set these up early for if_printf use */
        if_initname(ifp, device_get_name(sc->sc_dev),
            device_get_unit(sc->sc_dev));
        CURVNET_RESTORE();

        /* prepare sysctl tree for use in sub modules */
        sysctl_ctx_init(&sc->sc_sysctl_ctx);
        sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
                SYSCTL_STATIC_CHILDREN(_hw),
                OID_AUTO,
                device_get_nameunit(sc->sc_dev),
                CTLFLAG_RD, 0, "");

        ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
            sc->sc_eepromdata, &status);
        if (ah == NULL) {
                if_printf(ifp, "unable to attach hardware; HAL status %u\n",
                    status);
                error = ENXIO;
                goto bad;
        }
        sc->sc_ah = ah;
        sc->sc_invalid = 0;     /* ready to go, enable interrupt handling */
#ifdef ATH_DEBUG
        sc->sc_debug = ath_debug;
#endif

        /*
         * Setup the DMA/EDMA functions based on the current
         * hardware support.
         *
         * This is required before the descriptors are allocated.
         */
        if (ath_hal_hasedma(sc->sc_ah)) {
                sc->sc_isedma = 1;
                ath_recv_setup_edma(sc);
                ath_xmit_setup_edma(sc);
        } else {
                ath_recv_setup_legacy(sc);
                ath_xmit_setup_legacy(sc);
        }

        if (ath_hal_hasmybeacon(sc->sc_ah)) {
                sc->sc_do_mybeacon = 1;
        }

        /*
         * Check if the MAC has multi-rate retry support.
         * We do this by trying to setup a fake extended
         * descriptor.  MAC's that don't have support will
         * return false w/o doing anything.  MAC's that do
         * support it will return true w/o doing anything.
         */
        sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

        /*
         * Check if the device has hardware counters for PHY
         * errors.  If so we need to enable the MIB interrupt
         * so we can act on stat triggers.
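         * (The hardware MIB counters saturate rather than wrap, so the
         * MIB interrupt is what lets the driver drain them - and feed
         * ANI - before the counts pin at their maximum; see the
         * HAL_INT_MIB handling in ath_intr().)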
         */
        if (ath_hal_hwphycounters(ah))
                sc->sc_needmib = 1;

        /*
         * Get the hardware key cache size.
         */
        sc->sc_keymax = ath_hal_keycachesize(ah);
        if (sc->sc_keymax > ATH_KEYMAX) {
                if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
                    ATH_KEYMAX, sc->sc_keymax);
                sc->sc_keymax = ATH_KEYMAX;
        }
        /*
         * Reset the key cache since some parts do not
         * reset the contents on initial power up.
         */
        for (i = 0; i < sc->sc_keymax; i++)
                ath_hal_keyreset(ah, i);

        /*
         * Collect the default channel list.
         */
        error = ath_getchannels(sc);
        if (error != 0)
                goto bad;

        /*
         * Setup rate tables for all potential media types.
         */
        ath_rate_setup(sc, IEEE80211_MODE_11A);
        ath_rate_setup(sc, IEEE80211_MODE_11B);
        ath_rate_setup(sc, IEEE80211_MODE_11G);
        ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
        ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
        ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
        ath_rate_setup(sc, IEEE80211_MODE_11NA);
        ath_rate_setup(sc, IEEE80211_MODE_11NG);
        ath_rate_setup(sc, IEEE80211_MODE_HALF);
        ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

        /* NB: setup here so ath_rate_update is happy */
        ath_setcurmode(sc, IEEE80211_MODE_11A);

        /*
         * Allocate TX descriptors and populate the lists.
         */
        wlan_assert_serialized();
        wlan_serialize_exit();
        error = ath_desc_alloc(sc);
        wlan_serialize_enter();
        if (error != 0) {
                if_printf(ifp, "failed to allocate TX descriptors: %d\n",
                    error);
                goto bad;
        }
        error = ath_txdma_setup(sc);
        if (error != 0) {
                if_printf(ifp, "failed to setup TX descriptor DMA: %d\n",
                    error);
                goto bad;
        }

        /*
         * Allocate RX descriptors and populate the lists.
         */
        error = ath_rxdma_setup(sc);
        if (error != 0) {
                if_printf(ifp, "failed to allocate RX descriptors: %d\n",
                    error);
                goto bad;
        }

        callout_init_mp(&sc->sc_cal_ch);
        callout_init_mp(&sc->sc_wd_ch);

        ATH_TXBUF_LOCK_INIT(sc);

        sc->sc_tq = taskqueue_create("ath_taskq", M_INTWAIT,
                taskqueue_thread_enqueue, &sc->sc_tq);
        taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1,
                "%s taskq", ifp->if_xname);

        TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
        TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
        TASK_INIT(&sc->sc_bstucktask, 0, ath_bstuck_proc, sc);
        TASK_INIT(&sc->sc_resettask, 0, ath_reset_proc, sc);
        TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc);
        TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);

        /*
         * Allocate hardware transmit queues: one queue for
         * beacon frames and one data queue for each QoS
         * priority.  Note that the hal handles resetting
         * these queues at the needed time.
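         *
         * For example, when four hardware data queues are available the
         * four WME access categories map 1:1 (BK lowest priority, then
         * BE, VI and VO); see the ath_tx_setup() calls below for the
         * fallback when fewer queues exist.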
         *
         * XXX PS-Poll
         */
        sc->sc_bhalq = ath_beaconq_setup(sc);
        if (sc->sc_bhalq == (u_int) -1) {
                if_printf(ifp, "unable to setup a beacon xmit queue!\n");
                error = EIO;
                goto bad2;
        }
        sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
        if (sc->sc_cabq == NULL) {
                if_printf(ifp, "unable to setup CAB xmit queue!\n");
                error = EIO;
                goto bad2;
        }
        /* NB: ensure BK queue is the lowest priority h/w queue */
        if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
                if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
                    ieee80211_wme_acnames[WME_AC_BK]);
                error = EIO;
                goto bad2;
        }
        if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
            !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
            !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
                /*
                 * Not enough hardware tx queues to properly do WME;
                 * just punt and assign them all to the same h/w queue.
                 * We could do a better job of this if, for example,
                 * we allocate queues when we switch from station to
                 * AP mode.
                 */
                if (sc->sc_ac2q[WME_AC_VI] != NULL)
                        ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
                if (sc->sc_ac2q[WME_AC_BE] != NULL)
                        ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
                sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
                sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
                sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
        }

        /*
         * Attach the TX completion function.
         *
         * The non-EDMA chips may have some special case optimisations;
         * this method gives everyone a chance to attach cleanly.
         */
        sc->sc_tx.xmit_attach_comp_func(sc);

        /*
         * Setup rate control.  Some rate control modules
         * call back to change the antenna state so expose
         * the necessary entry points.
         * XXX maybe belongs in struct ath_ratectrl?
         */
        sc->sc_setdefantenna = ath_setdefantenna;
        sc->sc_rc = ath_rate_attach(sc);
        if (sc->sc_rc == NULL) {
                error = EIO;
                goto bad2;
        }

        /* Attach DFS module */
        if (! ath_dfs_attach(sc)) {
                device_printf(sc->sc_dev,
                    "%s: unable to attach DFS\n", __func__);
                error = EIO;
                goto bad2;
        }

        /* Attach spectral module */
        if (ath_spectral_attach(sc) < 0) {
                device_printf(sc->sc_dev,
                    "%s: unable to attach spectral\n", __func__);
                error = EIO;
                goto bad2;
        }

        /* Attach bluetooth coexistence module */
        if (ath_btcoex_attach(sc) < 0) {
                device_printf(sc->sc_dev,
                    "%s: unable to attach bluetooth coexistence\n", __func__);
                error = EIO;
                goto bad2;
        }

        /* Attach LNA diversity module */
        if (ath_lna_div_attach(sc) < 0) {
                device_printf(sc->sc_dev,
                    "%s: unable to attach LNA diversity\n", __func__);
                error = EIO;
                goto bad2;
        }

        /* Start DFS processing tasklet */
        TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

        /* Configure LED state */
        sc->sc_blinking = 0;
        sc->sc_ledstate = 1;
        sc->sc_ledon = 0;                       /* low true */
        sc->sc_ledidle = (2700*hz)/1000;        /* 2.7sec */
        callout_init_mp(&sc->sc_ledtimer);

        /*
         * Don't setup hardware-based blinking.
         *
         * Although some NICs may have this configured in the
         * default reset register values, the user may wish
         * to alter which pins have which function.
         *
         * The reference driver attaches the MAC network LED to GPIO1 and
         * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
         * NIC has these reversed.
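         *
         * Both the pin assignment and the soft/hard LED behaviour can
         * be changed at runtime via the LED-related sysctls attached in
         * ath_sysctlattach(), so no single default suits everyone.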
         */
        sc->sc_hardled = (1 == 0);
        sc->sc_led_net_pin = -1;
        sc->sc_led_pwr_pin = -1;
        /*
         * Auto-enable soft led processing for IBM cards and for
         * 5211 minipci cards.  Users can also manually enable/disable
         * support with a sysctl.
         */
        sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
        ath_led_config(sc);
        ath_hal_setledstate(ah, HAL_LED_INIT);

        ifp->if_softc = sc;
        ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
#if 0
        ifp->if_transmit = ath_transmit;
        ifp->if_qflush = ath_qflush;
#endif
        ifp->if_start = ath_start;
        ifp->if_ioctl = ath_ioctl;
        ifp->if_init = ath_init;
        ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
#if 0
        ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
        IFQ_SET_READY(&ifp->if_snd);
#endif

        ic->ic_ifp = ifp;
        /* XXX not right but it's not used anywhere important */
        ic->ic_phytype = IEEE80211_T_OFDM;
        ic->ic_opmode = IEEE80211_M_STA;
        ic->ic_caps =
                  IEEE80211_C_STA               /* station mode */
                | IEEE80211_C_IBSS              /* ibss, nee adhoc, mode */
                | IEEE80211_C_HOSTAP            /* hostap mode */
                | IEEE80211_C_MONITOR           /* monitor mode */
                | IEEE80211_C_AHDEMO            /* adhoc demo mode */
                | IEEE80211_C_WDS               /* 4-address traffic works */
                | IEEE80211_C_MBSS              /* mesh point link mode */
                | IEEE80211_C_SHPREAMBLE        /* short preamble supported */
                | IEEE80211_C_SHSLOT            /* short slot time supported */
                | IEEE80211_C_WPA               /* capable of WPA1+WPA2 */
#ifndef ATH_ENABLE_11N
                | IEEE80211_C_BGSCAN            /* capable of bg scanning */
#endif
                | IEEE80211_C_TXFRAG            /* handle tx frags */
#ifdef ATH_ENABLE_DFS
                | IEEE80211_C_DFS               /* Enable radar detection */
#endif
                | IEEE80211_C_PMGT              /* Station side power mgmt */
                | IEEE80211_C_SWSLEEP
                ;
        /*
         * Query the hal to figure out h/w crypto support.
         */
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
                /*
                 * Check if h/w does the MIC and/or whether the
                 * separate key cache entries are required to
                 * handle both tx+rx MIC keys.
                 */
                if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
                        ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
                /*
                 * If the h/w supports storing tx+rx MIC keys
                 * in one cache slot automatically enable use.
                 */
                if (ath_hal_hastkipsplit(ah) ||
                    !ath_hal_settkipsplit(ah, AH_FALSE))
                        sc->sc_splitmic = 1;
                /*
                 * If the h/w can do TKIP MIC together with WME then
                 * we use it; otherwise we force the MIC to be done
                 * in software by the net80211 layer.
                 */
                if (ath_hal_haswmetkipmic(ah))
                        sc->sc_wmetkipmic = 1;
        }
        sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
        /*
         * Check for multicast key search support.
         */
        if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
            !ath_hal_getmcastkeysearch(sc->sc_ah)) {
                ath_hal_setmcastkeysearch(sc->sc_ah, 1);
        }
        sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
        /*
         * Mark key cache slots associated with global keys
         * as in use.
         * If we knew TKIP was not to be used we could leave
         * the +32, +64, and +32+64 slots free.
         */
        for (i = 0; i < IEEE80211_WEP_NKID; i++) {
                setbit(sc->sc_keymap, i);
                setbit(sc->sc_keymap, i+64);
                if (sc->sc_splitmic) {
                        setbit(sc->sc_keymap, i+32);
                        setbit(sc->sc_keymap, i+32+64);
                }
        }
        /*
         * TPC support can be done either with a global cap or
         * per-packet support.  The latter is not available on
         * all parts.  We're a bit pedantic here as all parts
         * support a global cap.
         */
        if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
                ic->ic_caps |= IEEE80211_C_TXPMGT;

        /*
         * Mark WME capability only if we have sufficient
         * hardware queues to do proper priority scheduling.
         */
        if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
                ic->ic_caps |= IEEE80211_C_WME;
        /*
         * Check for misc other capabilities.
         */
        if (ath_hal_hasbursting(ah))
                ic->ic_caps |= IEEE80211_C_BURST;
        sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
        sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
        sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
        sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
        sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
        sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah);
        sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah);
        sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah);

        if (ath_hal_hasfastframes(ah))
                ic->ic_caps |= IEEE80211_C_FF;
        wmodes = ath_hal_getwirelessmodes(ah);
        if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
                ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
        if (ath_hal_macversion(ah) > 0x78) {
                ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
                ic->ic_tdma_update = ath_tdma_update;
        }
#endif

        /*
         * TODO: enforce that at least this many frames are available
         * in the txbuf list before allowing data frames (raw or
         * otherwise) to be transmitted.
         */
        sc->sc_txq_data_minfree = 10;
        /*
         * Leave this as default to maintain legacy behaviour.
         * Shortening the cabq/mcastq may end up causing some
         * undesirable behaviour.
         */
        sc->sc_txq_mcastq_maxdepth = ath_txbuf;

        /*
         * How deep can the node software TX queue get whilst it's asleep.
         */
        sc->sc_txq_node_psq_maxdepth = 16;

        /*
         * Default the maximum queue depth for a given node
         * to 1/4'th the TX buffers, or 64, whichever
         * is larger.
         */
        sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4);

        /* Enable CABQ by default */
        sc->sc_cabq_enable = 1;

        /*
         * Allow the TX and RX chainmasks to be overridden by
         * environment variables and/or device.hints.
         *
         * This must be done early - before the hardware is
         * calibrated or before the 802.11n stream calculation
         * is done.
         */
        if (resource_int_value(device_get_name(sc->sc_dev),
            device_get_unit(sc->sc_dev), "rx_chainmask",
            &rx_chainmask) == 0) {
                device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
                    rx_chainmask);
                (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
        }
        if (resource_int_value(device_get_name(sc->sc_dev),
            device_get_unit(sc->sc_dev), "tx_chainmask",
            &tx_chainmask) == 0) {
                device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
                    tx_chainmask);
                (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
        }

        /*
         * Query the TX/RX chainmask configuration.
         *
         * This is only relevant for 11n devices.
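         *
         * Each bit in a chainmask selects one radio chain; e.g. a
         * hypothetical 2x2 device reports 0x3 for both masks, and a
         * loader hint such as "hint.ath.0.tx_chainmask=1" would
         * restrict it to a single TX chain.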
         */
        ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
        ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

        /*
         * Disable MRR with protected frames by default.
         * Only 802.11n series NICs can handle this.
         */
        sc->sc_mrrprot = 0;     /* XXX should be a capability */

        /*
         * Query the enterprise mode information from the HAL.
         */
        if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0,
            &sc->sc_ent_cfg) == HAL_OK)
                sc->sc_use_ent = 1;

#ifdef ATH_ENABLE_11N
        /*
         * Query HT capabilities
         */
        if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
            (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
                uint32_t rxs, txs;

                device_printf(sc->sc_dev, "[HT] enabling HT modes\n");

                sc->sc_mrrprot = 1;     /* XXX should be a capability */

                ic->ic_htcaps = IEEE80211_HTC_HT        /* HT operation */
                            | IEEE80211_HTC_AMPDU       /* A-MPDU tx/rx */
                            | IEEE80211_HTC_AMSDU       /* A-MSDU tx/rx */
                            | IEEE80211_HTCAP_MAXAMSDU_3839
                                                        /* max A-MSDU length */
                            | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */

                /*
                 * Enable short-GI for HT20 only if the hardware
                 * advertises support.
                 * Notably, anything earlier than the AR9287 doesn't.
                 */
                if ((ath_hal_getcapability(ah,
                    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
                    (wmodes & HAL_MODE_HT20)) {
                        device_printf(sc->sc_dev,
                            "[HT] enabling short-GI in 20MHz mode\n");
                        ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
                }

                if (wmodes & HAL_MODE_HT40)
                        ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
                            |  IEEE80211_HTCAP_SHORTGI40;

                /*
                 * TX/RX streams need to be taken into account when
                 * negotiating which MCS rates it'll receive and
                 * what MCS rates are available for TX.
                 */
                (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
                (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);
                ic->ic_txstream = txs;
                ic->ic_rxstream = rxs;

                /*
                 * Setup TX and RX STBC based on what the HAL allows and
                 * the currently configured chainmask set.
                 * Ie - don't enable STBC TX if only one chain is enabled.
                 * STBC RX is fine on a single RX chain; it just won't
                 * provide any real benefit.
                 */
                if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0,
                    NULL) == HAL_OK) {
                        sc->sc_rx_stbc = 1;
                        device_printf(sc->sc_dev,
                            "[HT] 1 stream STBC receive enabled\n");
                        ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM;
                }
                if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0,
                    NULL) == HAL_OK) {
                        sc->sc_tx_stbc = 1;
                        device_printf(sc->sc_dev,
                            "[HT] 1 stream STBC transmit enabled\n");
                        ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC;
                }

                (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
                    &sc->sc_rts_aggr_limit);
                if (sc->sc_rts_aggr_limit != (64 * 1024))
                        device_printf(sc->sc_dev,
                            "[HT] RTS aggregates limited to %d KiB\n",
                            sc->sc_rts_aggr_limit / 1024);

                device_printf(sc->sc_dev,
                    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
        }
#endif

        /*
         * Initial aggregation settings.
         */
        sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH;
        sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH;
        sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
        sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
        sc->sc_aggr_limit = ATH_AGGR_MAXSIZE;
        sc->sc_delim_min_pad = 0;

        /*
         * Check if the hardware requires PCI register serialisation.
         * Some of the Owl based MACs require this.
         */
        if (ncpus > 1 &&
            ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
             0, NULL) == HAL_OK) {
                sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
                device_printf(sc->sc_dev,
                    "Enabling register serialisation\n");
        }

        /*
         * Initialise the deferred completed RX buffer list.
         */
        TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
        TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);

        /*
         * Indicate we need the 802.11 header padded to a
         * 32-bit boundary for 4-address and QoS frames.
         */
        ic->ic_flags |= IEEE80211_F_DATAPAD;

        /*
         * Query the hal about antenna support.
         */
        sc->sc_defant = ath_hal_getdefantenna(ah);

        /*
         * Not all chips have the VEOL support we want to
         * use with IBSS beacons; check here for it.
         */
        sc->sc_hasveol = ath_hal_hasveol(ah);

        /* get mac address from hardware */
        ath_hal_getmac(ah, macaddr);
        if (sc->sc_hasbmask)
                ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

        /* NB: used to size node table key mapping array */
        ic->ic_max_keyix = sc->sc_keymax;
        /* call MI attach routine. */
        ieee80211_ifattach(ic, macaddr);
        ic->ic_setregdomain = ath_setregdomain;
        ic->ic_getradiocaps = ath_getradiocaps;
        sc->sc_opmode = HAL_M_STA;

        /* override default methods */
        ic->ic_newassoc = ath_newassoc;
        ic->ic_updateslot = ath_updateslot;
        ic->ic_wme.wme_update = ath_wme_update;
        ic->ic_vap_create = ath_vap_create;
        ic->ic_vap_delete = ath_vap_delete;
        ic->ic_raw_xmit = ath_raw_xmit;
        ic->ic_update_mcast = ath_update_mcast;
        ic->ic_update_promisc = ath_update_promisc;
        ic->ic_node_alloc = ath_node_alloc;
        sc->sc_node_free = ic->ic_node_free;
        ic->ic_node_free = ath_node_free;
        sc->sc_node_cleanup = ic->ic_node_cleanup;
        ic->ic_node_cleanup = ath_node_cleanup;
        ic->ic_node_getsignal = ath_node_getsignal;
        ic->ic_scan_start = ath_scan_start;
        ic->ic_scan_end = ath_scan_end;
        ic->ic_set_channel = ath_set_channel;
#ifdef ATH_ENABLE_11N
        /* 802.11n specific - but just override anyway */
        sc->sc_addba_request = ic->ic_addba_request;
        sc->sc_addba_response = ic->ic_addba_response;
        sc->sc_addba_stop = ic->ic_addba_stop;
        sc->sc_bar_response = ic->ic_bar_response;
        sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

        ic->ic_addba_request = ath_addba_request;
        ic->ic_addba_response = ath_addba_response;
        ic->ic_addba_response_timeout = ath_addba_response_timeout;
        ic->ic_addba_stop = ath_addba_stop;
        ic->ic_bar_response = ath_bar_response;

        ic->ic_update_chw = ath_update_chw;
#endif /* ATH_ENABLE_11N */

#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
        /*
         * There's one vendor bitmap entry in the RX radiotap
         * header; make sure that's taken into account.
         */
        ieee80211_radiotap_attachv(ic,
            &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0,
            ATH_TX_RADIOTAP_PRESENT,
            &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1,
            ATH_RX_RADIOTAP_PRESENT);
#else
        /*
         * No vendor bitmap/extensions are present.
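         * (In the ieee80211_radiotap_attachv() call above, the extra 0
         * and 1 arguments are the number of vendor bitmap entries in
         * the TX and RX radiotap headers respectively.)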
         */
        ieee80211_radiotap_attach(ic,
            &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
            ATH_TX_RADIOTAP_PRESENT,
            &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
            ATH_RX_RADIOTAP_PRESENT);
#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */

        /*
         * Setup the ALQ logging if required
         */
#ifdef ATH_DEBUG_ALQ
        if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev));
        if_ath_alq_setcfg(&sc->sc_alq,
            sc->sc_ah->ah_macVersion,
            sc->sc_ah->ah_macRev,
            sc->sc_ah->ah_phyRev,
            sc->sc_ah->ah_magic);
#endif

        /*
         * Setup dynamic sysctl's now that country code and
         * regdomain are available from the hal.
         */
        ath_sysctlattach(sc);
        ath_sysctl_stats_attach(sc);
        ath_sysctl_hal_attach(sc);

        if (bootverbose)
                ieee80211_announce(ic);
        ath_announce(sc);

        /*
         * Put it to sleep for now.
         */
        ath_power_setpower(sc, HAL_PM_FULL_SLEEP);

        return 0;
bad2:
        ath_tx_cleanup(sc);
        ath_desc_free(sc);
        ath_txdma_teardown(sc);
        ath_rxdma_teardown(sc);
bad:
        if (ah)
                ath_hal_detach(ah);

        /*
         * To work around scoping issues with CURVNET_SET/CURVNET_RESTORE..
         */
#if !defined(__DragonFly__)
        if (ifp != NULL && ifp->if_vnet) {
                CURVNET_SET(ifp->if_vnet);
                if_free(ifp);
                CURVNET_RESTORE();
        } else
#endif
        if (ifp != NULL)
                if_free(ifp);
        sc->sc_invalid = 1;
        return error;
}

int
ath_detach(struct ath_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;

        DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
                __func__, ifp->if_flags);

        /*
         * NB: the order of these is important:
         * o stop the chip so no more interrupts will fire
         * o call the 802.11 layer before detaching the hal to
         *   ensure callbacks into the driver to delete global
         *   key cache entries can be handled
         * o free the taskqueue which drains any pending tasks
         * o reclaim the tx queue data structures after calling
         *   the 802.11 layer as we'll get called back to reclaim
         *   node state and potentially want to use them
         * o to cleanup the tx queues the hal is called, so detach
         *   it last
         * Other than that, it's straightforward...
         */

        /*
         * XXX Wake the hardware up first.  ath_stop() will still
         * wake it up first, but I'd rather do it here just to
         * ensure it's awake.
         */
        ath_power_set_power_state(sc, HAL_PM_AWAKE);
        ath_power_setpower(sc, HAL_PM_AWAKE);

        /*
         * Stop things cleanly.
         */
        ath_stop(ifp);
        wlan_serialize_enter();
        ieee80211_ifdetach(ifp->if_l2com);
        wlan_serialize_exit();
        taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
        if (sc->sc_tx99 != NULL)
                sc->sc_tx99->detach(sc->sc_tx99);
#endif
        ath_rate_detach(sc->sc_rc);
#ifdef ATH_DEBUG_ALQ
        if_ath_alq_tidyup(&sc->sc_alq);
#endif
        ath_lna_div_detach(sc);
        ath_btcoex_detach(sc);
        ath_spectral_detach(sc);
        ath_dfs_detach(sc);
        ath_desc_free(sc);
        ath_txdma_teardown(sc);
        ath_rxdma_teardown(sc);
        ath_tx_cleanup(sc);
        ath_hal_detach(sc->sc_ah);      /* NB: sets chip in full sleep */

        CURVNET_SET(ifp->if_vnet);
        if_free(ifp);
        CURVNET_RESTORE();

        if (sc->sc_sysctl_tree) {
                sysctl_ctx_free(&sc->sc_sysctl_ctx);
                sc->sc_sysctl_tree = NULL;
        }

        return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
        int i;

        if (clone && sc->sc_hasbmask) {
                /* NB: we only do this if h/w supports multiple bssid */
                for (i = 0; i < 8; i++)
                        if ((sc->sc_bssidmask & (1<<i)) == 0)
                                break;
                if (i != 0)
                        mac[0] |= (i << 2)|0x2;
        } else
                i = 0;
        sc->sc_bssidmask |= 1<<i;
        sc->sc_hwbssidmask[0] &= ~mac[0];
        if (i == 0)
                sc->sc_nbssid0++;
}

static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
        int i = mac[0] >> 2;
        uint8_t mask;

        if (i != 0 || --sc->sc_nbssid0 == 0) {
                sc->sc_bssidmask &= ~(1<<i);
                /* recalculate bssid mask from remaining addresses */
                mask = 0xff;
                for (i = 1; i < 8; i++)
                        if (sc->sc_bssidmask & (1<<i))
                                mask &= ~((i<<2)|0x2);
                sc->sc_hwbssidmask[0] |= mask;
        }
}

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
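 *
 * For example, with ATH_BCBUF = 4 and only slot 0 in use, slot 2 is
 * chosen since both of its neighbours (1 and 3) are free; a slot
 * with an occupied neighbour is only used as a fallback.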
 */
static int
assign_bslot(struct ath_softc *sc)
{
        u_int slot, free;

        free = 0;
        for (slot = 0; slot < ATH_BCBUF; slot++)
                if (sc->sc_bslot[slot] == NULL) {
                        if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
                            sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
                                return slot;
                        free = slot;
                        /* NB: keep looking for a double slot */
                }
        return free;
}

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
        struct ath_softc *sc = ic->ic_ifp->if_softc;
        struct ath_vap *avp;
        struct ieee80211vap *vap;
        uint8_t mac[IEEE80211_ADDR_LEN];
        int needbeacon, error;
        enum ieee80211_opmode ic_opmode;

        avp = (struct ath_vap *) kmalloc(sizeof(struct ath_vap),
            M_80211_VAP, M_WAITOK | M_ZERO);
        needbeacon = 0;
        IEEE80211_ADDR_COPY(mac, mac0);

        ATH_LOCK(sc);
        ic_opmode = opmode;             /* default to opmode of new vap */
        switch (opmode) {
        case IEEE80211_M_STA:
                if (sc->sc_nstavaps != 0) {     /* XXX only 1 for now */
                        device_printf(sc->sc_dev, "only 1 sta vap supported\n");
                        goto bad;
                }
                if (sc->sc_nvaps) {
                        /*
                         * With multiple vaps we must fall back
                         * to s/w beacon miss handling.
                         */
                        flags |= IEEE80211_CLONE_NOBEACONS;
                }
                if (flags & IEEE80211_CLONE_NOBEACONS) {
                        /*
                         * Station mode w/o beacons is implemented w/ AP mode.
                         */
                        ic_opmode = IEEE80211_M_HOSTAP;
                }
                break;
        case IEEE80211_M_IBSS:
                if (sc->sc_nvaps != 0) {        /* XXX only 1 for now */
                        device_printf(sc->sc_dev,
                            "only 1 ibss vap supported\n");
                        goto bad;
                }
                needbeacon = 1;
                break;
        case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
                if (flags & IEEE80211_CLONE_TDMA) {
                        if (sc->sc_nvaps != 0) {
                                device_printf(sc->sc_dev,
                                    "only 1 tdma vap supported\n");
                                goto bad;
                        }
                        needbeacon = 1;
                        flags |= IEEE80211_CLONE_NOBEACONS;
                }
                /* fall thru... */
#endif
        case IEEE80211_M_MONITOR:
                if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
                        /*
                         * Adopt existing mode.  Adding a monitor or ahdemo
                         * vap to an existing configuration is of dubious
                         * value but should be ok.
                         */
                        /* XXX not right for monitor mode */
                        ic_opmode = ic->ic_opmode;
                }
                break;
        case IEEE80211_M_HOSTAP:
        case IEEE80211_M_MBSS:
                needbeacon = 1;
                break;
        case IEEE80211_M_WDS:
                if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
                        device_printf(sc->sc_dev,
                            "wds not supported in sta mode\n");
                        goto bad;
                }
                /*
                 * Silently remove any request for a unique
                 * bssid; WDS vap's always share the local
                 * mac address.
                 */
                flags &= ~IEEE80211_CLONE_BSSID;
                if (sc->sc_nvaps == 0)
                        ic_opmode = IEEE80211_M_HOSTAP;
                else
                        ic_opmode = ic->ic_opmode;
                break;
        default:
                device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
                goto bad;
        }
        /*
         * Check that a beacon buffer is available; the code below assumes it.
         */
        if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
                device_printf(sc->sc_dev, "no beacon buffer available\n");
                goto bad;
        }

        /* STA, AHDEMO? */
        if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
                assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
                ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
        }

        vap = &avp->av_vap;
        /* XXX can't hold mutex across if_alloc */
        ATH_UNLOCK(sc);
        error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
            bssid, mac);
        ATH_LOCK(sc);
        if (error != 0) {
                device_printf(sc->sc_dev, "%s: error %d creating vap\n",
                    __func__, error);
                goto bad2;
        }

        /* h/w crypto support */
        vap->iv_key_alloc = ath_key_alloc;
        vap->iv_key_delete = ath_key_delete;
        vap->iv_key_set = ath_key_set;
        vap->iv_key_update_begin = ath_key_update_begin;
        vap->iv_key_update_end = ath_key_update_end;

        /* override various methods */
        avp->av_recv_mgmt = vap->iv_recv_mgmt;
        vap->iv_recv_mgmt = ath_recv_mgmt;
        vap->iv_reset = ath_reset_vap;
        vap->iv_update_beacon = ath_beacon_update;
        avp->av_newstate = vap->iv_newstate;
        vap->iv_newstate = ath_newstate;
        avp->av_bmiss = vap->iv_bmiss;
        vap->iv_bmiss = ath_bmiss_vap;

#if 0
        avp->av_node_ps = vap->iv_node_ps;
        vap->iv_node_ps = ath_node_powersave;
#endif

        avp->av_set_tim = vap->iv_set_tim;
        vap->iv_set_tim = ath_node_set_tim;

#if 0
        avp->av_recv_pspoll = vap->iv_recv_pspoll;
        vap->iv_recv_pspoll = ath_node_recv_pspoll;
#endif

        /* Set default parameters */

        /*
         * MACs earlier than some AR9300 series parts don't
         * support a smaller MPDU density.
         */
        vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
        /*
         * All NICs can handle the maximum size, however
         * AR5416 based MACs can only TX aggregates w/ RTS
         * protection when the total aggregate size is <= 8k.
         * However, for now that's enforced by the TX path.
         */
        vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;

        avp->av_bslot = -1;
        if (needbeacon) {
                /*
                 * Allocate beacon state and setup the q for buffered
                 * multicast frames.  We know a beacon buffer is
                 * available because we checked above.
                 */
                avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
                TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
                if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
                        /*
                         * Assign the vap to a beacon xmit slot.  As above
                         * this cannot fail to find a free one.
                         */
                        avp->av_bslot = assign_bslot(sc);
                        KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
                            ("beacon slot %u not empty", avp->av_bslot));
                        sc->sc_bslot[avp->av_bslot] = vap;
                        sc->sc_nbcnvaps++;
                }
                if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
                        /*
                         * Multiple vaps are to transmit beacons and we
                         * have h/w support for TSF adjusting; enable
                         * use of staggered beacons.
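                         *
                         * With staggered beacons the beacon interval is
                         * divided evenly among the slots; e.g. four beacon
                         * vaps at a 100 TU beacon interval transmit one
                         * beacon roughly every 25 TU.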
                         */
                        sc->sc_stagbeacons = 1;
                }
                ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
        }

        ic->ic_opmode = ic_opmode;
        if (opmode != IEEE80211_M_WDS) {
                sc->sc_nvaps++;
                if (opmode == IEEE80211_M_STA)
                        sc->sc_nstavaps++;
                if (opmode == IEEE80211_M_MBSS)
                        sc->sc_nmeshvaps++;
        }
        switch (ic_opmode) {
        case IEEE80211_M_IBSS:
                sc->sc_opmode = HAL_M_IBSS;
                break;
        case IEEE80211_M_STA:
                sc->sc_opmode = HAL_M_STA;
                break;
        case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
                if (vap->iv_caps & IEEE80211_C_TDMA) {
                        sc->sc_tdma = 1;
                        /* NB: disable tsf adjust */
                        sc->sc_stagbeacons = 0;
                }
                /*
                 * NB: adhoc demo mode is a pseudo mode; to the hal it's
                 * just ap mode.
                 */
                /* fall thru... */
#endif
        case IEEE80211_M_HOSTAP:
        case IEEE80211_M_MBSS:
                sc->sc_opmode = HAL_M_HOSTAP;
                break;
        case IEEE80211_M_MONITOR:
                sc->sc_opmode = HAL_M_MONITOR;
                break;
        default:
                /* XXX should not happen */
                break;
        }
        if (sc->sc_hastsfadd) {
                /*
                 * Configure whether or not TSF adjust should be done.
                 */
                ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
        }
        if (flags & IEEE80211_CLONE_NOBEACONS) {
                /*
                 * Enable s/w beacon miss handling.
                 */
                sc->sc_swbmiss = 1;
        }
        ATH_UNLOCK(sc);

        /* complete setup */
        ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
        return vap;
bad2:
        reclaim_address(sc, mac);
        ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
        kfree(avp, M_80211_VAP);
        ATH_UNLOCK(sc);
        return NULL;
}

static void
ath_vap_delete(struct ieee80211vap *vap)
{
        struct ieee80211com *ic = vap->iv_ic;
        struct ifnet *ifp = ic->ic_ifp;
        struct ath_softc *sc = ifp->if_softc;
        struct ath_hal *ah = sc->sc_ah;
        struct ath_vap *avp = ATH_VAP(vap);

        ath_power_set_power_state(sc, HAL_PM_AWAKE);

        DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
        if (ifp->if_flags & IFF_RUNNING) {
                /*
                 * Quiesce the hardware while we remove the vap.  In
                 * particular we need to reclaim all references to
                 * the vap state by any frames pending on the tx queues.
                 */
                ath_hal_intrset(ah, 0);         /* disable interrupts */
                ath_draintxq(sc, ATH_RESET_DEFAULT);    /* stop hw xmit side */
                /* XXX Do all frames from all vaps/nodes need draining here? */
                ath_stoprecv(sc, 1);            /* stop recv side */
        }

        /* .. leave the hardware awake for now. */

        ieee80211_vap_detach(vap);

        /*
         * XXX Danger Will Robinson! Danger!
         *
         * Because ieee80211_vap_detach() can queue a frame (the station
         * disassociate message?) after we've drained the TXQ and
         * flushed the software TXQ, we will end up with a frame queued
         * to a node whose vap is about to be freed.
         *
         * To work around this, flush the hardware/software again.
         * This may be racy - the ath task may be running and the packet
         * may be being scheduled between sw->hw txq.  Tsk.
         *
         * TODO: figure out why a new node gets allocated somewhere around
         * here (after the ath_tx_swq() call; and after an ath_stop_locked()
         * call!)
         */

        ath_draintxq(sc, ATH_RESET_DEFAULT);

        ATH_LOCK(sc);
        /*
         * Reclaim beacon state.
         * Note this must be done before the vap instance is reclaimed
         * as we may have a reference to it in the buffer for the
         * beacon frame.
         */
        if (avp->av_bcbuf != NULL) {
                if (avp->av_bslot != -1) {
                        sc->sc_bslot[avp->av_bslot] = NULL;
                        sc->sc_nbcnvaps--;
                }
                ath_beacon_return(sc, avp->av_bcbuf);
                avp->av_bcbuf = NULL;
                if (sc->sc_nbcnvaps == 0) {
                        sc->sc_stagbeacons = 0;
                        if (sc->sc_hastsfadd)
                                ath_hal_settsfadjust(sc->sc_ah, 0);
                }
                /*
                 * Reclaim any pending mcast frames for the vap.
                 */
                ath_tx_draintxq(sc, &avp->av_mcastq);
        }
        /*
         * Update bookkeeping.
         */
        if (vap->iv_opmode == IEEE80211_M_STA) {
                sc->sc_nstavaps--;
                if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
                        sc->sc_swbmiss = 0;
        } else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
            vap->iv_opmode == IEEE80211_M_MBSS) {
                reclaim_address(sc, vap->iv_myaddr);
                ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
                if (vap->iv_opmode == IEEE80211_M_MBSS)
                        sc->sc_nmeshvaps--;
        }
        if (vap->iv_opmode != IEEE80211_M_WDS)
                sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
        /* TDMA operation ceases when the last vap is destroyed */
        if (sc->sc_tdma && sc->sc_nvaps == 0) {
                sc->sc_tdma = 0;
                sc->sc_swbmiss = 0;
        }
#endif
        kfree(avp, M_80211_VAP);

        if (ifp->if_flags & IFF_RUNNING) {
                /*
                 * Restart rx+tx machines if still running (RUNNING will
                 * be reset if we just destroyed the last vap).
                 */
                if (ath_startrecv(sc) != 0)
                        if_printf(ifp, "%s: unable to restart recv logic\n",
                            __func__);
                if (sc->sc_beacons) {           /* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
                        if (sc->sc_tdma)
                                ath_tdma_config(sc, NULL);
                        else
#endif
                                ath_beacon_config(sc, NULL);
                }
                ath_hal_intrset(ah, sc->sc_imask);
        }

        /* OK, let the hardware sleep. */
        ath_power_restore_power_state(sc);
        ATH_UNLOCK(sc);
}

void
ath_suspend(struct ath_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct ieee80211com *ic = ifp->if_l2com;

        DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
                __func__, ifp->if_flags);

        sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;

        ieee80211_suspend_all(ic);
        /*
         * NB: don't worry about putting the chip in low power
         * mode; pci will power off our socket on suspend and
         * CardBus detaches the device.
         */

        /*
         * XXX ensure none of the taskqueues are running
         * XXX ensure sc_invalid is 1
         * XXX ensure the calibration callout is disabled
         */

        /* Disable the PCIe PHY, complete with workarounds */
        ath_hal_enablepcie(sc->sc_ah, 1, 1);
}

/*
 * Reset the key cache since some parts do not reset the
 * contents on resume.  First we clear all entries, then
 * re-load keys that the 802.11 layer assumes are setup
 * in h/w.
 */
static void
ath_reset_keycache(struct ath_softc *sc)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct ieee80211com *ic = ifp->if_l2com;
        struct ath_hal *ah = sc->sc_ah;
        int i;

        ath_power_set_power_state(sc, HAL_PM_AWAKE);
        for (i = 0; i < sc->sc_keymax; i++)
                ath_hal_keyreset(ah, i);
        ath_power_restore_power_state(sc);
        ieee80211_crypto_reload_keys(ic);
}

/*
 * Fetch the current chainmask configuration based on the current
 * operating channel and options.
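 *
 * E.g. legacy (non-HT) channels force the TX chainmask down to a
 * single chain while the RX chainmask keeps its configured value;
 * see the logic below.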

/*
 * Fetch the current chainmask configuration based on the current
 * operating channel and options.
 */
static void
ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan)
{

	/*
	 * Set TX chainmask to the currently configured chainmask;
	 * the TX chainmask depends upon the current operating mode.
	 */
	sc->sc_cur_rxchainmask = sc->sc_rxchainmask;
	if (IEEE80211_IS_CHAN_HT(chan)) {
		sc->sc_cur_txchainmask = sc->sc_txchainmask;
	} else {
		sc->sc_cur_txchainmask = 1;
	}

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: TX chainmask is now 0x%x, RX is now 0x%x\n",
	    __func__,
	    sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);
}

void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
	    __func__, ifp->if_flags);

	/* Re-enable PCIe, re-enable the PCIe bus */
	ath_hal_enablepcie(ah, 0, 0);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	ath_update_chainmasks(sc,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan);
	ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);

	/* Ensure we set the current power state to on */
	ath_power_setselfgen(sc, HAL_PM_AWAKE);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ath_power_setpower(sc, HAL_PM_AWAKE);

	ath_hal_reset(ah, sc->sc_opmode,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, &status);
	ath_reset_keycache(sc);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Let spectral at it in case spectral is enabled */
	ath_spectral_enable(sc, ic->ic_curchan);

	/*
	 * Let bluetooth coexistence at it in case it's needed for this
	 * channel.
	 */
	ath_btcoex_enable(sc, ic->ic_curchan);

	/*
	 * If we're doing TDMA, enforce the TXOP limitation for chips that
	 * support it.
	 */
	if (sc->sc_hasenforcetxop && sc->sc_tdma)
		ath_hal_setenforcetxop(sc->sc_ah, 1);
	else
		ath_hal_setenforcetxop(sc->sc_ah, 0);

	/* Restore the LED configuration */
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	if (sc->sc_resume_up)
		ieee80211_resume_all(ic);

	ath_power_restore_power_state(sc);

	/* XXX beacons ? */
}

void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
	    __func__, ifp->if_flags);

	ath_stop(ifp);
	/* NB: no point powering down chip as we're about to reboot */
}
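
/*
 * NB (orientation only; a sketch, not code from this file):
 * ath_suspend(), ath_resume() and ath_shutdown() above are entry
 * points invoked from the bus glue (e.g. the PCI front end in
 * if_ath_pci.c), which resolves the softc and forwards the calls
 * here around the corresponding bus suspend/resume/shutdown events.
 */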

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status = 0;
	uint32_t txqs;

	/*
	 * If we're inside a reset path, just print a warning and
	 * clear the ISR.  The reset routine will finish it for us.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		HAL_INT status;
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		DPRINTF(sc, ATH_DEBUG_ANY,
		    "%s: in reset, ignoring: status=0x%x\n",
		    __func__, status);
		ATH_PCU_UNLOCK(sc);
		return;
	}

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	if (!ath_hal_intrpend(ah)) {	/* shared irq, not for us */
		ATH_PCU_UNLOCK(sc);
		return;
	}

	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	if ((ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_flags & IFF_RUNNING) == 0) {
		HAL_INT status;

		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		    __func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		ATH_PCU_UNLOCK(sc);
		ath_power_restore_power_state(sc);
		return;
	}

	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to ensure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status);
#ifdef ATH_DEBUG_ALQ
	if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate,
	    ah->ah_syncstate);
#endif	/* ATH_DEBUG_ALQ */
#ifdef ATH_KTR_INTR_DEBUG
	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5,
	    "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
	    ah->ah_intrstate[0],
	    ah->ah_intrstate[1],
	    ah->ah_intrstate[2],
	    ah->ah_intrstate[3],
	    ah->ah_intrstate[6]);
#endif

	/* Squirrel away SYNC interrupt debugging */
	if (ah->ah_syncstate != 0) {
		int i;
		for (i = 0; i < 32; i++)
			if (ah->ah_syncstate & (1 << i))
				sc->sc_intr_stats.sync_intr[i]++;
	}

	status &= sc->sc_imask;			/* discard unasked for bits */

	/* Short-circuit un-handled interrupts */
	if (status == 0x0) {
		ATH_PCU_UNLOCK(sc);
		ath_power_restore_power_state(sc);
		return;
	}

	/*
	 * Take a note that we're inside the interrupt handler, so
	 * the reset routines know to wait.
	 */
	sc->sc_intr_cnt++;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Handle the interrupt.  We won't run concurrent with the reset
	 * or channel change routines as they'll wait for sc_intr_cnt
	 * to be 0 before continuing.
	 */
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma) {
				if (sc->sc_tdmaswba == 0) {
					struct ieee80211com *ic = ifp->if_l2com;
					struct ieee80211vap *vap =
					    TAILQ_FIRST(&ic->ic_vaps);
					ath_tdma_beacon_send(sc, vap);
					sc->sc_tdmaswba =
					    vap->iv_tdma->tdma_bintval;
				} else
					sc->sc_tdmaswba--;
			} else
#endif
			{
				ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
				/*
				 * Schedule the rx taskq in case there's no
				 * traffic so any frames held on the staging
				 * queue are aged and potentially flushed.
				 */
				sc->sc_rx.recv_sched(sc, 1);
#endif
			}
		}
		if (status & HAL_INT_RXEOL) {
			int imask;
			ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL");
			ATH_PCU_LOCK(sc);
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			/*
			 * Disable RXEOL/RXORN - prevent an interrupt
			 * storm until the PCU logic can be reset.
			 * In case the interface is reset some other
			 * way before "sc_kickpcu" is called, don't
			 * modify sc_imask - that way if it is reset
			 * by a call to ath_reset() somehow, the
			 * interrupt mask will be correctly reprogrammed.
			 */
			imask = sc->sc_imask;
			imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
			ath_hal_intrset(ah, imask);
			/*
			 * Only blank sc_rxlink if we've not yet kicked
			 * the PCU.
			 *
			 * This isn't entirely correct - the correct solution
			 * would be to have a PCU lock and engage that for
			 * the duration of the PCU fiddling; which would include
			 * running the RX process.  Otherwise we could end up
			 * messing up the RX descriptor chain and making the
			 * RX desc list much shorter.
			 */
			if (! sc->sc_kickpcu)
				sc->sc_rxlink = NULL;
			sc->sc_kickpcu = 1;
			ATH_PCU_UNLOCK(sc);
			/*
			 * Enqueue an RX proc to handle whatever
			 * is in the RX queue.
			 * This will then kick the PCU.
			 */
			sc->sc_rx.recv_sched(sc, 1);
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		/*
		 * Handle both the legacy and RX EDMA interrupt bits.
		 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC.
		 */
		if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) {
			sc->sc_stats.ast_rx_intr++;
			sc->sc_rx.recv_sched(sc, 1);
		}
		if (status & HAL_INT_TX) {
			sc->sc_stats.ast_tx_intr++;
			/*
			 * Grab all the currently set bits in the HAL txq
			 * bitmap and blank them.  This is the only place
			 * we should be doing this.
			 */
			if (! sc->sc_isedma) {
				ATH_PCU_LOCK(sc);
				txqs = 0xffffffff;
				ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
				ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3,
				    "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x",
				    txqs,
				    sc->sc_txq_active,
				    sc->sc_txq_active | txqs);
				sc->sc_txq_active |= txqs;
				ATH_PCU_UNLOCK(sc);
			}
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
		}
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_GTT)
			sc->sc_stats.ast_tx_timeout++;
		if (status & HAL_INT_CST)
			sc->sc_stats.ast_tx_cst++;
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			ATH_PCU_LOCK(sc);
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			/*
			 * Don't reset the interrupt if we've just
			 * kicked the PCU, or we may get a nested
			 * RXEOL before the rxproc has had a chance
			 * to run.
			 */
			if (sc->sc_kickpcu == 0)
				ath_hal_intrset(ah, sc->sc_imask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_RXORN) {
			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
			ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN");
			sc->sc_stats.ast_rxorn++;
		}
		if (status & HAL_INT_TSFOOR) {
			device_printf(sc->sc_dev, "%s: TSFOOR\n", __func__);
			sc->sc_syncbeacon = 1;
		}
	}
	ATH_PCU_LOCK(sc);
	sc->sc_intr_cnt--;
	ATH_PCU_UNLOCK(sc);

	ath_power_restore_power_state(sc);
}

static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;
	void *sp;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	wlan_serialize_enter();
	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		state = sp;
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
		    state[0], state[1], state[2], state[3],
		    state[4], state[5]);
	}
	ath_reset(ifp, ATH_RESET_NOLOSS);
	wlan_serialize_exit();
}
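
/*
 * NB (orientation; an abbreviated sketch, not the actual attach code):
 * ath_fatal_proc() above runs from the driver taskqueue, not from
 * interrupt context.  The wiring is done at attach time along the
 * lines of
 *
 *	TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);
 *
 * and ath_intr() schedules it with taskqueue_enqueue() when
 * HAL_INT_FATAL is reported (see above).
 */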

static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */

	/*
	 * XXX TODO: Just read the TSF during the interrupt path;
	 * that way we don't have to wake up again just to read it
	 * again.
	 */
	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* XXX should take a locked ref to iv_bss */
		/* NB: ni_intval is in TU, the TSF in usec; 1 TU = 1024 usec */
		u_int bmisstimeout =
		    vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);

		if (tsf - lastrx <= bmisstimeout) {
			sc->sc_stats.ast_bmiss_phantom++;
			ath_power_restore_power_state(sc);
			return;
		}
	}

	/*
	 * There's no need to keep the hardware awake during the call
	 * to av_bmiss().
	 */
	ath_power_restore_power_state(sc);

	/*
	 * Attempt to force a beacon resync.
	 */
	sc->sc_syncbeacon = 1;

	ATH_VAP(vap)->av_bmiss(vap);
}

/* XXX this needs a force wakeup! */
int
ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
{
	uint32_t rsize;
	void *sp;

	if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS,
	    &mask, sizeof(mask), &sp, &rsize))
		return 0;
	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
	*hangs = *(uint32_t *)sp;
	return 1;
}

static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t hangs;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);

	/*
	 * Do a reset upon any beacon miss event.
	 *
	 * It may be a non-recognised RX clear hang which needs a reset
	 * to clear.
	 */
	wlan_serialize_enter();
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ath_beacon_miss(sc);
	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
		ath_reset(ifp, ATH_RESET_NOLOSS);
		if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
	} else {
		ath_reset(ifp, ATH_RESET_NOLOSS);
		ieee80211_beacon_miss(ifp->if_l2com);
	}

	/* Force a beacon resync, in case they've drifted */
	sc->sc_syncbeacon = 1;
	ath_power_restore_power_state(sc);

	wlan_serialize_exit();
}

/*
 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
 * calcs together with WME.  If necessary disable the crypto
 * hardware and mark the 802.11 state so keys will be setup
 * with the MIC work done in software.
 */
static void
ath_settkipmic(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
		if (ic->ic_flags & IEEE80211_F_WME) {
			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
		} else {
			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		}
	}
}

static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
	    __func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_settkipmic(sc);
	ath_update_chainmasks(sc, ic->ic_curchan);
	ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE,
	    &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
		    status);
		ATH_UNLOCK(sc);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Let spectral at it in case spectral is enabled */
	ath_spectral_enable(sc, ic->ic_curchan);

	/*
	 * Let bluetooth coexistence at it in case it's needed for this
	 * channel.
	 */
	ath_btcoex_enable(sc, ic->ic_curchan);

	/*
	 * If we're doing TDMA, enforce the TXOP limitation for chips that
	 * support it.
	 */
	if (sc->sc_hasenforcetxop && sc->sc_tdma)
		ath_hal_setenforcetxop(sc->sc_ah, 1);
	else
		ath_hal_setenforcetxop(sc->sc_ah, 0);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	sc->sc_lastlongcal = 0;
	sc->sc_resetcal = 1;
	sc->sc_lastcalreset = 0;
	sc->sc_lastani = 0;
	sc->sc_lastshortcal = 0;
	sc->sc_doresetcal = AH_FALSE;
	/*
	 * Beacon timers were cleared here; give ath_newstate()
	 * a hint that the beacon timers should be poked when
	 * things transition to the RUN state.
	 */
	sc->sc_beacons = 0;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		ath_power_restore_power_state(sc);
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		| HAL_INT_RXEOL | HAL_INT_RXORN
		| HAL_INT_TXURN
		| HAL_INT_FATAL | HAL_INT_GLOBAL;

	/*
	 * Enable RX EDMA bits.  Note these overlap with
	 * HAL_INT_RX and HAL_INT_RXDESC respectively.
	 */
	if (sc->sc_isedma)
		sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP);

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	/*
	 * XXX add capability for this.
	 *
	 * If we're in STA mode (and maybe IBSS?) then register for
	 * TSFOOR interrupts.
	 */
	if (ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_TSFOOR;

	/* Enable global TX timeout and carrier sense timeout if available */
	if (ath_hal_gtxto_supported(ah))
		sc->sc_imask |= HAL_INT_GTT;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
	    __func__, sc->sc_imask);

	ifp->if_flags |= IFF_RUNNING;
	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
	ath_hal_intrset(ah, sc->sc_imask);

	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);

#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	ieee80211_start_all(ic);		/* start all vap's */
}

static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
	    __func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);

	/*
	 * Wake the hardware up before fiddling with it.
	 */
	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	if (ifp->if_flags & IFF_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		callout_stop(&sc->sc_wd_ch);
		sc->sc_wd_timer = 0;
		ifp->if_flags &= ~IFF_RUNNING;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				callout_stop(&sc->sc_ledtimer);
				ath_hal_gpioset(ah, sc->sc_ledpin,
				    !sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc, ATH_RESET_DEFAULT);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc, 1);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		ath_beacon_free(sc);	/* XXX not needed */
	}

	/* And now, restore the current power state */
	ath_power_restore_power_state(sc);
}

/*
 * Wait until all pending TX/RX has completed.
 *
 * This waits until all existing transmit, receive and interrupt
 * processing has completed.  It's assumed that the caller has first
 * grabbed the reset lock so it doesn't try to do overlapping
 * chip resets.
 */
#define	MAX_TXRX_ITERATIONS	100
static void
ath_txrx_stop_locked(struct ath_softc *sc)
{
	int i = MAX_TXRX_ITERATIONS;

	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_LOCK_ASSERT(sc);

	/*
	 * Sleep until all the pending operations have completed.
	 *
	 * The caller must ensure that the reset counter
	 * (sc_inreset_cnt) has been incremented, or the pending
	 * operations may continue being queued.
	 */
	while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
	    sc->sc_txstart_cnt || sc->sc_intr_cnt) {
		if (i <= 0)
			break;
		wlan_serialize_sleep(sc, 0, "ath_txrx_stop", (hz + 99) / 100);
		i--;
	}

	if (i <= 0)
		device_printf(sc->sc_dev,
		    "%s: didn't finish after %d iterations\n",
		    __func__, MAX_TXRX_ITERATIONS);
}
#undef	MAX_TXRX_ITERATIONS
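
#if 0
/*
 * Illustrative sketch only (not compiled): the calling pattern the
 * comment above requires.  sc_inreset_cnt must be bumped - via
 * ath_reset_grablock(), defined below - before sleeping in
 * ath_txrx_stop_locked(), otherwise new TX/RX/interrupt work can keep
 * getting queued and the wait may never terminate.  ath_reset() is
 * the canonical caller; the helper name here is an assumption.
 */
static void
ath_txrx_stop_sketch(struct ath_softc *sc)
{

	ATH_PCU_LOCK(sc);
	(void) ath_reset_grablock(sc, 1);	/* bump sc_inreset_cnt */
	ath_hal_intrset(sc->sc_ah, 0);		/* block new interrupts */
	ath_txrx_stop_locked(sc);		/* wait for TX/RX to idle */
	ATH_PCU_UNLOCK(sc);
}
#endif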

#if 0
static void
ath_txrx_stop(struct ath_softc *sc)
{
	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_UNLOCK_ASSERT(sc);

	ATH_PCU_LOCK(sc);
	ath_txrx_stop_locked(sc);
	ATH_PCU_UNLOCK(sc);
}
#endif

static void
ath_txrx_start(struct ath_softc *sc)
{

	taskqueue_unblock(sc->sc_tq);
}

/*
 * Grab the reset lock, and wait around until no one else
 * is trying to do anything with it.
 *
 * This is totally horrible but we can't hold this lock for
 * long enough to do TX/RX or we end up with net80211/ip stack
 * LORs and eventual deadlock.
 *
 * "dowait" signals whether to spin, waiting for the reset
 * lock count to reach 0.  This should (for now) only be used
 * during the reset path, as the rest of the code may not
 * be locking-reentrant enough to behave correctly.
 *
 * Another, cleaner way should be found to serialise all of
 * these operations.
 */
#define	MAX_RESET_ITERATIONS	25
static int
ath_reset_grablock(struct ath_softc *sc, int dowait)
{
	int w = 0;
	int i = MAX_RESET_ITERATIONS;

	ATH_PCU_LOCK_ASSERT(sc);
	do {
		if (sc->sc_inreset_cnt == 0) {
			w = 1;
			break;
		}
		if (dowait == 0) {
			w = 0;
			break;
		}
		ATH_PCU_UNLOCK(sc);
		wlan_serialize_sleep(sc, 0, "ath_reset_grablock",
		    (hz + 9) / 10);
		i--;
		ATH_PCU_LOCK(sc);
	} while (i > 0);

	/*
	 * We always increment the refcounter, regardless
	 * of whether we succeeded to get it in an exclusive
	 * way.
	 */
	sc->sc_inreset_cnt++;

	if (i <= 0)
		device_printf(sc->sc_dev,
		    "%s: didn't finish after %d iterations\n",
		    __func__, MAX_RESET_ITERATIONS);

	if (w == 0)
		device_printf(sc->sc_dev,
		    "%s: warning, recursive reset path!\n",
		    __func__);

	return w;
}
#undef	MAX_RESET_ITERATIONS

/*
 * XXX TODO: write ath_reset_releaselock
 */
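
#if 0
/*
 * A minimal sketch (an assumption, not driver code) of what the
 * missing ath_reset_releaselock() noted above could look like if it
 * simply undid the refcount bump from ath_reset_grablock().
 * ath_reset() currently open-codes this where it decrements
 * sc_inreset_cnt and re-enables interrupts.
 */
static void
ath_reset_releaselock(struct ath_softc *sc)
{

	ATH_PCU_LOCK_ASSERT(sc);
	KASSERT(sc->sc_inreset_cnt > 0, ("sc_inreset_cnt underflow"));
	sc->sc_inreset_cnt--;
}
#endif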

static void
ath_stop(struct ifnet *ifp)
{
	struct ath_softc *sc __unused = ifp->if_softc;

	ATH_LOCK(sc);
	ath_stop_locked(ifp);
	ATH_UNLOCK(sc);
}

/*
 * Reset the hardware w/o losing operational state.  This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state.  Used to recover from various errors and
 * to reset or reload hardware state.
 */
int
ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;
	int i;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);

	/* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
	ATH_PCU_UNLOCK_ASSERT(sc);
	ATH_UNLOCK_ASSERT(sc);

	/* Try to stop any further TX/RX from occurring */
	taskqueue_block(sc->sc_tq);

	/*
	 * Wake the hardware up.
	 */
	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	ATH_PCU_LOCK(sc);

	/*
	 * Grab the reset lock before TX/RX is stopped.
	 *
	 * This is needed to ensure that when the TX/RX actually does finish,
	 * no further TX/RX/reset runs in parallel with this.
	 */
	if (ath_reset_grablock(sc, 1) == 0) {
		device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
		    __func__);
	}

	/* disable interrupts */
	ath_hal_intrset(ah, 0);

	/*
	 * Now, ensure that any in progress TX/RX completes before we
	 * continue.
	 */
	ath_txrx_stop_locked(sc);

	ATH_PCU_UNLOCK(sc);

	/*
	 * Pending TX/RX has now completed and future TX/RX is blocked
	 * from occurring.  This needs to be done before the TX queue
	 * is drained.
	 */
	ath_draintxq(sc, reset_type);	/* stop xmit side */

	/*
	 * Regardless of whether we're doing a no-loss flush or
	 * not, stop the PCU and handle what's in the RX queue.
	 * That way frames aren't dropped which shouldn't be.
	 */
	ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
	ath_rx_flush(sc);

	ath_settkipmic(sc);		/* configure TKIP MIC handling */
	/* NB: indicate channel change so we do a full reset */
	ath_update_chainmasks(sc, ic->ic_curchan);
	ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE,
	    &status))
		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
		    __func__, status);
	sc->sc_diversity = ath_hal_getdiversity(ah);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Let spectral at it in case spectral is enabled */
	ath_spectral_enable(sc, ic->ic_curchan);

	/*
	 * Let bluetooth coexistence at it in case it's needed for this
	 * channel.
	 */
	ath_btcoex_enable(sc, ic->ic_curchan);

	/*
	 * If we're doing TDMA, enforce the TXOP limitation for chips that
	 * support it.
	 */
	if (sc->sc_hasenforcetxop && sc->sc_tdma)
		ath_hal_setenforcetxop(sc->sc_ah, 1);
	else
		ath_hal_setenforcetxop(sc->sc_ah, 0);

	if (ath_startrecv(sc) != 0)	/* restart recv */
		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
	/*
	 * We may be doing a reset in response to an ioctl
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_chan_change(sc, ic->ic_curchan);
	if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
		if (sc->sc_tdma)
			ath_tdma_config(sc, NULL);
		else
#endif
			ath_beacon_config(sc, NULL);
	}

	/*
	 * Release the reset lock and re-enable interrupts here.
	 * If an interrupt was being processed in ath_intr(),
	 * it would disable interrupts at this point.  So we have
	 * to atomically enable interrupts and decrement the
	 * reset counter - this way ath_intr() doesn't end up
	 * disabling interrupts without a corresponding enable
	 * in the rest or channel change path.
	 *
	 * Grab the TX reference in case we need to transmit.
	 * That way a parallel transmit can't race with the reset.
	 */
	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt--;
	sc->sc_txstart_cnt++;
	/* XXX only do this if sc_inreset_cnt == 0? */
	ath_hal_intrset(ah, sc->sc_imask);
	ATH_PCU_UNLOCK(sc);

	/*
	 * TX and RX can be started here.  If it were started with
	 * sc_inreset_cnt > 0, the TX and RX path would abort.
	 * Thus if this is a nested call through the reset or
	 * channel change code, TX completion will occur but
	 * RX completion and ath_start / ath_tx_start will not
	 * run.
	 */

	/* XXX TODO: we need to hold the tx refcount here! */

	/* Restart TX/RX as needed */
	ath_txrx_start(sc);

	/* Restart TX completion and pending TX */
	if (reset_type == ATH_RESET_NOLOSS) {
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ATH_TXQ_LOCK(&sc->sc_txq[i]);
				ath_txq_restart_dma(sc, &sc->sc_txq[i]);
				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);

				ATH_TX_LOCK(sc);
				ath_txq_sched(sc, &sc->sc_txq[i]);
				ATH_TX_UNLOCK(sc);
			}
		}
	}

#if 0
	/* remove, DragonFly uses OACTIVE to control if_start calls */
	/*
	 * This may have been set during an ath_start() call which
	 * set this once it detected a concurrent TX was going on.
	 * So, clear it.
	 */
	IF_LOCK(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);
	IF_UNLOCK(&ifp->if_snd);
#endif

	ath_power_restore_power_state(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_txstart_cnt--;
	ATH_PCU_UNLOCK(sc);

	/* Handle any frames in the TX queue */
	/*
	 * XXX should this be done by the caller, rather than
	 * ath_reset() ?
	 */
	ath_tx_kick(sc);		/* restart xmit */
	return 0;
}

static int
ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	switch (cmd) {
	case IEEE80211_IOC_TXPOWER:
		/*
		 * If per-packet TPC is enabled, then we have nothing
		 * to do; otherwise we need to force the global limit.
		 * All this can happen directly; no need to reset.
		 */
		if (!ath_hal_gettpc(ah))
			ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
		return 0;
	}
	/* XXX? Full or NOLOSS? */
	return ath_reset(ifp, ATH_RESET_FULL);
}
2910 "out of xmit buffers" : "xmit buffer busy"); 2911 return NULL; 2912 } 2913 2914 /* XXX TODO: should do this at buffer list initialisation */ 2915 /* XXX (then, ensure the buffer has the right flag set) */ 2916 bf->bf_flags = 0; 2917 if (btype == ATH_BUFTYPE_MGMT) 2918 bf->bf_flags |= ATH_BUF_MGMT; 2919 else 2920 bf->bf_flags &= (~ATH_BUF_MGMT); 2921 2922 /* Valid bf here; clear some basic fields */ 2923 bf->bf_next = NULL; /* XXX just to be sure */ 2924 bf->bf_last = NULL; /* XXX again, just to be sure */ 2925 bf->bf_comp = NULL; /* XXX again, just to be sure */ 2926 bzero(&bf->bf_state, sizeof(bf->bf_state)); 2927 2928 /* 2929 * Track the descriptor ID only if doing EDMA 2930 */ 2931 if (sc->sc_isedma) { 2932 bf->bf_descid = sc->sc_txbuf_descid; 2933 sc->sc_txbuf_descid++; 2934 } 2935 2936 return bf; 2937 } 2938 2939 /* 2940 * When retrying a software frame, buffers marked ATH_BUF_BUSY 2941 * can't be thrown back on the queue as they could still be 2942 * in use by the hardware. 2943 * 2944 * This duplicates the buffer, or returns NULL. 2945 * 2946 * The descriptor is also copied but the link pointers and 2947 * the DMA segments aren't copied; this frame should thus 2948 * be again passed through the descriptor setup/chain routines 2949 * so the link is correct. 2950 * 2951 * The caller must free the buffer using ath_freebuf(). 2952 */ 2953 struct ath_buf * 2954 ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf) 2955 { 2956 struct ath_buf *tbf; 2957 2958 tbf = ath_getbuf(sc, 2959 (bf->bf_flags & ATH_BUF_MGMT) ? 2960 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); 2961 if (tbf == NULL) 2962 return NULL; /* XXX failure? Why? */ 2963 2964 /* Copy basics */ 2965 tbf->bf_next = NULL; 2966 tbf->bf_nseg = bf->bf_nseg; 2967 tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE; 2968 tbf->bf_status = bf->bf_status; 2969 tbf->bf_m = bf->bf_m; 2970 tbf->bf_node = bf->bf_node; 2971 KASSERT((bf->bf_node != NULL), ("%s: bf_node=NULL!", __func__)); 2972 /* will be setup by the chain/setup function */ 2973 tbf->bf_lastds = NULL; 2974 /* for now, last == self */ 2975 tbf->bf_last = tbf; 2976 tbf->bf_comp = bf->bf_comp; 2977 2978 /* NOTE: DMA segments will be setup by the setup/chain functions */ 2979 2980 /* The caller has to re-init the descriptor + links */ 2981 2982 /* 2983 * Free the DMA mapping here, before we NULL the mbuf. 2984 * We must only call bus_dmamap_unload() once per mbuf chain 2985 * or behaviour is undefined. 2986 */ 2987 if (bf->bf_m != NULL) { 2988 /* 2989 * XXX is this POSTWRITE call required? 2990 */ 2991 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 2992 BUS_DMASYNC_POSTWRITE); 2993 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2994 } 2995 2996 bf->bf_m = NULL; 2997 bf->bf_node = NULL; 2998 2999 /* Copy state */ 3000 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 3001 3002 return tbf; 3003 } 3004 3005 struct ath_buf * 3006 ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) 3007 { 3008 struct ath_buf *bf; 3009 3010 ATH_TXBUF_LOCK(sc); 3011 bf = _ath_getbuf_locked(sc, btype); 3012 /* 3013 * If a mgmt buffer was requested but we're out of those, 3014 * try requesting a normal one. 

struct ath_buf *
ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype)
{
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	bf = _ath_getbuf_locked(sc, btype);
	/*
	 * If a mgmt buffer was requested but we're out of those,
	 * try requesting a normal one.
	 */
	if (bf == NULL && btype == ATH_BUFTYPE_MGMT)
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
	ATH_TXBUF_UNLOCK(sc);
	if (bf == NULL) {
#if 0
		struct ifnet *ifp = sc->sc_ifp;
#endif

		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
		sc->sc_stats.ast_tx_qstop++;
#if 0
		/* remove, DragonFly uses OACTIVE to control if_start calls */
		IF_LOCK(&ifp->if_snd);
		ifq_set_oactive(&ifp->if_snd);
		IF_UNLOCK(&ifp->if_snd);
#endif
	}
	return bf;
}

#if 0

static void
ath_qflush(struct ifnet *ifp)
{

	/* XXX TODO */
}

#endif

/*
 * Transmit a single frame.
 *
 * net80211 will free the node reference if the transmit
 * fails, so don't free the node reference here.
 */
static int
ath_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ieee80211_node *ni;
	struct mbuf *next;
	struct ath_buf *bf;
	ath_bufhead frags;
	int retval = 0;

	/*
	 * Tell the reset path that we're currently transmitting.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: sc_inreset_cnt > 0; bailing\n", __func__);
		ATH_PCU_UNLOCK(sc);
		IF_LOCK(&ifp->if_snd);
		sc->sc_stats.ast_tx_qstop++;
#if 0
		/* remove, DragonFly uses OACTIVE to control if_start calls */
		ifq_set_oactive(&ifp->if_snd);
#endif
		IF_UNLOCK(&ifp->if_snd);
		ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish");
		m_freem(m);
		m = NULL;
		return (ENOBUFS);	/* XXX should be EINVAL or? */
	}
	sc->sc_txstart_cnt++;
	ATH_PCU_UNLOCK(sc);

	/* Wake the hardware up already */
	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start");
	/*
	 * Grab the TX lock - it's ok to do this here; we haven't
	 * yet started transmitting.
	 */
	ATH_TX_LOCK(sc);

	/*
	 * Node reference, if there's one.
	 */
	ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;

	/*
	 * Enforce how deep a node queue can get.
	 *
	 * XXX it would be nicer if we kept an mbuf queue per
	 * node and only whacked them into ath_bufs when we
	 * are ready to schedule some traffic from them.
	 * .. that may come later.
	 *
	 * XXX we should also track the per-node hardware queue
	 * depth so it is easy to limit the _SUM_ of the swq and
	 * hwq frames.  Since we only schedule two HWQ frames
	 * at a time, this should be OK for now.
	 */
	if ((!(m->m_flags & M_EAPOL)) &&
	    (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) {
		sc->sc_stats.ast_tx_nodeq_overflow++;
		m_freem(m);
		m = NULL;
		retval = ENOBUFS;
		goto finish;
	}

	/*
	 * Check how many TX buffers are available.
	 *
	 * If this is for non-EAPOL traffic, just leave some
	 * space free in order for buffer cloning and raw
	 * frame transmission to occur.
	 *
	 * If it's for EAPOL traffic, ignore this for now.
	 * Management traffic will be sent via the raw transmit
	 * method which bypasses this check.
	 *
	 * This is needed to ensure that EAPOL frames during
	 * (re) keying have a chance to go out.
	 *
	 * See kern/138379 for more information.
	 */
	if ((!(m->m_flags & M_EAPOL)) &&
	    (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) {
		sc->sc_stats.ast_tx_nobuf++;
		m_freem(m);
		m = NULL;
		retval = ENOBUFS;
		goto finish;
	}

	/*
	 * Grab a TX buffer and associated resources.
	 *
	 * If it's an EAPOL frame, allocate a MGMT ath_buf.
	 * That way even temporary buffer exhaustion in the data path
	 * doesn't leave us without the ability to transmit management
	 * frames.
	 *
	 * Otherwise allocate a normal buffer.
	 */
	if (m->m_flags & M_EAPOL)
		bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
	else
		bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);

	if (bf == NULL) {
		/*
		 * If we failed to allocate a buffer, fail.
		 *
		 * We shouldn't fail normally, due to the check
		 * above.
		 */
		sc->sc_stats.ast_tx_nobuf++;
#if 0
		/* remove, DragonFly uses OACTIVE to control if_start calls */
		IF_LOCK(&ifp->if_snd);
		ifq_set_oactive(&ifp->if_snd);
		IF_UNLOCK(&ifp->if_snd);
#endif
		m_freem(m);
		m = NULL;
		retval = ENOBUFS;
		goto finish;
	}

	/*
	 * At this point we have a buffer; so we need to free it
	 * if we hit any error conditions.
	 */

	/*
	 * Check for fragmentation.  If this frame
	 * has been broken up verify we have enough
	 * buffers to send all the fragments so all
	 * go out or none...
	 */
	TAILQ_INIT(&frags);
	if ((m->m_flags & M_FRAG) &&
	    !ath_txfrag_setup(sc, &frags, m, ni)) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: out of txfrag buffers\n", __func__);
		sc->sc_stats.ast_tx_nofrag++;
		ifp->if_oerrors++;
		ath_freetx(m);
		goto bad;
	}

	/*
	 * At this point if we have any TX fragments, then we will
	 * have bumped the node reference once for each of those.
	 */

	/*
	 * XXX Is there anything actually _enforcing_ that the
	 * fragments are being transmitted in one hit, rather than
	 * being interleaved with other transmissions on that
	 * hardware queue?
	 *
	 * The ATH TX output lock is the only thing serialising this
	 * right now.
	 */

	/*
	 * Calculate the "next fragment" length field in ath_buf
	 * in order to let the transmit path know enough about
	 * what to next write to the hardware.
	 */
	if (m->m_flags & M_FRAG) {
		struct ath_buf *fbf = bf;
		struct ath_buf *n_fbf = NULL;
		struct mbuf *fm = m->m_nextpkt;

		/*
		 * We need to walk the list of fragments and set
		 * the next size to the following buffer.
		 * However, the first buffer isn't in the frag
		 * list, so we have to do some gymnastics here.
		 */
		TAILQ_FOREACH(n_fbf, &frags, bf_list) {
			fbf->bf_nextfraglen = fm->m_pkthdr.len;
			fbf = n_fbf;
			fm = fm->m_nextpkt;
		}
	}
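
	/*
	 * Pictorially (an illustrative sketch of the walk above):
	 * fragments arrive chained via m_nextpkt and each ath_buf
	 * records its successor's length so the TX path can program
	 * the duration field:
	 *
	 *   m (frag0) --m_nextpkt--> frag1 --m_nextpkt--> frag2 --> NULL
	 *   bf(frag0)->bf_nextfraglen = len(frag1), and so on; the
	 *   last fragment has no successor, so the walk leaves its
	 *   bf_nextfraglen untouched.
	 */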

	/*
	 * Bump the ifp output counter.
	 *
	 * XXX should use atomics?
	 */
	ifp->if_opackets++;
nextfrag:
	/*
	 * Pass the frame to the h/w for transmission.
	 * Fragmented frames have each frag chained together
	 * with m_nextpkt.  We know there are sufficient ath_buf's
	 * to send all the frags because of work done by
	 * ath_txfrag_setup.  We leave m_nextpkt set while
	 * calling ath_tx_start so it can use it to extend the
	 * tx duration to cover the subsequent frag and
	 * so it can reclaim all the mbufs in case of an error;
	 * ath_tx_start clears m_nextpkt once it commits to
	 * handing the frame to the hardware.
	 *
	 * Note: if this fails, then the mbufs are freed but
	 * not the node reference.
	 */
	next = m->m_nextpkt;
	if (ath_tx_start(sc, ni, bf, m)) {
bad:
		ifp->if_oerrors++;
reclaim:
		bf->bf_m = NULL;
		bf->bf_node = NULL;
		ATH_TXBUF_LOCK(sc);
		ath_returnbuf_head(sc, bf);
		/*
		 * Free the rest of the node references and
		 * buffers for the fragment list.
		 */
		ath_txfrag_cleanup(sc, &frags, ni);
		ATH_TXBUF_UNLOCK(sc);
		retval = ENOBUFS;
		goto finish;
	}

	/*
	 * Check here if the node is in power save state.
	 */
	ath_tx_update_tim(sc, ni, 1);

	if (next != NULL) {
		/*
		 * Beware of state changing between frags.
		 * XXX check sta power-save state?
		 */
		if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: flush fragmented packet, state %s\n",
			    __func__,
			    ieee80211_state_name[ni->ni_vap->iv_state]);
			/* XXX dmamap */
			ath_freetx(next);
			goto reclaim;
		}
		m = next;
		bf = TAILQ_FIRST(&frags);
		KASSERT(bf != NULL, ("no buf for txfrag"));
		TAILQ_REMOVE(&frags, bf, bf_list);
		goto nextfrag;
	}

	/*
	 * Bump watchdog timer.
	 */
	sc->sc_wd_timer = 5;

finish:
	ATH_TX_UNLOCK(sc);

	/*
	 * Finished transmitting!
	 */
	ATH_PCU_LOCK(sc);
	sc->sc_txstart_cnt--;
	ATH_PCU_UNLOCK(sc);

	/* Sleep the hardware if required */
	ath_power_restore_power_state(sc);

	ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished");

	return (retval);
}

static int
ath_media_change(struct ifnet *ifp)
{
	int error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	return (error == ENETRESET ? 0 : error);
}

/*
 * Block/unblock tx+rx processing while a key change is done.
 * We assume the caller serializes key management operations
 * so we only need to worry about synchronization with other
 * uses that originate in the driver.
 */
static void
ath_key_update_begin(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	taskqueue_block(sc->sc_tq);
}

static void
ath_key_update_end(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	taskqueue_unblock(sc->sc_tq);
}

static void
ath_update_promisc(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t rfilt;

	/* configure rx filter */
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(sc->sc_ah, rfilt);
	ath_power_restore_power_state(sc);

	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
}

static void
ath_update_mcast(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t mfilt[2];

	/* calculate and install multicast filter */
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		struct ifmultiaddr *ifma;
		/*
		 * Merge multicast addresses to form the hardware filter.
		 */
		mfilt[0] = mfilt[1] = 0;
#if 0
		if_maddr_rlock(ifp);	/* XXX need some fiddling to remove? */
#endif
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			caddr_t dl;
			u_int32_t val;
			u_int8_t pos;

			/* calculate XOR of eight 6bit values */
			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			val = LE_READ_4(dl + 0);
			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			val = LE_READ_4(dl + 3);
			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			pos &= 0x3f;
			mfilt[pos / 32] |= (1 << (pos % 32));
		}
#if 0
		if_maddr_runlock(ifp);
#endif
	} else
		mfilt[0] = mfilt[1] = ~0;
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
	ath_power_restore_power_state(sc);
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
	    __func__, mfilt[0], mfilt[1]);
}
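
#if 0
/*
 * Sketch for clarity (not compiled): the per-address computation the
 * merge loop above performs, pulled out as a standalone helper.  The
 * hardware multicast filter is a 64-bit vector (mfilt[0..1]); each
 * address is folded by XOR into a 6-bit bucket index.  The helper
 * name and shape are illustrative assumptions, not driver API.
 */
static u_int8_t
ath_mcast_hash_sketch(const u_int8_t *dl)
{
	u_int32_t val;
	u_int8_t pos;

	val = LE_READ_4(dl + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val = LE_READ_4(dl + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	return (pos & 0x3f);	/* bit index into mfilt[0..1] */
}
#endif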

void
ath_mode_init(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(ah, rfilt);

	/* configure operational mode */
	ath_hal_setopmode(ah);

#if 0
	DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE,
	    "%s: ah=%p, ifp=%p, if_addr=%p\n",
	    __func__,
	    ah,
	    ifp,
	    (ifp == NULL) ? NULL : ifp->if_addr);
#endif

	/* handle any link-level address change */
	ath_hal_setmac(ah, IF_LLADDR(ifp));

	/* calculate and install multicast filter */
	ath_update_mcast(ifp);
}

/*
 * Set the slot time based on the current setting.
 */
void
ath_setslottime(struct ath_softc *sc)
{
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	u_int usec;

	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
		usec = 13;
	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
		usec = 21;
	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		/* honor short/long slot time only in 11g */
		/* XXX shouldn't honor on pure g or turbo g channel */
		if (ic->ic_flags & IEEE80211_F_SHSLOT)
			usec = HAL_SLOT_TIME_9;
		else
			usec = HAL_SLOT_TIME_20;
	} else
		usec = HAL_SLOT_TIME_9;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);

	/* Wake up the hardware first before updating the slot time */
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ath_hal_setslottime(ah, usec);
	ath_power_restore_power_state(sc);
	sc->sc_updateslot = OK;
}

/*
 * Callback from the 802.11 layer to update the
 * slot time based on the current setting.
 */
static void
ath_updateslot(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;

	/*
	 * When not coordinating the BSS, change the hardware
	 * immediately.  For other operation we defer the change
	 * until beacon updates have propagated to the stations.
	 *
	 * XXX sc_updateslot isn't changed behind a lock?
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
	    ic->ic_opmode == IEEE80211_M_MBSS)
		sc->sc_updateslot = UPDATE;
	else
		ath_setslottime(sc);
}

/*
 * Append the contents of src to dst; both queues
 * are assumed to be locked.
 */
void
ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
{

	ATH_TXQ_LOCK_ASSERT(src);
	ATH_TXQ_LOCK_ASSERT(dst);

	TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
	dst->axq_link = src->axq_link;
	src->axq_link = NULL;
	dst->axq_depth += src->axq_depth;
	dst->axq_aggr_depth += src->axq_aggr_depth;
	src->axq_depth = 0;
	src->axq_aggr_depth = 0;
}

/*
 * Reset the hardware, with no loss.
 *
 * This can't be used for a general case reset.
 */
static void
ath_reset_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

#if 0
	if_printf(ifp, "%s: resetting\n", __func__);
#endif
	wlan_serialize_enter();
	ath_reset(ifp, ATH_RESET_NOLOSS);
	wlan_serialize_exit();
}

/*
 * Reset the hardware after detecting beacons have stopped.
 */
static void
ath_bstuck_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t hangs = 0;

	wlan_serialize_enter();
	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
		if_printf(ifp, "bb hang detected (0x%x)\n", hangs);

#ifdef ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON))
		if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL);
#endif

	if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
	    sc->sc_bmisscount);
	sc->sc_stats.ast_bstuck++;
	/*
	 * This assumes that there's no simultaneous channel mode change
	 * occurring.
	 */
	ath_reset(ifp, ATH_RESET_NOLOSS);
	wlan_serialize_exit();
}

static void
ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;

	KASSERT(error == 0, ("error %u on bus_dma callback", error));
	*paddr = segs->ds_addr;
}

/*
 * Allocate the descriptors and appropriate DMA tag/setup.
 *
 * For some situations (eg EDMA TX completion), there isn't a requirement
 * for the ath_buf entries to be allocated.
 */
int
ath_descdma_alloc_desc(struct ath_softc *sc,
    struct ath_descdma *dd, ath_bufhead *head,
    const char *name, int ds_size, int ndesc)
{
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define	ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
	((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
	struct ifnet *ifp = sc->sc_ifp;
	int error;

	dd->dd_descsize = ds_size;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: %s DMA: %u desc, %d bytes per descriptor\n",
	    __func__, name, ndesc, dd->dd_descsize);

	dd->dd_name = name;
	dd->dd_desc_len = dd->dd_descsize * ndesc;

	/*
	 * Merlin work-around:
	 * Descriptors that cross the 4KB boundary can't be used.
	 * Assume one skipped descriptor per 4KB page.
	 */
	if (! ath_hal_split4ktrans(sc->sc_ah)) {
		int numpages = dd->dd_desc_len / 4096;
		dd->dd_desc_len += ds_size * numpages;
	}

	/*
	 * Setup DMA descriptor area.
	 *
	 * BUS_DMA_ALLOCNOW is not used; we never use bounce
	 * buffers for the descriptors themselves.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
		       PAGE_SIZE, 0,		/* alignment, bounds */
		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		       BUS_SPACE_MAXADDR,	/* highaddr */
		       NULL, NULL,		/* filter, filterarg */
		       dd->dd_desc_len,		/* maxsize */
		       1,			/* nsegments */
		       dd->dd_desc_len,		/* maxsegsize */
		       0,			/* flags */
		       &dd->dd_dmat);
	if (error != 0) {
		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
		return error;
	}

	/* allocate descriptors */
	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
	    &dd->dd_dmamap);
	if (error != 0) {
		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
		    "error %u\n", ndesc, dd->dd_name, error);
		goto fail1;
	}

	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
	    dd->dd_desc, dd->dd_desc_len,
	    ath_load_cb, &dd->dd_desc_paddr,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		if_printf(ifp, "unable to map %s descriptors, error %u\n",
		    dd->dd_name, error);
		goto fail2;
	}

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
	    __func__, dd->dd_name, (uint8_t *) dd->dd_desc,
	    (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr,
	    /*XXX*/ (u_long) dd->dd_desc_len);

	return (0);

fail2:
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
	bus_dma_tag_destroy(dd->dd_dmat);
	memset(dd, 0, sizeof(*dd));
	return error;
#undef	DS2PHYS
#undef	ATH_DESC_4KB_BOUND_CHECK
}
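
/*
 * Worked example of the Merlin boundary check above (illustrative
 * numbers only): with a 16-byte descriptor at a bus address whose low
 * 12 bits are 0xFF8, ATH_DESC_4KB_BOUND_CHECK compares 0xFF8 against
 * 0x1000 - 16 = 0xFF0 and returns 1 - the descriptor would span
 * 0xFF8..0x1007 and straddle the 4KB page, so it must be pushed to
 * the next page boundary (see the skip logic in ath_descdma_setup()
 * below).
 */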

int
ath_descdma_setup(struct ath_softc *sc,
    struct ath_descdma *dd, ath_bufhead *head,
    const char *name, int ds_size, int nbuf, int ndesc)
{
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define	ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
	((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	/* Allocate descriptors */
	error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size,
	    nbuf * ndesc);

	/* Assume any errors during allocation were dealt with */
	if (error != 0) {
		return (error);
	}

	ds = (uint8_t *) dd->dd_desc;

	/* allocate rx buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %s buffers failed, size %u\n",
		    dd->dd_name, bsize);
		error = ENOMEM;
		goto fail3;
	}
	dd->dd_bufptr = bf;

	TAILQ_INIT(head);
	for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) {
		bf->bf_desc = (struct ath_desc *) ds;
		bf->bf_daddr = DS2PHYS(dd, ds);
		if (! ath_hal_split4ktrans(sc->sc_ah)) {
			/*
			 * Merlin WAR: Skip descriptor addresses which
			 * cause 4KB boundary crossing along any point
			 * in the descriptor.
			 */
			if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
			    dd->dd_descsize)) {
				/* Start at the next page */
				ds += 0x1000 - (bf->bf_daddr & 0xFFF);
				bf->bf_desc = (struct ath_desc *) ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
		    &bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for %s "
			    "buffer %u, error %u\n", dd->dd_name, i, error);
			ath_descdma_cleanup(sc, dd, head);
			return error;
		}
		bf->bf_lastds = bf->bf_desc;	/* Just an initial value */
		TAILQ_INSERT_TAIL(head, bf, bf_list);
	}

	/*
	 * XXX TODO: ensure that ds doesn't overflow the descriptor
	 * allocation otherwise weird stuff will occur and crash your
	 * machine.
	 */
	return 0;
	/* XXX this should likely just call ath_descdma_cleanup() */
fail3:
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);
	memset(dd, 0, sizeof(*dd));
	return error;
#undef	DS2PHYS
#undef	ATH_DESC_4KB_BOUND_CHECK
}
3795 */ 3796 dd->dd_desc_len = rx_status_len * nbuf; 3797 dd->dd_descsize = rx_status_len; 3798 3799 /* allocate rx buffers */ 3800 bsize = sizeof(struct ath_buf) * nbuf; 3801 bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT | M_ZERO); 3802 if (bf == NULL) { 3803 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 3804 dd->dd_name, bsize); 3805 error = ENOMEM; 3806 goto fail3; 3807 } 3808 dd->dd_bufptr = bf; 3809 3810 TAILQ_INIT(head); 3811 for (i = 0; i < nbuf; i++, bf++) { 3812 bf->bf_desc = NULL; 3813 bf->bf_daddr = 0; 3814 bf->bf_lastds = NULL; /* Just an initial value */ 3815 3816 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 3817 &bf->bf_dmamap); 3818 if (error != 0) { 3819 if_printf(ifp, "unable to create dmamap for %s " 3820 "buffer %u, error %u\n", dd->dd_name, i, error); 3821 ath_descdma_cleanup(sc, dd, head); 3822 return error; 3823 } 3824 TAILQ_INSERT_TAIL(head, bf, bf_list); 3825 } 3826 return 0; 3827 fail3: 3828 memset(dd, 0, sizeof(*dd)); 3829 return error; 3830 } 3831 3832 void 3833 ath_descdma_cleanup(struct ath_softc *sc, 3834 struct ath_descdma *dd, ath_bufhead *head) 3835 { 3836 struct ath_buf *bf; 3837 struct ieee80211_node *ni; 3838 int do_warning = 0; 3839 3840 if (dd->dd_dmamap != 0) { 3841 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 3842 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 3843 bus_dma_tag_destroy(dd->dd_dmat); 3844 } 3845 3846 if (head != NULL) { 3847 TAILQ_FOREACH(bf, head, bf_list) { 3848 if (bf->bf_m) { 3849 /* 3850 * XXX warn if there's buffers here. 3851 * XXX it should have been freed by the 3852 * owner! 3853 */ 3854 3855 if (do_warning == 0) { 3856 do_warning = 1; 3857 device_printf(sc->sc_dev, 3858 "%s: %s: mbuf should've been" 3859 " unmapped/freed!\n", 3860 __func__, 3861 dd->dd_name); 3862 } 3863 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3864 BUS_DMASYNC_POSTREAD); 3865 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3866 m_freem(bf->bf_m); 3867 bf->bf_m = NULL; 3868 } 3869 if (bf->bf_dmamap != NULL) { 3870 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 3871 bf->bf_dmamap = NULL; 3872 } 3873 ni = bf->bf_node; 3874 bf->bf_node = NULL; 3875 if (ni != NULL) { 3876 /* 3877 * Reclaim node reference. 3878 */ 3879 ieee80211_free_node(ni); 3880 } 3881 } 3882 } 3883 3884 if (head != NULL) 3885 TAILQ_INIT(head); 3886 3887 if (dd->dd_bufptr != NULL) 3888 kfree(dd->dd_bufptr, M_ATHDEV); 3889 memset(dd, 0, sizeof(*dd)); 3890 } 3891 3892 static int 3893 ath_desc_alloc(struct ath_softc *sc) 3894 { 3895 int error; 3896 3897 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3898 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER); 3899 if (error != 0) { 3900 return error; 3901 } 3902 sc->sc_txbuf_cnt = ath_txbuf; 3903 3904 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, 3905 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt, 3906 ATH_TXDESC); 3907 if (error != 0) { 3908 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3909 return error; 3910 } 3911 3912 /* 3913 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the 3914 * flag doesn't have to be set in ath_getbuf_locked(). 
3915 */ 3916 3917 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3918 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1); 3919 if (error != 0) { 3920 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3921 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3922 &sc->sc_txbuf_mgmt); 3923 return error; 3924 } 3925 return 0; 3926 } 3927 3928 static void 3929 ath_desc_free(struct ath_softc *sc) 3930 { 3931 3932 if (sc->sc_bdma.dd_desc_len != 0) 3933 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3934 if (sc->sc_txdma.dd_desc_len != 0) 3935 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3936 if (sc->sc_txdma_mgmt.dd_desc_len != 0) 3937 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3938 &sc->sc_txbuf_mgmt); 3939 } 3940 3941 static struct ieee80211_node * 3942 ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 3943 { 3944 struct ieee80211com *ic = vap->iv_ic; 3945 struct ath_softc *sc = ic->ic_ifp->if_softc; 3946 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3947 struct ath_node *an; 3948 3949 an = kmalloc(space, M_80211_NODE, M_INTWAIT|M_ZERO); 3950 if (an == NULL) { 3951 /* XXX stat+msg */ 3952 return NULL; 3953 } 3954 ath_rate_node_init(sc, an); 3955 3956 /* Setup the mutex - there's no associd yet so set the name to NULL */ 3957 ksnprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 3958 device_get_nameunit(sc->sc_dev), an); 3959 #if 0 3960 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 3961 #endif 3962 3963 /* XXX setup ath_tid */ 3964 ath_tx_tid_init(sc, an); 3965 3966 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__, 3967 ath_hal_ether_sprintf(mac), an); 3968 return &an->an_node; 3969 } 3970 3971 static void 3972 ath_node_cleanup(struct ieee80211_node *ni) 3973 { 3974 struct ieee80211com *ic = ni->ni_ic; 3975 struct ath_softc *sc = ic->ic_ifp->if_softc; 3976 3977 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__, 3978 ath_hal_ether_sprintf(ni->ni_macaddr), ATH_NODE(ni)); 3979 3980 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3981 ath_tx_node_flush(sc, ATH_NODE(ni)); 3982 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 3983 sc->sc_node_cleanup(ni); 3984 } 3985 3986 static void 3987 ath_node_free(struct ieee80211_node *ni) 3988 { 3989 struct ieee80211com *ic = ni->ni_ic; 3990 struct ath_softc *sc = ic->ic_ifp->if_softc; 3991 3992 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__, 3993 ath_hal_ether_sprintf(ni->ni_macaddr), ATH_NODE(ni)); 3994 #if 0 3995 mtx_destroy(&ATH_NODE(ni)->an_mtx); 3996 #endif 3997 sc->sc_node_free(ni); 3998 } 3999 4000 static void 4001 ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 4002 { 4003 struct ieee80211com *ic = ni->ni_ic; 4004 struct ath_softc *sc = ic->ic_ifp->if_softc; 4005 struct ath_hal *ah = sc->sc_ah; 4006 4007 *rssi = ic->ic_node_getrssi(ni); 4008 if (ni->ni_chan != IEEE80211_CHAN_ANYC) 4009 *noise = ath_hal_getchannoise(ah, ni->ni_chan); 4010 else 4011 *noise = -95; /* nominally correct */ 4012 } 4013 4014 /* 4015 * Set the default antenna. 
4016 */ 4017 void 4018 ath_setdefantenna(struct ath_softc *sc, u_int antenna) 4019 { 4020 struct ath_hal *ah = sc->sc_ah; 4021 4022 /* XXX block beacon interrupts */ 4023 ath_hal_setdefantenna(ah, antenna); 4024 if (sc->sc_defant != antenna) 4025 sc->sc_stats.ast_ant_defswitch++; 4026 sc->sc_defant = antenna; 4027 sc->sc_rxotherant = 0; 4028 } 4029 4030 static void 4031 ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 4032 { 4033 txq->axq_qnum = qnum; 4034 txq->axq_ac = 0; 4035 txq->axq_depth = 0; 4036 txq->axq_aggr_depth = 0; 4037 txq->axq_intrcnt = 0; 4038 txq->axq_link = NULL; 4039 txq->axq_softc = sc; 4040 TAILQ_INIT(&txq->axq_q); 4041 TAILQ_INIT(&txq->axq_tidq); 4042 TAILQ_INIT(&txq->fifo.axq_q); 4043 ATH_TXQ_LOCK_INIT(sc, txq); 4044 } 4045 4046 /* 4047 * Setup a h/w transmit queue. 4048 */ 4049 static struct ath_txq * 4050 ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 4051 { 4052 #define N(a) (sizeof(a)/sizeof(a[0])) 4053 struct ath_hal *ah = sc->sc_ah; 4054 HAL_TXQ_INFO qi; 4055 int qnum; 4056 4057 memset(&qi, 0, sizeof(qi)); 4058 qi.tqi_subtype = subtype; 4059 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 4060 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 4061 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 4062 /* 4063 * Enable interrupts only for EOL and DESC conditions. 4064 * We mark tx descriptors to receive a DESC interrupt 4065 * when a tx queue gets deep; otherwise waiting for the 4066 * EOL to reap descriptors. Note that this is done to 4067 * reduce interrupt load and this only defers reaping 4068 * descriptors, never transmitting frames. Aside from 4069 * reducing interrupts this also permits more concurrency. 4070 * The only potential downside is if the tx queue backs 4071 * up in which case the top half of the kernel may backup 4072 * due to a lack of tx descriptors. 4073 */ 4074 if (sc->sc_isedma) 4075 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | 4076 HAL_TXQ_TXOKINT_ENABLE; 4077 else 4078 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | 4079 HAL_TXQ_TXDESCINT_ENABLE; 4080 4081 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 4082 if (qnum == -1) { 4083 /* 4084 * NB: don't print a message, this happens 4085 * normally on parts with too few tx queues 4086 */ 4087 return NULL; 4088 } 4089 if (qnum >= N(sc->sc_txq)) { 4090 device_printf(sc->sc_dev, 4091 "hal qnum %u out of range, max %zu!\n", 4092 qnum, N(sc->sc_txq)); 4093 ath_hal_releasetxqueue(ah, qnum); 4094 return NULL; 4095 } 4096 if (!ATH_TXQ_SETUP(sc, qnum)) { 4097 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 4098 sc->sc_txqsetup |= 1<<qnum; 4099 } 4100 return &sc->sc_txq[qnum]; 4101 #undef N 4102 } 4103 4104 /* 4105 * Setup a hardware data transmit queue for the specified 4106 * access control. The hal may not support all requested 4107 * queues in which case it will return a reference to a 4108 * previously setup queue. We record the mapping from ac's 4109 * to h/w queues for use by ath_tx_start and also track 4110 * the set of h/w queues being used to optimize work in the 4111 * transmit interrupt handler and related routines. 
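 *
 * As a usage sketch (illustrative, not a literal code path):
 * once ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO) has succeeded,
 * the transmit side can resolve a frame's access category to its
 * hardware queue with something like
 *
 *	txq = sc->sc_ac2q[M_WME_GETAC(m)];
 *
 * which is the lookup ath_tx_start relies on.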
4112 */ 4113 static int 4114 ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 4115 { 4116 #define N(a) (sizeof(a)/sizeof(a[0])) 4117 struct ath_txq *txq; 4118 4119 if (ac >= N(sc->sc_ac2q)) { 4120 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 4121 ac, N(sc->sc_ac2q)); 4122 return 0; 4123 } 4124 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 4125 if (txq != NULL) { 4126 txq->axq_ac = ac; 4127 sc->sc_ac2q[ac] = txq; 4128 return 1; 4129 } else 4130 return 0; 4131 #undef N 4132 } 4133 4134 /* 4135 * Update WME parameters for a transmit queue. 4136 */ 4137 static int 4138 ath_txq_update(struct ath_softc *sc, int ac) 4139 { 4140 #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 4141 #define ATH_TXOP_TO_US(v) (v<<5) 4142 struct ifnet *ifp = sc->sc_ifp; 4143 struct ieee80211com *ic = ifp->if_l2com; 4144 struct ath_txq *txq = sc->sc_ac2q[ac]; 4145 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 4146 struct ath_hal *ah = sc->sc_ah; 4147 HAL_TXQ_INFO qi; 4148 4149 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 4150 #ifdef IEEE80211_SUPPORT_TDMA 4151 if (sc->sc_tdma) { 4152 /* 4153 * AIFS is zero so there's no pre-transmit wait. The 4154 * burst time defines the slot duration and is configured 4155 * through net80211. The QCU is setup to not do post-xmit 4156 * back off, lockout all lower-priority QCU's, and fire 4157 * off the DMA beacon alert timer which is setup based 4158 * on the slot configuration. 4159 */ 4160 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 4161 | HAL_TXQ_TXERRINT_ENABLE 4162 | HAL_TXQ_TXURNINT_ENABLE 4163 | HAL_TXQ_TXEOLINT_ENABLE 4164 | HAL_TXQ_DBA_GATED 4165 | HAL_TXQ_BACKOFF_DISABLE 4166 | HAL_TXQ_ARB_LOCKOUT_GLOBAL 4167 ; 4168 qi.tqi_aifs = 0; 4169 /* XXX +dbaprep? */ 4170 qi.tqi_readyTime = sc->sc_tdmaslotlen; 4171 qi.tqi_burstTime = qi.tqi_readyTime; 4172 } else { 4173 #endif 4174 /* 4175 * XXX shouldn't this just use the default flags 4176 * used in the previous queue setup? 4177 */ 4178 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 4179 | HAL_TXQ_TXERRINT_ENABLE 4180 | HAL_TXQ_TXDESCINT_ENABLE 4181 | HAL_TXQ_TXURNINT_ENABLE 4182 | HAL_TXQ_TXEOLINT_ENABLE 4183 ; 4184 qi.tqi_aifs = wmep->wmep_aifsn; 4185 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 4186 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 4187 qi.tqi_readyTime = 0; 4188 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 4189 #ifdef IEEE80211_SUPPORT_TDMA 4190 } 4191 #endif 4192 4193 DPRINTF(sc, ATH_DEBUG_RESET, 4194 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 4195 __func__, txq->axq_qnum, qi.tqi_qflags, 4196 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 4197 4198 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 4199 if_printf(ifp, "unable to update hardware queue " 4200 "parameters for %s traffic!\n", 4201 ieee80211_wme_acnames[ac]); 4202 return 0; 4203 } else { 4204 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 4205 return 1; 4206 } 4207 #undef ATH_TXOP_TO_US 4208 #undef ATH_EXPONENT_TO_VALUE 4209 } 4210 4211 /* 4212 * Callback from the 802.11 layer to update WME parameters. 4213 */ 4214 int 4215 ath_wme_update(struct ieee80211com *ic) 4216 { 4217 struct ath_softc *sc = ic->ic_ifp->if_softc; 4218 4219 return !ath_txq_update(sc, WME_AC_BE) || 4220 !ath_txq_update(sc, WME_AC_BK) || 4221 !ath_txq_update(sc, WME_AC_VI) || 4222 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 4223 } 4224 4225 /* 4226 * Reclaim resources for a setup queue. 
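 *
 * Note the sc_txqsetup bitmask: clearing bit axq_qnum below
 * undoes the "sc->sc_txqsetup |= 1<<qnum" done in ath_txq_setup,
 * so ATH_TXQ_SETUP() stops reporting this queue as live.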
4227 */ 4228 static void 4229 ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 4230 { 4231 4232 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 4233 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 4234 ATH_TXQ_LOCK_DESTROY(txq); 4235 } 4236 4237 /* 4238 * Reclaim all tx queue resources. 4239 */ 4240 static void 4241 ath_tx_cleanup(struct ath_softc *sc) 4242 { 4243 int i; 4244 4245 ATH_TXBUF_LOCK_DESTROY(sc); 4246 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4247 if (ATH_TXQ_SETUP(sc, i)) 4248 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 4249 } 4250 4251 /* 4252 * Return h/w rate index for an IEEE rate (w/o basic rate bit) 4253 * using the current rates in sc_rixmap. 4254 */ 4255 int 4256 ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) 4257 { 4258 int rix = sc->sc_rixmap[rate]; 4259 /* NB: return lowest rix for invalid rate */ 4260 return (rix == 0xff ? 0 : rix); 4261 } 4262 4263 static void 4264 ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, 4265 struct ath_buf *bf) 4266 { 4267 struct ieee80211_node *ni = bf->bf_node; 4268 struct ifnet *ifp = sc->sc_ifp; 4269 struct ieee80211com *ic = ifp->if_l2com; 4270 int sr, lr, pri; 4271 4272 if (ts->ts_status == 0) { 4273 u_int8_t txant = ts->ts_antenna; 4274 sc->sc_stats.ast_ant_tx[txant]++; 4275 sc->sc_ant_tx[txant]++; 4276 if (ts->ts_finaltsi != 0) 4277 sc->sc_stats.ast_tx_altrate++; 4278 pri = M_WME_GETAC(bf->bf_m); 4279 if (pri >= WME_AC_VO) 4280 ic->ic_wme.wme_hipri_traffic++; 4281 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) 4282 ni->ni_inact = ni->ni_inact_reload; 4283 } else { 4284 if (ts->ts_status & HAL_TXERR_XRETRY) 4285 sc->sc_stats.ast_tx_xretries++; 4286 if (ts->ts_status & HAL_TXERR_FIFO) 4287 sc->sc_stats.ast_tx_fifoerr++; 4288 if (ts->ts_status & HAL_TXERR_FILT) 4289 sc->sc_stats.ast_tx_filtered++; 4290 if (ts->ts_status & HAL_TXERR_XTXOP) 4291 sc->sc_stats.ast_tx_xtxop++; 4292 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) 4293 sc->sc_stats.ast_tx_timerexpired++; 4294 4295 if (bf->bf_m->m_flags & M_FF) 4296 sc->sc_stats.ast_ff_txerr++; 4297 } 4298 /* XXX when is this valid? */ 4299 if (ts->ts_flags & HAL_TX_DESC_CFG_ERR) 4300 sc->sc_stats.ast_tx_desccfgerr++; 4301 /* 4302 * This can be valid for successful frame transmission! 4303 * If there's a TX FIFO underrun during aggregate transmission, 4304 * the MAC will pad the rest of the aggregate with delimiters. 4305 * If a BA is returned, the frame is marked as "OK" and it's up 4306 * to the TX completion code to notice which frames weren't 4307 * successfully transmitted. 4308 */ 4309 if (ts->ts_flags & HAL_TX_DATA_UNDERRUN) 4310 sc->sc_stats.ast_tx_data_underrun++; 4311 if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN) 4312 sc->sc_stats.ast_tx_delim_underrun++; 4313 4314 sr = ts->ts_shortretry; 4315 lr = ts->ts_longretry; 4316 sc->sc_stats.ast_tx_shortretry += sr; 4317 sc->sc_stats.ast_tx_longretry += lr; 4318 4319 } 4320 4321 /* 4322 * The default completion. If fail is 1, this means 4323 * "please don't retry the frame, and just return -1 status 4324 * to the net80211 stack. 4325 */ 4326 void 4327 ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) 4328 { 4329 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; 4330 int st; 4331 4332 if (fail == 1) 4333 st = -1; 4334 else 4335 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ? 
4336 ts->ts_status : HAL_TXERR_XRETRY; 4337 4338 #if 0 4339 if (bf->bf_state.bfs_dobaw) 4340 device_printf(sc->sc_dev, 4341 "%s: bf %p: seqno %d: dobaw should've been cleared!\n", 4342 __func__, 4343 bf, 4344 SEQNO(bf->bf_state.bfs_seqno)); 4345 #endif 4346 if (bf->bf_next != NULL) 4347 device_printf(sc->sc_dev, 4348 "%s: bf %p: seqno %d: bf_next not NULL!\n", 4349 __func__, 4350 bf, 4351 SEQNO(bf->bf_state.bfs_seqno)); 4352 4353 /* 4354 * Check if the node software queue is empty; if so 4355 * then clear the TIM. 4356 * 4357 * This needs to be done before the buffer is freed as 4358 * otherwise the node reference will have been released 4359 * and the node may not actually exist any longer. 4360 * 4361 * XXX I don't like this belonging here, but it's cleaner 4362 * to do it here right now then all the other places 4363 * where ath_tx_default_comp() is called. 4364 * 4365 * XXX TODO: during drain, ensure that the callback is 4366 * being called so we get a chance to update the TIM. 4367 */ 4368 if (bf->bf_node) { 4369 ATH_TX_LOCK(sc); 4370 ath_tx_update_tim(sc, bf->bf_node, 0); 4371 ATH_TX_UNLOCK(sc); 4372 } 4373 4374 /* 4375 * Do any tx complete callback. Note this must 4376 * be done before releasing the node reference. 4377 * This will free the mbuf, release the net80211 4378 * node and recycle the ath_buf. 4379 */ 4380 ath_tx_freebuf(sc, bf, st); 4381 } 4382 4383 /* 4384 * Update rate control with the given completion status. 4385 */ 4386 void 4387 ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 4388 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 4389 int nframes, int nbad) 4390 { 4391 struct ath_node *an; 4392 4393 /* Only for unicast frames */ 4394 if (ni == NULL) 4395 return; 4396 4397 an = ATH_NODE(ni); 4398 ATH_NODE_UNLOCK_ASSERT(an); 4399 4400 if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 4401 ATH_NODE_LOCK(an); 4402 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 4403 ATH_NODE_UNLOCK(an); 4404 } 4405 } 4406 4407 /* 4408 * Process the completion of the given buffer. 4409 * 4410 * This calls the rate control update and then the buffer completion. 4411 * This will either free the buffer or requeue it. In any case, the 4412 * bf pointer should be treated as invalid after this function is called. 4413 */ 4414 void 4415 ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq, 4416 struct ath_tx_status *ts, struct ath_buf *bf) 4417 { 4418 struct ieee80211_node *ni = bf->bf_node; 4419 struct ath_node *an = NULL; 4420 4421 ATH_TX_UNLOCK_ASSERT(sc); 4422 ATH_TXQ_UNLOCK_ASSERT(txq); 4423 4424 /* If unicast frame, update general statistics */ 4425 if (ni != NULL) { 4426 an = ATH_NODE(ni); 4427 /* update statistics */ 4428 ath_tx_update_stats(sc, ts, bf); 4429 } 4430 4431 /* 4432 * Call the completion handler. 4433 * The completion handler is responsible for 4434 * calling the rate control code. 4435 * 4436 * Frames with no completion handler get the 4437 * rate control code called here. 4438 */ 4439 if (bf->bf_comp == NULL) { 4440 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 4441 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 4442 /* 4443 * XXX assume this isn't an aggregate 4444 * frame. 4445 */ 4446 ath_tx_update_ratectrl(sc, ni, 4447 bf->bf_state.bfs_rc, ts, 4448 bf->bf_state.bfs_pktlen, 1, 4449 (ts->ts_status == 0 ? 0 : 1)); 4450 } 4451 ath_tx_default_comp(sc, bf, 0); 4452 } else 4453 bf->bf_comp(sc, bf, 0); 4454 } 4455 4456 4457 4458 /* 4459 * Process completed xmit descriptors from the specified queue. 
4460 * Kick the packet scheduler if needed. This can occur from this 4461 * particular task. 4462 */ 4463 static int 4464 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 4465 { 4466 struct ath_hal *ah = sc->sc_ah; 4467 struct ath_buf *bf; 4468 struct ath_desc *ds; 4469 struct ath_tx_status *ts; 4470 struct ieee80211_node *ni; 4471 #ifdef IEEE80211_SUPPORT_SUPERG 4472 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 4473 #endif /* IEEE80211_SUPPORT_SUPERG */ 4474 int nacked; 4475 HAL_STATUS status; 4476 4477 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 4478 __func__, txq->axq_qnum, 4479 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4480 txq->axq_link); 4481 4482 ATH_KTR(sc, ATH_KTR_TXCOMP, 4, 4483 "ath_tx_processq: txq=%u head %p link %p depth %p", 4484 txq->axq_qnum, 4485 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 4486 txq->axq_link, 4487 txq->axq_depth); 4488 4489 nacked = 0; 4490 for (;;) { 4491 ATH_TXQ_LOCK(txq); 4492 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 4493 bf = TAILQ_FIRST(&txq->axq_q); 4494 if (bf == NULL) { 4495 ATH_TXQ_UNLOCK(txq); 4496 break; 4497 } 4498 ds = bf->bf_lastds; /* XXX must be setup correctly! */ 4499 ts = &bf->bf_status.ds_txstat; 4500 4501 status = ath_hal_txprocdesc(ah, ds, ts); 4502 #ifdef ATH_DEBUG 4503 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 4504 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 4505 status == HAL_OK); 4506 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) 4507 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 4508 status == HAL_OK); 4509 #endif 4510 #ifdef ATH_DEBUG_ALQ 4511 if (if_ath_alq_checkdebug(&sc->sc_alq, 4512 ATH_ALQ_EDMA_TXSTATUS)) { 4513 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS, 4514 sc->sc_tx_statuslen, 4515 (char *) ds); 4516 } 4517 #endif 4518 4519 if (status == HAL_EINPROGRESS) { 4520 ATH_KTR(sc, ATH_KTR_TXCOMP, 3, 4521 "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS", 4522 txq->axq_qnum, bf, ds); 4523 ATH_TXQ_UNLOCK(txq); 4524 break; 4525 } 4526 ATH_TXQ_REMOVE(txq, bf, bf_list); 4527 4528 /* 4529 * Sanity check. 4530 */ 4531 if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) { 4532 device_printf(sc->sc_dev, 4533 "%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n", 4534 __func__, 4535 txq->axq_qnum, 4536 bf, 4537 bf->bf_state.bfs_tx_queue); 4538 } 4539 if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) { 4540 device_printf(sc->sc_dev, 4541 "%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n", 4542 __func__, 4543 txq->axq_qnum, 4544 bf->bf_last, 4545 bf->bf_last->bf_state.bfs_tx_queue); 4546 } 4547 4548 #if 0 4549 if (txq->axq_depth > 0) { 4550 /* 4551 * More frames follow. Mark the buffer busy 4552 * so it's not re-used while the hardware may 4553 * still re-read the link field in the descriptor. 4554 * 4555 * Use the last buffer in an aggregate as that 4556 * is where the hardware may be - intermediate 4557 * descriptors won't be "busy". 4558 */ 4559 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 4560 } else 4561 txq->axq_link = NULL; 4562 #else 4563 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 4564 #endif 4565 if (bf->bf_state.bfs_aggr) 4566 txq->axq_aggr_depth--; 4567 4568 ni = bf->bf_node; 4569 4570 ATH_KTR(sc, ATH_KTR_TXCOMP, 5, 4571 "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x", 4572 txq->axq_qnum, bf, ds, ni, ts->ts_status); 4573 /* 4574 * If unicast frame was ack'd update RSSI, 4575 * including the last rx time used to 4576 * workaround phantom bmiss interrupts. 
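 *
 * (ATH_RSSI_LPF below is a simple low-pass filter, roughly an
 * exponential moving average of the form avg += (sample - avg)/N,
 * so a single unusually strong or weak ACK doesn't yank the
 * long-term TX RSSI around; see the macro definition in
 * if_athvar.h.)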
4577 */ 4578 if (ni != NULL && ts->ts_status == 0 && 4579 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 4580 nacked++; 4581 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 4582 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 4583 ts->ts_rssi); 4584 } 4585 ATH_TXQ_UNLOCK(txq); 4586 4587 /* 4588 * Update statistics and call completion 4589 */ 4590 ath_tx_process_buf_completion(sc, txq, ts, bf); 4591 4592 /* XXX at this point, bf and ni may be totally invalid */ 4593 } 4594 #ifdef IEEE80211_SUPPORT_SUPERG 4595 /* 4596 * Flush fast-frame staging queue when traffic slows. 4597 */ 4598 if (txq->axq_depth <= 1) 4599 ieee80211_ff_flush(ic, txq->axq_ac); 4600 #endif 4601 4602 /* Kick the software TXQ scheduler */ 4603 if (dosched) { 4604 ATH_TX_LOCK(sc); 4605 ath_txq_sched(sc, txq); 4606 ATH_TX_UNLOCK(sc); 4607 } 4608 4609 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4610 "ath_tx_processq: txq=%u: done", 4611 txq->axq_qnum); 4612 4613 return nacked; 4614 } 4615 4616 #define TXQACTIVE(t, q) ( (t) & (1 << (q))) 4617 4618 /* 4619 * Deferred processing of transmit interrupt; special-cased 4620 * for a single hardware transmit queue (e.g. 5210 and 5211). 4621 */ 4622 static void 4623 ath_tx_proc_q0(void *arg, int npending) 4624 { 4625 struct ath_softc *sc = arg; 4626 #if 0 4627 struct ifnet *ifp = sc->sc_ifp; 4628 #endif 4629 uint32_t txqs; 4630 4631 wlan_serialize_enter(); 4632 ATH_PCU_LOCK(sc); 4633 sc->sc_txproc_cnt++; 4634 txqs = sc->sc_txq_active; 4635 sc->sc_txq_active &= ~txqs; 4636 ATH_PCU_UNLOCK(sc); 4637 4638 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4639 4640 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4641 "ath_tx_proc_q0: txqs=0x%08x", txqs); 4642 4643 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 4644 /* XXX why is lastrx updated in tx code? */ 4645 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4646 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 4647 ath_tx_processq(sc, sc->sc_cabq, 1); 4648 #if 0 4649 /* remove, DragonFly uses OACTIVE to control if_start calls */ 4650 IF_LOCK(&ifp->if_snd); 4651 ifq_clr_oactive(&ifp->if_snd); 4652 IF_UNLOCK(&ifp->if_snd); 4653 #endif 4654 sc->sc_wd_timer = 0; 4655 4656 if (sc->sc_softled) 4657 ath_led_event(sc, sc->sc_txrix); 4658 4659 ATH_PCU_LOCK(sc); 4660 sc->sc_txproc_cnt--; 4661 ATH_PCU_UNLOCK(sc); 4662 4663 ath_power_restore_power_state(sc); 4664 4665 ath_tx_kick(sc); 4666 wlan_serialize_exit(); 4667 } 4668 4669 /* 4670 * Deferred processing of transmit interrupt; special-cased 4671 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 4672 */ 4673 static void 4674 ath_tx_proc_q0123(void *arg, int npending) 4675 { 4676 struct ath_softc *sc = arg; 4677 #if 0 4678 struct ifnet *ifp = sc->sc_ifp; 4679 #endif 4680 int nacked; 4681 uint32_t txqs; 4682 4683 wlan_serialize_enter(); 4684 ATH_PCU_LOCK(sc); 4685 sc->sc_txproc_cnt++; 4686 txqs = sc->sc_txq_active; 4687 sc->sc_txq_active &= ~txqs; 4688 ATH_PCU_UNLOCK(sc); 4689 4690 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4691 4692 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4693 "ath_tx_proc_q0123: txqs=0x%08x", txqs); 4694 4695 /* 4696 * Process each active queue. 
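 *
 * TXQACTIVE(txqs, q) just tests bit q of the snapshot taken
 * under ATH_PCU_LOCK above; e.g. txqs = 0x0b means queues 0, 1
 * and 3 had completions pending when this task ran.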
4697 */ 4698 nacked = 0; 4699 if (TXQACTIVE(txqs, 0)) 4700 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 4701 if (TXQACTIVE(txqs, 1)) 4702 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 4703 if (TXQACTIVE(txqs, 2)) 4704 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 4705 if (TXQACTIVE(txqs, 3)) 4706 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 4707 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 4708 ath_tx_processq(sc, sc->sc_cabq, 1); 4709 if (nacked) 4710 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4711 4712 #if 0 4713 /* remove, DragonFly uses OACTIVE to control if_start calls */ 4714 IF_LOCK(&ifp->if_snd); 4715 ifq_clr_oactive(&ifp->if_snd); 4716 IF_UNLOCK(&ifp->if_snd); 4717 #endif 4718 sc->sc_wd_timer = 0; 4719 4720 if (sc->sc_softled) 4721 ath_led_event(sc, sc->sc_txrix); 4722 4723 ATH_PCU_LOCK(sc); 4724 sc->sc_txproc_cnt--; 4725 ATH_PCU_UNLOCK(sc); 4726 4727 ath_power_restore_power_state(sc); 4728 4729 ath_tx_kick(sc); 4730 wlan_serialize_exit(); 4731 } 4732 4733 /* 4734 * Deferred processing of transmit interrupt. 4735 */ 4736 static void 4737 ath_tx_proc(void *arg, int npending) 4738 { 4739 struct ath_softc *sc = arg; 4740 #if 0 4741 struct ifnet *ifp = sc->sc_ifp; 4742 #endif 4743 int i, nacked; 4744 uint32_t txqs; 4745 4746 wlan_serialize_enter(); 4747 ATH_PCU_LOCK(sc); 4748 sc->sc_txproc_cnt++; 4749 txqs = sc->sc_txq_active; 4750 sc->sc_txq_active &= ~txqs; 4751 ATH_PCU_UNLOCK(sc); 4752 4753 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4754 4755 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs); 4756 4757 /* 4758 * Process each active queue. 4759 */ 4760 nacked = 0; 4761 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4762 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 4763 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 4764 if (nacked) 4765 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4766 4767 #if 0 4768 /* remove, DragonFly uses OACTIVE to control if_start calls */ 4769 /* XXX check this inside of IF_LOCK? */ 4770 IF_LOCK(&ifp->if_snd); 4771 ifq_clr_oactive(&ifp->if_snd); 4772 IF_UNLOCK(&ifp->if_snd); 4773 #endif 4774 sc->sc_wd_timer = 0; 4775 4776 if (sc->sc_softled) 4777 ath_led_event(sc, sc->sc_txrix); 4778 4779 ATH_PCU_LOCK(sc); 4780 sc->sc_txproc_cnt--; 4781 ATH_PCU_UNLOCK(sc); 4782 4783 ath_power_restore_power_state(sc); 4784 4785 ath_tx_kick(sc); 4786 wlan_serialize_exit(); 4787 } 4788 #undef TXQACTIVE 4789 4790 /* 4791 * Deferred processing of TXQ rescheduling. 4792 */ 4793 static void 4794 ath_txq_sched_tasklet(void *arg, int npending) 4795 { 4796 struct ath_softc *sc = arg; 4797 int i; 4798 4799 wlan_serialize_enter(); 4800 4801 /* XXX is skipping ok? 
*/ 4802 ATH_PCU_LOCK(sc); 4803 #if 0 4804 if (sc->sc_inreset_cnt > 0) { 4805 device_printf(sc->sc_dev, 4806 "%s: sc_inreset_cnt > 0; skipping\n", __func__); 4807 ATH_PCU_UNLOCK(sc); 4808 wlan_serialize_exit(); 4809 return; 4810 } 4811 #endif 4812 sc->sc_txproc_cnt++; 4813 ATH_PCU_UNLOCK(sc); 4814 4815 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4816 4817 ATH_TX_LOCK(sc); 4818 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 4819 if (ATH_TXQ_SETUP(sc, i)) { 4820 ath_txq_sched(sc, &sc->sc_txq[i]); 4821 } 4822 } 4823 ATH_TX_UNLOCK(sc); 4824 4825 ath_power_restore_power_state(sc); 4826 4827 ATH_PCU_LOCK(sc); 4828 sc->sc_txproc_cnt--; 4829 ATH_PCU_UNLOCK(sc); 4830 wlan_serialize_exit(); 4831 } 4832 4833 void 4834 ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) 4835 { 4836 4837 ATH_TXBUF_LOCK_ASSERT(sc); 4838 4839 if (bf->bf_flags & ATH_BUF_MGMT) 4840 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); 4841 else { 4842 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 4843 sc->sc_txbuf_cnt++; 4844 if (sc->sc_txbuf_cnt > ath_txbuf) { 4845 device_printf(sc->sc_dev, 4846 "%s: sc_txbuf_cnt > %d?\n", 4847 __func__, 4848 ath_txbuf); 4849 sc->sc_txbuf_cnt = ath_txbuf; 4850 } 4851 } 4852 } 4853 4854 void 4855 ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) 4856 { 4857 4858 ATH_TXBUF_LOCK_ASSERT(sc); 4859 4860 if (bf->bf_flags & ATH_BUF_MGMT) 4861 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); 4862 else { 4863 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 4864 sc->sc_txbuf_cnt++; 4865 if (sc->sc_txbuf_cnt > ATH_TXBUF) { 4866 device_printf(sc->sc_dev, 4867 "%s: sc_txbuf_cnt > %d?\n", 4868 __func__, 4869 ATH_TXBUF); 4870 sc->sc_txbuf_cnt = ATH_TXBUF; 4871 } 4872 } 4873 } 4874 4875 /* 4876 * Free the holding buffer if it exists 4877 */ 4878 void 4879 ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq) 4880 { 4881 ATH_TXBUF_UNLOCK_ASSERT(sc); 4882 ATH_TXQ_LOCK_ASSERT(txq); 4883 4884 if (txq->axq_holdingbf == NULL) 4885 return; 4886 4887 txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY; 4888 4889 ATH_TXBUF_LOCK(sc); 4890 ath_returnbuf_tail(sc, txq->axq_holdingbf); 4891 ATH_TXBUF_UNLOCK(sc); 4892 4893 txq->axq_holdingbf = NULL; 4894 } 4895 4896 /* 4897 * Add this buffer to the holding queue, freeing the previous 4898 * one if it exists. 4899 */ 4900 static void 4901 ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf) 4902 { 4903 struct ath_txq *txq; 4904 4905 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; 4906 4907 ATH_TXBUF_UNLOCK_ASSERT(sc); 4908 ATH_TXQ_LOCK_ASSERT(txq); 4909 4910 /* XXX assert ATH_BUF_BUSY is set */ 4911 4912 /* XXX assert the tx queue is under the max number */ 4913 if (bf->bf_state.bfs_tx_queue > HAL_NUM_TX_QUEUES) { 4914 device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n", 4915 __func__, 4916 bf, 4917 bf->bf_state.bfs_tx_queue); 4918 bf->bf_flags &= ~ATH_BUF_BUSY; 4919 ath_returnbuf_tail(sc, bf); 4920 return; 4921 } 4922 ath_txq_freeholdingbuf(sc, txq); 4923 txq->axq_holdingbf = bf; 4924 } 4925 4926 /* 4927 * Return a buffer to the pool and update the 'busy' flag on the 4928 * previous 'tail' entry. 4929 * 4930 * This _must_ only be called when the buffer is involved in a completed 4931 * TX. The logic is that if it was part of an active TX, the previous 4932 * buffer on the list is now not involved in a halted TX DMA queue, waiting 4933 * for restart (eg for TDMA.) 4934 * 4935 * The caller must free the mbuf and recycle the node reference. 4936 * 4937 * XXX This method of handling busy / holding buffers is insanely stupid. 
4938 * It requires bf_state.bfs_tx_queue to be correctly assigned. It would 4939 * be much nicer if buffers in the processq() methods were instead 4940 * always completed there (pushed onto a txq or ath_bufhead) so we knew 4941 * exactly what hardware queue they came from in the first place. 4942 */ 4943 void 4944 ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 4945 { 4946 struct ath_txq *txq; 4947 4948 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; 4949 4950 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 4951 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 4952 4953 /* 4954 * If this buffer is busy, push it onto the holding queue. 4955 */ 4956 if (bf->bf_flags & ATH_BUF_BUSY) { 4957 ATH_TXQ_LOCK(txq); 4958 ath_txq_addholdingbuf(sc, bf); 4959 ATH_TXQ_UNLOCK(txq); 4960 return; 4961 } 4962 4963 /* 4964 * Not a busy buffer, so free normally 4965 */ 4966 ATH_TXBUF_LOCK(sc); 4967 ath_returnbuf_tail(sc, bf); 4968 ATH_TXBUF_UNLOCK(sc); 4969 } 4970 4971 /* 4972 * This is currently used by ath_tx_draintxq() and 4973 * ath_tx_tid_free_pkts(). 4974 * 4975 * It recycles a single ath_buf. 4976 */ 4977 void 4978 ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 4979 { 4980 struct ieee80211_node *ni = bf->bf_node; 4981 struct mbuf *m0 = bf->bf_m; 4982 4983 /* 4984 * Make sure that we only sync/unload if there's an mbuf. 4985 * If not (eg we cloned a buffer), the unload will have already 4986 * occurred. 4987 */ 4988 if (bf->bf_m != NULL) { 4989 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4990 BUS_DMASYNC_POSTWRITE); 4991 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 4992 } 4993 4994 bf->bf_node = NULL; 4995 bf->bf_m = NULL; 4996 4997 /* Free the buffer, it's not needed any longer */ 4998 ath_freebuf(sc, bf); 4999 5000 /* Pass the buffer back to net80211 - completing it */ 5001 ieee80211_tx_complete(ni, m0, status); 5002 } 5003 5004 static struct ath_buf * 5005 ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq) 5006 { 5007 struct ath_buf *bf; 5008 5009 ATH_TXQ_LOCK_ASSERT(txq); 5010 5011 /* 5012 * Drain the FIFO queue first, then if it's 5013 * empty, move to the normal frame queue. 5014 */ 5015 bf = TAILQ_FIRST(&txq->fifo.axq_q); 5016 if (bf != NULL) { 5017 /* 5018 * Is it the last buffer in this set? 5019 * Decrement the FIFO counter. 5020 */ 5021 if (bf->bf_flags & ATH_BUF_FIFOEND) { 5022 if (txq->axq_fifo_depth == 0) { 5023 device_printf(sc->sc_dev, 5024 "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n", 5025 __func__, 5026 txq->axq_qnum, 5027 txq->fifo.axq_depth); 5028 } else 5029 txq->axq_fifo_depth--; 5030 } 5031 ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list); 5032 return (bf); 5033 } 5034 5035 /* 5036 * Debugging! 5037 */ 5038 if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) { 5039 device_printf(sc->sc_dev, 5040 "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n", 5041 __func__, 5042 txq->axq_qnum, 5043 txq->axq_fifo_depth, 5044 txq->fifo.axq_depth); 5045 } 5046 5047 /* 5048 * Now drain the pending queue.
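 *
 * (The FIFO list drained first above is only populated by the
 * EDMA TX path; on legacy chips everything sits on axq_q, so
 * lookup falls straight through to here.)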
5049 */ 5050 bf = TAILQ_FIRST(&txq->axq_q); 5051 if (bf == NULL) { 5052 txq->axq_link = NULL; 5053 return (NULL); 5054 } 5055 ATH_TXQ_REMOVE(txq, bf, bf_list); 5056 return (bf); 5057 } 5058 5059 void 5060 ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 5061 { 5062 #ifdef ATH_DEBUG 5063 struct ath_hal *ah = sc->sc_ah; 5064 #endif 5065 struct ath_buf *bf; 5066 u_int ix; 5067 5068 /* 5069 * NB: this assumes output has been stopped and 5070 * we do not need to block ath_tx_proc 5071 */ 5072 for (ix = 0;; ix++) { 5073 ATH_TXQ_LOCK(txq); 5074 bf = ath_tx_draintxq_get_one(sc, txq); 5075 if (bf == NULL) { 5076 ATH_TXQ_UNLOCK(txq); 5077 break; 5078 } 5079 if (bf->bf_state.bfs_aggr) 5080 txq->axq_aggr_depth--; 5081 #ifdef ATH_DEBUG 5082 if (sc->sc_debug & ATH_DEBUG_RESET) { 5083 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 5084 int status = 0; 5085 5086 /* 5087 * EDMA operation has a TX completion FIFO 5088 * separate from the TX descriptor, so this 5089 * method of checking the "completion" status 5090 * is wrong. 5091 */ 5092 if (! sc->sc_isedma) { 5093 status = (ath_hal_txprocdesc(ah, 5094 bf->bf_lastds, 5095 &bf->bf_status.ds_txstat) == HAL_OK); 5096 } 5097 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status); 5098 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 5099 bf->bf_m->m_len, 0, -1); 5100 } 5101 #endif /* ATH_DEBUG */ 5102 /* 5103 * Since we're now doing magic in the completion 5104 * functions, we -must- call it for aggregation 5105 * destinations or BAW tracking will get upset. 5106 */ 5107 /* 5108 * Clear ATH_BUF_BUSY; the completion handler 5109 * will free the buffer. 5110 */ 5111 ATH_TXQ_UNLOCK(txq); 5112 bf->bf_flags &= ~ATH_BUF_BUSY; 5113 if (bf->bf_comp) 5114 bf->bf_comp(sc, bf, 1); 5115 else 5116 ath_tx_default_comp(sc, bf, 1); 5117 } 5118 5119 /* 5120 * Free the holding buffer if it exists 5121 */ 5122 ATH_TXQ_LOCK(txq); 5123 ath_txq_freeholdingbuf(sc, txq); 5124 ATH_TXQ_UNLOCK(txq); 5125 5126 /* 5127 * Drain software queued frames which are on 5128 * active TIDs. 5129 */ 5130 ath_tx_txq_drain(sc, txq); 5131 } 5132 5133 static void 5134 ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 5135 { 5136 struct ath_hal *ah = sc->sc_ah; 5137 5138 ATH_TXQ_LOCK_ASSERT(txq); 5139 5140 DPRINTF(sc, ATH_DEBUG_RESET, 5141 "%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, " 5142 "link %p, holdingbf=%p\n", 5143 __func__, 5144 txq->axq_qnum, 5145 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 5146 (int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)), 5147 (int) ath_hal_numtxpending(ah, txq->axq_qnum), 5148 txq->axq_flags, 5149 txq->axq_link, 5150 txq->axq_holdingbf); 5151 5152 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 5153 /* We've stopped TX DMA, so mark this as stopped. 
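 * (ATH_TXQ_PUTRUNNING roughly records that the hardware has been
 * handed this queue's head pointer; with it cleared, the restart
 * path knows it must write TXDP afresh instead of merely chaining
 * descriptors onto axq_link.)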
*/ 5154 txq->axq_flags &= ~ATH_TXQ_PUTRUNNING; 5155 5156 #ifdef ATH_DEBUG 5157 if ((sc->sc_debug & ATH_DEBUG_RESET) 5158 && (txq->axq_holdingbf != NULL)) { 5159 ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0); 5160 } 5161 #endif 5162 } 5163 5164 int 5165 ath_stoptxdma(struct ath_softc *sc) 5166 { 5167 struct ath_hal *ah = sc->sc_ah; 5168 int i; 5169 5170 /* XXX return value */ 5171 if (sc->sc_invalid) 5172 return 0; 5173 5174 if (!sc->sc_invalid) { 5175 /* don't touch the hardware if marked invalid */ 5176 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 5177 __func__, sc->sc_bhalq, 5178 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 5179 NULL); 5180 5181 /* stop the beacon queue */ 5182 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 5183 5184 /* Stop the data queues */ 5185 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5186 if (ATH_TXQ_SETUP(sc, i)) { 5187 ATH_TXQ_LOCK(&sc->sc_txq[i]); 5188 ath_tx_stopdma(sc, &sc->sc_txq[i]); 5189 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 5190 } 5191 } 5192 } 5193 5194 return 1; 5195 } 5196 5197 #ifdef ATH_DEBUG 5198 void 5199 ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq) 5200 { 5201 struct ath_hal *ah = sc->sc_ah; 5202 struct ath_buf *bf; 5203 int i = 0; 5204 5205 if (! (sc->sc_debug & ATH_DEBUG_RESET)) 5206 return; 5207 5208 device_printf(sc->sc_dev, "%s: Q%d: begin\n", 5209 __func__, txq->axq_qnum); 5210 TAILQ_FOREACH(bf, &txq->axq_q, bf_list) { 5211 ath_printtxbuf(sc, bf, txq->axq_qnum, i, 5212 ath_hal_txprocdesc(ah, bf->bf_lastds, 5213 &bf->bf_status.ds_txstat) == HAL_OK); 5214 i++; 5215 } 5216 device_printf(sc->sc_dev, "%s: Q%d: end\n", 5217 __func__, txq->axq_qnum); 5218 } 5219 #endif /* ATH_DEBUG */ 5220 5221 /* 5222 * Drain the transmit queues and reclaim resources. 5223 */ 5224 void 5225 ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 5226 { 5227 struct ath_hal *ah = sc->sc_ah; 5228 #ifdef ATH_DEBUG 5229 struct ifnet *ifp = sc->sc_ifp; 5230 #endif 5231 int i; 5232 struct ath_buf *bf_last; 5233 5234 (void) ath_stoptxdma(sc); 5235 5236 /* 5237 * Dump the queue contents 5238 */ 5239 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5240 /* 5241 * XXX TODO: should we just handle the completed TX frames 5242 * here, whether or not the reset is a full one or not? 5243 */ 5244 if (ATH_TXQ_SETUP(sc, i)) { 5245 #ifdef ATH_DEBUG 5246 if (sc->sc_debug & ATH_DEBUG_RESET) 5247 ath_tx_dump(sc, &sc->sc_txq[i]); 5248 #endif /* ATH_DEBUG */ 5249 if (reset_type == ATH_RESET_NOLOSS) { 5250 ath_tx_processq(sc, &sc->sc_txq[i], 0); 5251 ATH_TXQ_LOCK(&sc->sc_txq[i]); 5252 /* 5253 * Free the holding buffer; DMA is now 5254 * stopped. 5255 */ 5256 ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]); 5257 /* 5258 * Setup the link pointer to be the 5259 * _last_ buffer/descriptor in the list. 5260 * If there's nothing in the list, set it 5261 * to NULL. 
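 *
 * That way, frames queued after a loss-free reset are chained
 * onto the surviving tail rather than clobbering it when TX
 * DMA is restarted.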
5262 */ 5263 bf_last = ATH_TXQ_LAST(&sc->sc_txq[i], 5264 axq_q_s); 5265 if (bf_last != NULL) { 5266 ath_hal_gettxdesclinkptr(ah, 5267 bf_last->bf_lastds, 5268 &sc->sc_txq[i].axq_link); 5269 } else { 5270 sc->sc_txq[i].axq_link = NULL; 5271 } 5272 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 5273 } else 5274 ath_tx_draintxq(sc, &sc->sc_txq[i]); 5275 } 5276 } 5277 #ifdef ATH_DEBUG 5278 if (sc->sc_debug & ATH_DEBUG_RESET) { 5279 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); 5280 if (bf != NULL && bf->bf_m != NULL) { 5281 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, 5282 ath_hal_txprocdesc(ah, bf->bf_lastds, 5283 &bf->bf_status.ds_txstat) == HAL_OK); 5284 ieee80211_dump_pkt(ifp->if_l2com, 5285 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 5286 0, -1); 5287 } 5288 } 5289 #endif /* ATH_DEBUG */ 5290 #if 0 5291 /* remove, DragonFly uses OACTIVE to control if_start calls */ 5292 IF_LOCK(&ifp->if_snd); 5293 ifq_clr_oactive(&ifp->if_snd); 5294 IF_UNLOCK(&ifp->if_snd); 5295 #endif 5296 sc->sc_wd_timer = 0; 5297 } 5298 5299 /* 5300 * Update internal state after a channel change. 5301 */ 5302 static void 5303 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 5304 { 5305 enum ieee80211_phymode mode; 5306 5307 /* 5308 * Change channels and update the h/w rate map 5309 * if we're switching; e.g. 11a to 11b/g. 5310 */ 5311 mode = ieee80211_chan2mode(chan); 5312 if (mode != sc->sc_curmode) 5313 ath_setcurmode(sc, mode); 5314 sc->sc_curchan = chan; 5315 } 5316 5317 /* 5318 * Set/change channels. If the channel is really being changed, 5319 * it's done by resetting the chip. To accomplish this we must 5320 * first clean up any pending DMA, then restart things a la 5321 * ath_init. 5322 */ 5323 static int 5324 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 5325 { 5326 struct ifnet *ifp = sc->sc_ifp; 5327 struct ieee80211com *ic = ifp->if_l2com; 5328 struct ath_hal *ah = sc->sc_ah; 5329 int ret = 0; 5330 5331 /* Treat this as an interface reset */ 5332 ATH_PCU_UNLOCK_ASSERT(sc); 5333 ATH_UNLOCK_ASSERT(sc); 5334 5335 /* (Try to) stop TX/RX from occurring */ 5336 taskqueue_block(sc->sc_tq); 5337 5338 ATH_PCU_LOCK(sc); 5339 5340 /* Stop new RX/TX/interrupt completion */ 5341 if (ath_reset_grablock(sc, 1) == 0) { 5342 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 5343 __func__); 5344 } 5345 5346 ath_hal_intrset(ah, 0); 5347 5348 /* Stop pending RX/TX completion */ 5349 ath_txrx_stop_locked(sc); 5350 5351 ATH_PCU_UNLOCK(sc); 5352 5353 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", 5354 __func__, ieee80211_chan2ieee(ic, chan), 5355 chan->ic_freq, chan->ic_flags); 5356 if (chan != sc->sc_curchan) { 5357 HAL_STATUS status; 5358 /* 5359 * To switch channels clear any pending DMA operations; 5360 * wait long enough for the RX fifo to drain, reset the 5361 * hardware at the new frequency, and then re-enable 5362 * the relevant bits of the h/w. 5363 */ 5364 #if 0 5365 ath_hal_intrset(ah, 0); /* disable interrupts */ 5366 #endif 5367 ath_stoprecv(sc, 1); /* turn off frame recv */ 5368 /* 5369 * First, handle completed TX/RX frames. 5370 */ 5371 ath_rx_flush(sc); 5372 ath_draintxq(sc, ATH_RESET_NOLOSS); 5373 /* 5374 * Next, flush the non-scheduled frames.
5375 */ 5376 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ 5377 5378 ath_update_chainmasks(sc, chan); 5379 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 5380 sc->sc_cur_rxchainmask); 5381 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) { 5382 if_printf(ifp, "%s: unable to reset " 5383 "channel %u (%u MHz, flags 0x%x), hal status %u\n", 5384 __func__, ieee80211_chan2ieee(ic, chan), 5385 chan->ic_freq, chan->ic_flags, status); 5386 ret = EIO; 5387 goto finish; 5388 } 5389 sc->sc_diversity = ath_hal_getdiversity(ah); 5390 5391 /* Let DFS at it in case it's a DFS channel */ 5392 ath_dfs_radar_enable(sc, chan); 5393 5394 /* Let spectral at in case spectral is enabled */ 5395 ath_spectral_enable(sc, chan); 5396 5397 /* 5398 * Let bluetooth coexistence at in case it's needed for this 5399 * channel 5400 */ 5401 ath_btcoex_enable(sc, ic->ic_curchan); 5402 5403 /* 5404 * If we're doing TDMA, enforce the TXOP limitation for chips 5405 * that support it. 5406 */ 5407 if (sc->sc_hasenforcetxop && sc->sc_tdma) 5408 ath_hal_setenforcetxop(sc->sc_ah, 1); 5409 else 5410 ath_hal_setenforcetxop(sc->sc_ah, 0); 5411 5412 /* 5413 * Re-enable rx framework. 5414 */ 5415 if (ath_startrecv(sc) != 0) { 5416 if_printf(ifp, "%s: unable to restart recv logic\n", 5417 __func__); 5418 ret = EIO; 5419 goto finish; 5420 } 5421 5422 /* 5423 * Change channels and update the h/w rate map 5424 * if we're switching; e.g. 11a to 11b/g. 5425 */ 5426 ath_chan_change(sc, chan); 5427 5428 /* 5429 * Reset clears the beacon timers; reset them 5430 * here if needed. 5431 */ 5432 if (sc->sc_beacons) { /* restart beacons */ 5433 #ifdef IEEE80211_SUPPORT_TDMA 5434 if (sc->sc_tdma) 5435 ath_tdma_config(sc, NULL); 5436 else 5437 #endif 5438 ath_beacon_config(sc, NULL); 5439 } 5440 5441 /* 5442 * Re-enable interrupts. 5443 */ 5444 #if 0 5445 ath_hal_intrset(ah, sc->sc_imask); 5446 #endif 5447 } 5448 5449 finish: 5450 ATH_PCU_LOCK(sc); 5451 sc->sc_inreset_cnt--; 5452 /* XXX only do this if sc_inreset_cnt == 0? */ 5453 ath_hal_intrset(ah, sc->sc_imask); 5454 ATH_PCU_UNLOCK(sc); 5455 5456 #if 0 5457 /* remove, DragonFly uses OACTIVE to control if_start calls */ 5458 IF_LOCK(&ifp->if_snd); 5459 ifq_clr_oactive(&ifp->if_snd); 5460 IF_UNLOCK(&ifp->if_snd); 5461 #endif 5462 ath_txrx_start(sc); 5463 /* XXX ath_start? */ 5464 5465 return ret; 5466 } 5467 5468 /* 5469 * Periodically recalibrate the PHY to account 5470 * for temperature/environment changes. 5471 */ 5472 static void 5473 ath_calibrate(void *arg) 5474 { 5475 struct ath_softc *sc = arg; 5476 struct ath_hal *ah = sc->sc_ah; 5477 struct ifnet *ifp = sc->sc_ifp; 5478 struct ieee80211com *ic = ifp->if_l2com; 5479 HAL_BOOL longCal, isCalDone = AH_TRUE; 5480 HAL_BOOL aniCal, shortCal = AH_FALSE; 5481 int nextcal; 5482 5483 /* 5484 * Force the hardware awake for ANI work. 
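 *
 * The due-time tests below are plain tick arithmetic:
 * ath_longcalinterval is in seconds and ath_shortcalinterval /
 * ath_anicalinterval are in milliseconds, hence the *hz and
 * *hz/1000 scalings to convert each interval into ticks before
 * comparing against ticks - sc_last*cal.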
5485 */ 5486 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5487 5488 /* Skip trying to do this if we're in reset */ 5489 if (sc->sc_inreset_cnt) 5490 goto restart; 5491 5492 wlan_serialize_enter(); 5493 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 5494 goto restart; 5495 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 5496 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 5497 if (sc->sc_doresetcal) 5498 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 5499 5500 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 5501 if (aniCal) { 5502 sc->sc_stats.ast_ani_cal++; 5503 sc->sc_lastani = ticks; 5504 ath_hal_ani_poll(ah, sc->sc_curchan); 5505 } 5506 5507 if (longCal) { 5508 sc->sc_stats.ast_per_cal++; 5509 sc->sc_lastlongcal = ticks; 5510 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 5511 /* 5512 * Rfgain is out of bounds, reset the chip 5513 * to load new gain values. 5514 */ 5515 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5516 "%s: rfgain change\n", __func__); 5517 sc->sc_stats.ast_per_rfgain++; 5518 sc->sc_resetcal = 0; 5519 sc->sc_doresetcal = AH_TRUE; 5520 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 5521 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5522 goto done; 5523 } 5524 /* 5525 * If this long cal is after an idle period, then 5526 * reset the data collection state so we start fresh. 5527 */ 5528 if (sc->sc_resetcal) { 5529 (void) ath_hal_calreset(ah, sc->sc_curchan); 5530 sc->sc_lastcalreset = ticks; 5531 sc->sc_lastshortcal = ticks; 5532 sc->sc_resetcal = 0; 5533 sc->sc_doresetcal = AH_TRUE; 5534 } 5535 } 5536 5537 /* Only call if we're doing a short/long cal, not for ANI calibration */ 5538 if (shortCal || longCal) { 5539 isCalDone = AH_FALSE; 5540 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 5541 if (longCal) { 5542 /* 5543 * Calibrate noise floor data again in case of change. 5544 */ 5545 ath_hal_process_noisefloor(ah); 5546 } 5547 } else { 5548 DPRINTF(sc, ATH_DEBUG_ANY, 5549 "%s: calibration of channel %u failed\n", 5550 __func__, sc->sc_curchan->ic_freq); 5551 sc->sc_stats.ast_per_calfail++; 5552 } 5553 if (shortCal) 5554 sc->sc_lastshortcal = ticks; 5555 } 5556 if (!isCalDone) { 5557 restart: 5558 /* 5559 * Use a shorter interval to potentially collect multiple 5560 * data samples required to complete calibration. Once 5561 * we're told the work is done we drop back to a longer 5562 * interval between requests. We're more aggressive doing 5563 * work when operating as an AP to improve operation right 5564 * after startup. 5565 */ 5566 sc->sc_lastshortcal = ticks; 5567 nextcal = ath_shortcalinterval*hz/1000; 5568 if (sc->sc_opmode != HAL_M_HOSTAP) 5569 nextcal *= 10; 5570 sc->sc_doresetcal = AH_TRUE; 5571 } else { 5572 /* nextcal should be the shortest time for next event */ 5573 nextcal = ath_longcalinterval*hz; 5574 if (sc->sc_lastcalreset == 0) 5575 sc->sc_lastcalreset = sc->sc_lastlongcal; 5576 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 5577 sc->sc_resetcal = 1; /* setup reset next trip */ 5578 sc->sc_doresetcal = AH_FALSE; 5579 } 5580 /* ANI calibration may occur more often than short/long/resetcal */ 5581 if (ath_anicalinterval > 0) 5582 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 5583 5584 if (nextcal != 0) { 5585 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 5586 __func__, nextcal, isCalDone ? 
"" : "!"); 5587 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 5588 } else { 5589 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 5590 __func__); 5591 /* NB: don't rearm timer */ 5592 } 5593 done: 5594 /* 5595 * Restore power state now that we're done. 5596 */ 5597 ath_power_restore_power_state(sc); 5598 wlan_serialize_exit(); 5599 } 5600 5601 static void 5602 ath_scan_start(struct ieee80211com *ic) 5603 { 5604 struct ifnet *ifp = ic->ic_ifp; 5605 struct ath_softc *sc = ifp->if_softc; 5606 struct ath_hal *ah = sc->sc_ah; 5607 u_int32_t rfilt; 5608 5609 /* XXX calibration timer? */ 5610 5611 ATH_LOCK(sc); 5612 sc->sc_scanning = 1; 5613 sc->sc_syncbeacon = 0; 5614 rfilt = ath_calcrxfilter(sc); 5615 ATH_UNLOCK(sc); 5616 5617 ATH_PCU_LOCK(sc); 5618 ath_hal_setrxfilter(ah, rfilt); 5619 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 5620 ATH_PCU_UNLOCK(sc); 5621 5622 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 5623 __func__, rfilt, ath_hal_ether_sprintf(ifp->if_broadcastaddr)); 5624 } 5625 5626 static void 5627 ath_scan_end(struct ieee80211com *ic) 5628 { 5629 struct ifnet *ifp = ic->ic_ifp; 5630 struct ath_softc *sc = ifp->if_softc; 5631 struct ath_hal *ah = sc->sc_ah; 5632 u_int32_t rfilt; 5633 5634 ATH_LOCK(sc); 5635 sc->sc_scanning = 0; 5636 rfilt = ath_calcrxfilter(sc); 5637 ATH_UNLOCK(sc); 5638 5639 ATH_PCU_LOCK(sc); 5640 ath_hal_setrxfilter(ah, rfilt); 5641 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5642 5643 ath_hal_process_noisefloor(ah); 5644 ATH_PCU_UNLOCK(sc); 5645 5646 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5647 __func__, rfilt, ath_hal_ether_sprintf(sc->sc_curbssid), 5648 sc->sc_curaid); 5649 } 5650 5651 #ifdef ATH_ENABLE_11N 5652 /* 5653 * For now, just do a channel change. 5654 * 5655 * Later, we'll go through the hard slog of suspending tx/rx, changing rate 5656 * control state and resetting the hardware without dropping frames out 5657 * of the queue. 5658 * 5659 * The unfortunate trouble here is making absolutely sure that the 5660 * channel width change has propagated enough so the hardware 5661 * absolutely isn't handed bogus frames for it's current operating 5662 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and 5663 * does occur in parallel, we need to make certain we've blocked 5664 * any further ongoing TX (and RX, that can cause raw TX) 5665 * before we do this. 5666 */ 5667 static void 5668 ath_update_chw(struct ieee80211com *ic) 5669 { 5670 struct ifnet *ifp = ic->ic_ifp; 5671 struct ath_softc *sc = ifp->if_softc; 5672 5673 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); 5674 ath_set_channel(ic); 5675 } 5676 #endif /* ATH_ENABLE_11N */ 5677 5678 static void 5679 ath_set_channel(struct ieee80211com *ic) 5680 { 5681 struct ifnet *ifp = ic->ic_ifp; 5682 struct ath_softc *sc = ifp->if_softc; 5683 5684 (void) ath_chan_set(sc, ic->ic_curchan); 5685 /* 5686 * If we are returning to our bss channel then mark state 5687 * so the next recv'd beacon's tsf will be used to sync the 5688 * beacon timers. Note that since we only hear beacons in 5689 * sta/ibss mode this has no effect in other operating modes. 5690 */ 5691 ATH_LOCK(sc); 5692 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5693 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 5694 sc->sc_syncbeacon = 1; 5695 ath_power_restore_power_state(sc); 5696 ATH_UNLOCK(sc); 5697 } 5698 5699 /* 5700 * Walk the vap list and check if there any vap's in RUN state. 
5701 */ 5702 static int 5703 ath_isanyrunningvaps(struct ieee80211vap *this) 5704 { 5705 struct ieee80211com *ic = this->iv_ic; 5706 struct ieee80211vap *vap; 5707 5708 IEEE80211_LOCK_ASSERT(ic); 5709 5710 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 5711 if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 5712 return 1; 5713 } 5714 return 0; 5715 } 5716 5717 static int 5718 ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 5719 { 5720 struct ieee80211com *ic = vap->iv_ic; 5721 struct ath_softc *sc = ic->ic_ifp->if_softc; 5722 struct ath_vap *avp = ATH_VAP(vap); 5723 struct ath_hal *ah = sc->sc_ah; 5724 struct ieee80211_node *ni = NULL; 5725 int i, error, stamode; 5726 u_int32_t rfilt; 5727 int csa_run_transition = 0; 5728 enum ieee80211_state ostate = vap->iv_state; 5729 5730 static const HAL_LED_STATE leds[] = { 5731 HAL_LED_INIT, /* IEEE80211_S_INIT */ 5732 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 5733 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 5734 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 5735 HAL_LED_RUN, /* IEEE80211_S_CAC */ 5736 HAL_LED_RUN, /* IEEE80211_S_RUN */ 5737 HAL_LED_RUN, /* IEEE80211_S_CSA */ 5738 HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 5739 }; 5740 5741 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 5742 ieee80211_state_name[ostate], 5743 ieee80211_state_name[nstate]); 5744 5745 /* 5746 * net80211 _should_ have the comlock asserted at this point. 5747 * There are some comments around the calls to vap->iv_newstate 5748 * which indicate that it (newstate) may end up dropping the 5749 * lock. This and the subsequent lock assert check after newstate 5750 * are an attempt to catch these and figure out how/why. 5751 */ 5752 IEEE80211_LOCK_ASSERT(ic); 5753 5754 /* Before we touch the hardware - wake it up */ 5755 /* 5756 * If the NIC is in anything other than SLEEP state, 5757 * we need to ensure that self-generated frames are 5758 * set for PWRMGT=0. Otherwise we may end up with 5759 * strange situations. 5760 * 5761 * XXX TODO: is this actually the case? :-) 5762 */ 5763 if (nstate != IEEE80211_S_SLEEP) 5764 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5765 5766 /* 5767 * Now, wake the thing up. 5768 */ 5769 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5770 5771 if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 5772 csa_run_transition = 1; 5773 5774 wlan_serialize_exit(); 5775 callout_stop_sync(&sc->sc_cal_ch); 5776 wlan_serialize_enter(); 5777 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 5778 5779 if (nstate == IEEE80211_S_SCAN) { 5780 /* 5781 * Scanning: turn off beacon miss and don't beacon. 5782 * Mark beacon state so when we reach RUN state we'll 5783 * [re]setup beacons. Unblock the task q thread so 5784 * deferred interrupt processing is done. 5785 */ 5786 5787 /* Ensure we stay awake during scan */ 5788 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5789 ath_power_setpower(sc, HAL_PM_AWAKE); 5790 5791 ath_hal_intrset(ah, 5792 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 5793 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5794 sc->sc_beacons = 0; 5795 taskqueue_unblock(sc->sc_tq); 5796 } 5797 5798 ni = ieee80211_ref_node(vap->iv_bss); 5799 rfilt = ath_calcrxfilter(sc); 5800 stamode = (vap->iv_opmode == IEEE80211_M_STA || 5801 vap->iv_opmode == IEEE80211_M_AHDEMO || 5802 vap->iv_opmode == IEEE80211_M_IBSS); 5803 5804 /* 5805 * XXX Dont need to do this (and others) if we've transitioned 5806 * from SLEEP->RUN. 
static int
ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni = NULL;
	int i, error, stamode;
	u_int32_t rfilt;
	int csa_run_transition = 0;
	enum ieee80211_state ostate = vap->iv_state;

	static const HAL_LED_STATE leds[] = {
		HAL_LED_INIT,	/* IEEE80211_S_INIT */
		HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
		HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
		HAL_LED_ASSOC,	/* IEEE80211_S_ASSOC */
		HAL_LED_RUN,	/* IEEE80211_S_CAC */
		HAL_LED_RUN,	/* IEEE80211_S_RUN */
		HAL_LED_RUN,	/* IEEE80211_S_CSA */
		HAL_LED_RUN,	/* IEEE80211_S_SLEEP */
	};

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate]);

	/*
	 * net80211 _should_ have the comlock asserted at this point.
	 * There are some comments around the calls to vap->iv_newstate
	 * which indicate that it (newstate) may end up dropping the
	 * lock.  This and the subsequent lock assert check after newstate
	 * are an attempt to catch these and figure out how/why.
	 */
	IEEE80211_LOCK_ASSERT(ic);

	/* Before we touch the hardware - wake it up */
	/*
	 * If the NIC is in anything other than SLEEP state,
	 * we need to ensure that self-generated frames are
	 * set for PWRMGT=0.  Otherwise we may end up with
	 * strange situations.
	 *
	 * XXX TODO: is this actually the case? :-)
	 */
	if (nstate != IEEE80211_S_SLEEP)
		ath_power_setselfgen(sc, HAL_PM_AWAKE);

	/*
	 * Now, wake the thing up.
	 */
	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
		csa_run_transition = 1;

	wlan_serialize_exit();
	callout_stop_sync(&sc->sc_cal_ch);
	wlan_serialize_enter();
	ath_hal_setledstate(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_SCAN) {
		/*
		 * Scanning: turn off beacon miss and don't beacon.
		 * Mark beacon state so when we reach RUN state we'll
		 * [re]setup beacons.  Unblock the task q thread so
		 * deferred interrupt processing is done.
		 */

		/* Ensure we stay awake during scan */
		ath_power_setselfgen(sc, HAL_PM_AWAKE);
		ath_power_setpower(sc, HAL_PM_AWAKE);

		ath_hal_intrset(ah,
		    sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		sc->sc_beacons = 0;
		taskqueue_unblock(sc->sc_tq);
	}

	ni = ieee80211_ref_node(vap->iv_bss);
	rfilt = ath_calcrxfilter(sc);
	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
		   vap->iv_opmode == IEEE80211_M_AHDEMO ||
		   vap->iv_opmode == IEEE80211_M_IBSS);

	/*
	 * XXX Don't need to do this (and others) if we've transitioned
	 * from SLEEP->RUN.
	 */
	if (stamode && nstate == IEEE80211_S_RUN) {
		sc->sc_curaid = ni->ni_associd;
		IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
		ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
	}
	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
	    __func__, rfilt,
	    ath_hal_ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
	ath_hal_setrxfilter(ah, rfilt);

	/* XXX is this to restore keycache on resume? */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY)) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++)
			if (ath_hal_keyisvalid(ah, i))
				ath_hal_keysetmac(ah, i, ni->ni_bssid);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = avp->av_newstate(vap, nstate, arg);
	if (error != 0)
		goto bad;

	/*
	 * See above: ensure av_newstate() doesn't drop the lock
	 * on us.
	 */
	IEEE80211_LOCK_ASSERT(ic);

	if (nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ieee80211_free_node(ni);
		ni = ieee80211_ref_node(vap->iv_bss);

		DPRINTF(sc, ATH_DEBUG_STATE,
		    "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n", __func__,
		    vap->iv_flags, ni->ni_intval,
		    ath_hal_ether_sprintf(ni->ni_bssid),
		    ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));

		switch (vap->iv_opmode) {
#ifdef IEEE80211_SUPPORT_TDMA
		case IEEE80211_M_AHDEMO:
			if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
				break;
			/* fall thru... */
#endif
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_IBSS:
		case IEEE80211_M_MBSS:
			/*
			 * Allocate and setup the beacon frame.
			 *
			 * Stop any previous beacon DMA.  This may be
			 * necessary, for example, when an ibss merge
			 * causes reconfiguration; there will be a state
			 * transition from RUN->RUN that means we may
			 * be called with beacon transmission active.
			 */
			ath_hal_stoptxdma(ah, sc->sc_bhalq);

			error = ath_beacon_alloc(sc, ni);
			if (error != 0)
				goto bad;
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use.  Otherwise we're
			 * starting an ibss/bss so there's no need to delay;
			 * if this is the first vap moving to RUN state, then
			 * beacon state needs to be [re]configured.
			 */
			if (vap->iv_opmode == IEEE80211_M_IBSS &&
			    ni->ni_tstamp.tsf != 0) {
				sc->sc_syncbeacon = 1;
			} else if (!sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
				if (vap->iv_caps & IEEE80211_C_TDMA)
					ath_tdma_config(sc, vap);
				else
#endif
					ath_beacon_config(sc, vap);
				sc->sc_beacons = 1;
			}
			break;
		case IEEE80211_M_STA:
			/*
			 * Defer beacon timer configuration to the next
			 * beacon frame so we have a current TSF to use
			 * (any TSF collected when scanning is likely old).
			 * However if it's due to a CSA -> RUN transition,
			 * force a beacon update so we pick up a lack of
			 * beacons from an AP in CAC and thus force a
			 * scan.
			 *
			 * And, there's also corner cases here where
			 * after a scan, the AP may have disappeared.
			 * In that case, we may not receive an actual
			 * beacon to update the beacon timer and thus we
			 * won't get notified of the missing beacons.
			 */
			if (ostate != IEEE80211_S_RUN &&
			    ostate != IEEE80211_S_SLEEP) {
				DPRINTF(sc, ATH_DEBUG_BEACON,
				    "%s: STA; syncbeacon=1\n", __func__);
				sc->sc_syncbeacon = 1;

				if (csa_run_transition)
					ath_beacon_config(sc, vap);

				/*
				 * PR: kern/175227
				 *
				 * Reconfigure beacons during reset, as
				 * otherwise we won't get the beacon timers
				 * reprogrammed after a reset and thus we
				 * won't pick up a beacon miss interrupt.
				 *
				 * Hopefully we'll see a beacon before the
				 * BMISS timer fires (too often), leading to
				 * a STA disassociation.
				 */
				sc->sc_beacons = 1;
			}
			break;
		case IEEE80211_M_MONITOR:
			/*
			 * Monitor mode vaps have only INIT->RUN and RUN->RUN
			 * transitions so we must re-enable interrupts here to
			 * handle the case of a single monitor mode vap.
			 */
			ath_hal_intrset(ah, sc->sc_imask);
			break;
		case IEEE80211_M_WDS:
			break;
		default:
			break;
		}
		/*
		 * Let the hal process statistics collected during a
		 * scan so it can provide calibrated noise floor data.
		 */
		ath_hal_process_noisefloor(ah);
		/*
		 * Reset rssi stats; maybe not the best place...
		 */
		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;

		/*
		 * Force awake for RUN mode.
		 */
		ath_power_setselfgen(sc, HAL_PM_AWAKE);
		ath_power_setpower(sc, HAL_PM_AWAKE);

		/*
		 * Finally, start any timers and the task q thread
		 * (in case we didn't go through SCAN state).
		 */
		if (ath_longcalinterval != 0) {
			/* start periodic recalibration timer */
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
		} else {
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: calibration disabled\n", __func__);
		}

		taskqueue_unblock(sc->sc_tq);
	} else if (nstate == IEEE80211_S_INIT) {
		/*
		 * If there are no vaps left in RUN state then
		 * shutdown host/driver operation:
		 * o disable interrupts
		 * o disable the task queue thread
		 * o mark beacon processing as stopped
		 */
		if (!ath_isanyrunningvaps(vap)) {
			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
			/* disable interrupts */
			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
			taskqueue_block(sc->sc_tq);
			sc->sc_beacons = 0;
		}
#ifdef IEEE80211_SUPPORT_TDMA
		ath_hal_setcca(ah, AH_TRUE);
#endif
	} else if (nstate == IEEE80211_S_SLEEP) {
		/* We're going to sleep, so transition appropriately */
		/* For now, only do this if we're a single STA vap */
		if (sc->sc_nvaps == 1 &&
		    vap->iv_opmode == IEEE80211_M_STA) {
			DPRINTF(sc, ATH_DEBUG_BEACON,
			    "%s: syncbeacon=%d\n",
			    __func__, sc->sc_syncbeacon);
			/*
			 * Always at least set the self-generated
			 * frame config to set PWRMGT=1.
			 */
			ath_power_setselfgen(sc, HAL_PM_NETWORK_SLEEP);

			/*
			 * If we're not syncing beacons, transition
			 * to NETWORK_SLEEP.
			 *
			 * We stay awake if syncbeacon > 0 in case
			 * we need to listen for some beacons, otherwise
			 * our beacon timer config may be wrong.
			 */
			if (sc->sc_syncbeacon == 0) {
				ath_power_setpower(sc, HAL_PM_NETWORK_SLEEP);
			}
		}
	}
bad:
	ieee80211_free_node(ni);

	/*
	 * Restore the power state - either to what it was, or
	 * to network_sleep if it's alright.
	 */
	ath_power_restore_power_state(sc);

	return error;
}
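/*
 * Note the power state bracketing used by ath_newstate() above and by
 * most other hardware entry points in this file: wake the chip before
 * touching it, do the work, then drop the power reference:
 *
 *	ath_power_set_power_state(sc, HAL_PM_AWAKE);
 *	(... register/hardware access ...)
 *	ath_power_restore_power_state(sc);
 *
 * The restore call puts the chip back to whatever state was configured
 * beforehand (eg network sleep) rather than forcing a particular one.
 */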
/*
 * Allocate a key cache slot to the station so we can
 * setup a mapping from key index to node.  The key cache
 * slot is needed for managing antenna state and for
 * compression when stations do not use crypto.  We do
 * it unilaterally here; if crypto is employed this slot
 * will be reassigned.
 */
static void
ath_setup_stationkey(struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	ieee80211_keyix keyix, rxkeyix;

	/* XXX should take a locked ref to vap->iv_bss */
	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
		/*
		 * Key cache is full; we'll fall back to doing
		 * the more expensive lookup in software.  Note
		 * this also means no h/w compression.
		 */
		/* XXX msg+statistic */
	} else {
		/* XXX locking? */
		ni->ni_ucastkey.wk_keyix = keyix;
		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
		/* NB: must mark device key to get called back on delete */
		ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
		/* NB: this will create a pass-thru key entry */
		ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
	}
}

/*
 * Setup driver-specific state for a newly associated node.
 * Note that we're also called on a re-associate; the isnew
 * param tells us if this is the first time or not.
 */
static void
ath_newassoc(struct ieee80211_node *ni, int isnew)
{
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	const struct ieee80211_txparam *tp = ni->ni_txparms;

	an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
	an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);

	DPRINTF(sc, ATH_DEBUG_NODE,
	    "%s: %s: reassoc; isnew=%d, is_powersave=%d\n",
	    __func__,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    isnew,
	    an->an_is_powersave);

	ATH_NODE_LOCK(an);
	ath_rate_newassoc(sc, an, isnew);
	ATH_NODE_UNLOCK(an);

	if (isnew &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
		ath_setup_stationkey(ni);

	/*
	 * If we're reassociating, make sure that any paused queues
	 * get unpaused.
	 *
	 * Now, we may have frames in the hardware queue for this node.
	 * So if we are reassociating and there are frames in the queue,
	 * we need to go through the cleanup path to ensure that they're
	 * marked as non-aggregate.
	 */
	if (!isnew) {
		DPRINTF(sc, ATH_DEBUG_NODE,
		    "%s: %s: reassoc; is_powersave=%d\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    an->an_is_powersave);

		/* XXX for now, we can't hold the lock across assoc */
		ath_tx_node_reassoc(sc, an);

		/* XXX for now, we can't hold the lock across wakeup */
		if (an->an_is_powersave)
			ath_tx_node_wakeup(sc, an);
	}
}

static int
ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
	int nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: rd %u cc %u location %c%s\n",
	    __func__, reg->regdomain, reg->country, reg->location,
	    reg->ecm ? " ecm" : "");

	status = ath_hal_set_channels(ah, chans, nchans,
	    reg->country, reg->regdomain);
	if (status != HAL_OK) {
		DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
		    __func__, status);
		return EINVAL;		/* XXX */
	}

	return 0;
}

static void
ath_getradiocaps(struct ieee80211com *ic,
	int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
	    __func__, SKU_DEBUG, CTRY_DEFAULT);

	/* XXX check return */
	(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
	    HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
}

static int
ath_getchannels(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Collect channel set based on EEPROM contents.
	 */
	status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
	    &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
	if (status != HAL_OK) {
		if_printf(ifp, "%s: unable to collect channel list from hal, "
		    "status %d\n", __func__, status);
		return EINVAL;
	}
	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
	/* XXX map Atheros sku's to net80211 SKU's */
	/* XXX net80211 types too small */
	ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
	ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX don't know */
	ic->ic_regdomain.isocc[1] = ' ';

	ic->ic_regdomain.ecm = 1;
	ic->ic_regdomain.location = 'I';

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
	    __func__, sc->sc_eerd, sc->sc_eecc,
	    ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
" ecm" : ""); 6205 return 0; 6206 } 6207 6208 static int 6209 ath_rate_setup(struct ath_softc *sc, u_int mode) 6210 { 6211 struct ath_hal *ah = sc->sc_ah; 6212 const HAL_RATE_TABLE *rt; 6213 6214 switch (mode) { 6215 case IEEE80211_MODE_11A: 6216 rt = ath_hal_getratetable(ah, HAL_MODE_11A); 6217 break; 6218 case IEEE80211_MODE_HALF: 6219 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 6220 break; 6221 case IEEE80211_MODE_QUARTER: 6222 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 6223 break; 6224 case IEEE80211_MODE_11B: 6225 rt = ath_hal_getratetable(ah, HAL_MODE_11B); 6226 break; 6227 case IEEE80211_MODE_11G: 6228 rt = ath_hal_getratetable(ah, HAL_MODE_11G); 6229 break; 6230 case IEEE80211_MODE_TURBO_A: 6231 rt = ath_hal_getratetable(ah, HAL_MODE_108A); 6232 break; 6233 case IEEE80211_MODE_TURBO_G: 6234 rt = ath_hal_getratetable(ah, HAL_MODE_108G); 6235 break; 6236 case IEEE80211_MODE_STURBO_A: 6237 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 6238 break; 6239 case IEEE80211_MODE_11NA: 6240 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 6241 break; 6242 case IEEE80211_MODE_11NG: 6243 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 6244 break; 6245 default: 6246 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 6247 __func__, mode); 6248 return 0; 6249 } 6250 sc->sc_rates[mode] = rt; 6251 return (rt != NULL); 6252 } 6253 6254 static void 6255 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 6256 { 6257 #define N(a) (sizeof(a)/sizeof(a[0])) 6258 /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 6259 static const struct { 6260 u_int rate; /* tx/rx 802.11 rate */ 6261 u_int16_t timeOn; /* LED on time (ms) */ 6262 u_int16_t timeOff; /* LED off time (ms) */ 6263 } blinkrates[] = { 6264 { 108, 40, 10 }, 6265 { 96, 44, 11 }, 6266 { 72, 50, 13 }, 6267 { 48, 57, 14 }, 6268 { 36, 67, 16 }, 6269 { 24, 80, 20 }, 6270 { 22, 100, 25 }, 6271 { 18, 133, 34 }, 6272 { 12, 160, 40 }, 6273 { 10, 200, 50 }, 6274 { 6, 240, 58 }, 6275 { 4, 267, 66 }, 6276 { 2, 400, 100 }, 6277 { 0, 500, 130 }, 6278 /* XXX half/quarter rates */ 6279 }; 6280 const HAL_RATE_TABLE *rt; 6281 int i, j; 6282 6283 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 6284 rt = sc->sc_rates[mode]; 6285 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 6286 for (i = 0; i < rt->rateCount; i++) { 6287 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6288 if (rt->info[i].phy != IEEE80211_T_HT) 6289 sc->sc_rixmap[ieeerate] = i; 6290 else 6291 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; 6292 } 6293 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 6294 for (i = 0; i < N(sc->sc_hwmap); i++) { 6295 if (i >= rt->rateCount) { 6296 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 6297 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 6298 continue; 6299 } 6300 sc->sc_hwmap[i].ieeerate = 6301 rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 6302 if (rt->info[i].phy == IEEE80211_T_HT) 6303 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; 6304 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 6305 if (rt->info[i].shortPreamble || 6306 rt->info[i].phy == IEEE80211_T_OFDM) 6307 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 6308 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; 6309 for (j = 0; j < N(blinkrates)-1; j++) 6310 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 6311 break; 6312 /* NB: this uses the last entry if the rate isn't found */ 6313 /* XXX beware of overlow */ 6314 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 6315 
static void
ath_watchdog(void *arg)
{
	struct ath_softc *sc = arg;
	int do_reset = 0;

	wlan_serialize_enter();
	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
		struct ifnet *ifp = sc->sc_ifp;
		uint32_t hangs;

		ath_power_set_power_state(sc, HAL_PM_AWAKE);

		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
		    hangs != 0) {
			if_printf(ifp, "%s hang detected (0x%x)\n",
			    hangs & 0xff ? "bb" : "mac", hangs);
		} else
			if_printf(ifp, "device timeout\n");
		do_reset = 1;
		ifp->if_oerrors++;
		sc->sc_stats.ast_watchdog++;
		ath_power_restore_power_state(sc);
	}

	/*
	 * We can't hold the lock across the ath_reset() call.
	 *
	 * And since this routine can't hold a lock and sleep,
	 * do the reset deferred.
	 */
	if (do_reset) {
		taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
	}

	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
	wlan_serialize_exit();
}

/*
 * (DragonFly network start)
 */
static void
ath_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct ath_softc *sc = ifp->if_softc;
	struct mbuf *m;

	wlan_assert_serialized();
	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) {
		ifq_purge(&ifp->if_snd);
		return;
	}
	ifq_set_oactive(&ifp->if_snd);
	for (;;) {
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;
		ath_transmit(ifp, m);
	}
	ifq_clr_oactive(&ifp->if_snd);
}

/*
 * Fetch the rate control statistics for the given node.
 */
static int
ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs)
{
	struct ath_node *an;
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ieee80211_node *ni;
	int error = 0;

	/* Perform a lookup on the given node */
	ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr);
	if (ni == NULL) {
		error = EINVAL;
		goto bad;
	}

	/* Lock the ath_node */
	an = ATH_NODE(ni);
	ATH_NODE_LOCK(an);

	/* Fetch the rate control stats for this node */
	error = ath_rate_fetch_node_stats(sc, an, rs);

	/* No matter what happens here, just drop through */

	/* Unlock the ath_node */
	ATH_NODE_UNLOCK(an);

	/* Unref the node */
	ieee80211_node_decref(ni);

bad:
	return (error);
}
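/*
 * SIOCGATHNODERATESTATS (dispatched from ath_ioctl() below) is the
 * userland entry point into ath_ioctl_ratestats().  A hedged sketch
 * of a caller; the ioctl name and the is_u.macaddr field are as used
 * here, everything else is illustrative:
 *
 *	struct ath_rateioctl rs;
 *
 *	memset(&rs, 0, sizeof(rs));
 *	memcpy(rs.is_u.macaddr, mac, IEEE80211_ADDR_LEN);
 *	(point the structure's buffer/length fields at user memory)
 *	ioctl(s, SIOCGATHNODERATESTATS, &rs);
 */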
#ifdef ATH_DIAGAPI
/*
 * Diagnostic interface to the HAL.  This is used by various
 * tools to do things like retrieve register contents for
 * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int id = ad->ad_id & ATH_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = ad->ad_in_size;
	u_int32_t outsize = ad->ad_out_size;
	int error = 0;

	if (ad->ad_id & ATH_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = kmalloc(insize, M_TEMP, M_INTWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(ad->ad_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (ad->ad_id & ATH_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = kmalloc(outsize, M_TEMP, M_INTWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}

	if (id != HAL_DIAG_REGS)
		ath_power_set_power_state(sc, HAL_PM_AWAKE);

	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, ad->ad_out_data,
			    ad->ad_out_size);
	} else {
		error = EINVAL;
	}
	if (id != HAL_DIAG_REGS)
		ath_power_restore_power_state(sc);
bad:
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		kfree(indata, M_TEMP);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		kfree(outdata, M_TEMP);
	return error;
}
#endif /* ATH_DIAGAPI */
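/*
 * A sketch of how a diagnostic tool drives ath_ioctl_diag() above;
 * the struct ath_diag fields and flags are the ones referenced by
 * the handler, the surrounding code is illustrative only:
 *
 *	struct ath_diag ad;
 *
 *	memset(&ad, 0, sizeof(ad));
 *	ad.ad_id = id | ATH_DIAG_DYN;
 *	ad.ad_out_data = buf;
 *	ad.ad_out_size = sizeof(buf);
 *	ioctl(s, SIOCGATHDIAG, &ad);
 *
 * ATH_DIAG_IN requests a copyin of ad_in_data/ad_in_size first;
 * ATH_DIAG_DYN asks the driver to allocate the result buffer and
 * copy it back out via ad_out_data.
 */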
static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data,
    struct ucred *cr __unused)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_flags & IFF_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	const HAL_RATE_TABLE *rt;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ATH_LOCK(sc);
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid)
				ath_init(sc);	/* XXX lose error */
		} else {
			ath_stop_locked(ifp);
			if (!sc->sc_invalid)
				ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
		}
		ATH_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
		sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
#ifdef IEEE80211_SUPPORT_TDMA
		sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
		sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
#endif
		rt = sc->sc_currates;
		sc->sc_stats.ast_tx_rate =
		    rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
		if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
			sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
		return copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
	case SIOCGATHAGSTATS:
		return copyout(&sc->sc_aggr_stats,
		    ifr->ifr_data, sizeof (sc->sc_aggr_stats));
	case SIOCZATHSTATS:
		error = priv_check(curthread, PRIV_DRIVER);
		if (error == 0) {
			memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
			memset(&sc->sc_aggr_stats, 0,
			    sizeof(sc->sc_aggr_stats));
			memset(&sc->sc_intr_stats, 0,
			    sizeof(sc->sc_intr_stats));
		}
		break;
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
	case SIOCGATHPHYERR:
		error = ath_ioctl_phyerr(sc, (struct ath_diag *) ifr);
		break;
#endif
	case SIOCGATHSPECTRAL:
		error = ath_ioctl_spectral(sc, (struct ath_diag *) ifr);
		break;
	case SIOCGATHNODERATESTATS:
		error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr);
		break;
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
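/*
 * The stats ioctls above copy the driver counters straight out to
 * userland through ifr_data.  A minimal sketch of a SIOCGATHSTATS
 * consumer (illustrative only; a stats utility would do this):
 *
 *	struct ath_stats stats;
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "ath0", sizeof(ifr.ifr_name));
 *	ifr.ifr_data = (caddr_t) &stats;
 *	ioctl(s, SIOCGATHSTATS, &ifr);
 */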
/*
 * Announce various information on device/driver attach.
 */
static void
ath_announce(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;

	if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
	    ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
	    ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
	    ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct ath_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
			    txq->axq_qnum, ieee80211_wme_acnames[i]);
		}
		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
		    sc->sc_cabq->axq_qnum);
		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
	}
	if (ath_rxbuf != ATH_RXBUF)
		if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
	if (ath_txbuf != ATH_TXBUF)
		if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
	if (sc->sc_mcastkey && bootverbose)
		if_printf(ifp, "using multicast key search\n");
}

static void
ath_dfs_tasklet(void *p, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) p;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	/*
	 * If previous processing has found a radar event,
	 * signal this to the net80211 layer to begin DFS
	 * processing.
	 */
	wlan_serialize_enter();
	if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
		/* DFS event found, initiate channel change */
		/*
		 * XXX doesn't currently tell us whether the event
		 * XXX was found in the primary or extension
		 * XXX channel!
		 */
		IEEE80211_LOCK(ic);
		ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
		IEEE80211_UNLOCK(ic);
	}
	wlan_serialize_exit();
}
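/*
 * ath_dfs_tasklet() runs from the driver taskqueue, so radar
 * notification happens outside the RX interrupt path.  Presumably it
 * is wired up at attach time along these lines (a sketch, not the
 * literal attach code; the sc_dfstask name is assumed here):
 *
 *	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);
 *
 * and queued from the RX path when a radar pulse needs processing:
 *
 *	taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);
 */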
#if 0
/*
 * Enable/disable power save.  This must be called with
 * no TX driver locks currently held, so it should only
 * be called from the RX path (which doesn't hold any
 * TX driver locks.)
 */
static void
ath_node_powersave(struct ieee80211_node *ni, int enable)
{
#ifdef ATH_SW_PSQ
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	/* XXX and no TXQ locks should be held here */

	DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6s: enable=%d\n",
	    __func__,
	    ath_hal_ether_sprintf(ni->ni_macaddr),
	    !! enable);

	/* Suspend or resume software queue handling */
	if (enable)
		ath_tx_node_sleep(sc, an);
	else
		ath_tx_node_wakeup(sc, an);

	/* Update net80211 state */
	if (avp->av_node_ps)
		avp->av_node_ps(ni, enable);
#else
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	/* Update net80211 state */
	if (avp->av_node_ps)
		avp->av_node_ps(ni, enable);
#endif /* ATH_SW_PSQ */
}
#endif

/*
 * Notification from net80211 that the powersave queue state has
 * changed.
 *
 * Since the software queue also may have some frames:
 *
 * + if the node software queue has frames and the TIM state
 *   is 0, we set the TIM;
 * + if the node and the stack are both empty, we clear the TIM bit;
 * + if the stack tries to set the bit, always set it;
 * + if the stack tries to clear the bit, only clear it if the
 *   software queue in question is also empty.
 *
 * TODO: this is called during node teardown; so let's ensure this
 * is all correctly handled and that the TIM bit is cleared.
 * It may be that the node flush is called _AFTER_ the net80211
 * stack clears the TIM.
 *
 * Here is the racy part.  Since it's possible that more than one
 * concurrent, overlapping TX will appear complete with a TX
 * completion in another thread, it's possible that the concurrent
 * TIM calls will clash.  We can't hold the node lock here because
 * setting the TIM grabs the net80211 comlock and this may cause a
 * LOR.  The solution is either to totally serialise _everything_ at
 * this point (ie, all TX, completion and any reset/flush go into
 * one taskqueue) or to create a new "ath TIM lock" that just wraps
 * the driver state change and this call to avp->av_set_tim().
 *
 * The same race exists in the net80211 power save queue handling
 * as well.  Since multiple transmitting threads may queue frames
 * into the driver, as well as ps-poll and the driver transmitting
 * frames (and thus clearing the psq), it's quite possible that
 * a packet entering the PSQ and a ps-poll being handled will
 * race, causing the TIM to be cleared and not re-set.
 */
static int
ath_node_set_tim(struct ieee80211_node *ni, int enable)
{
#ifdef ATH_SW_PSQ
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);
	int changed = 0;

	ATH_TX_LOCK(sc);
	an->an_stack_psq = enable;

	/*
	 * This will get called for all operating modes,
	 * even if avp->av_set_tim is unset.
	 * It's currently set for hostap/ibss modes; but
	 * the same infrastructure is used for both STA
	 * and AP/IBSS node power save.
	 */
	if (avp->av_set_tim == NULL) {
		ATH_TX_UNLOCK(sc);
		return (0);
	}

	/*
	 * If setting the bit, always set it here.
	 * If clearing the bit, only clear it if the
	 * software queue is also empty.
	 *
	 * If the node has left power save, just clear the TIM
	 * bit regardless of the state of the power save queue.
	 *
	 * XXX TODO: although atomics are used, it's quite possible
	 * that a race will occur between this and setting/clearing
	 * in another thread.  TX completion will occur always in
	 * one thread, however setting/clearing the TIM bit can come
	 * from a variety of different process contexts!
	 */
	if (enable && an->an_tim_set == 1) {
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: enable=%d, tim_set=1, ignoring\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    enable);
		ATH_TX_UNLOCK(sc);
	} else if (enable) {
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: enable=%d, enabling TIM\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    enable);
		an->an_tim_set = 1;
		ATH_TX_UNLOCK(sc);
		changed = avp->av_set_tim(ni, enable);
	} else if (an->an_swq_depth == 0) {
		/* disable */
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: enable=%d, an_swq_depth == 0, disabling\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    enable);
		an->an_tim_set = 0;
		ATH_TX_UNLOCK(sc);
		changed = avp->av_set_tim(ni, enable);
	} else if (! an->an_is_powersave) {
		/*
		 * disable regardless; the node isn't in powersave now
		 */
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: enable=%d, an_pwrsave=0, disabling\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    enable);
		an->an_tim_set = 0;
		ATH_TX_UNLOCK(sc);
		changed = avp->av_set_tim(ni, enable);
	} else {
		/*
		 * psq disable, node is currently in powersave, node
		 * software queue isn't empty, so don't clear the TIM bit
		 * for now.
		 */
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %s: enable=%d, an_swq_depth > 0, ignoring\n",
		    __func__,
		    ath_hal_ether_sprintf(ni->ni_macaddr),
		    enable);
		changed = 0;
	}

	return (changed);
#else
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	/*
	 * Some operating modes don't set av_set_tim(), so don't
	 * update it here.
	 */
	if (avp->av_set_tim == NULL)
		return (0);

	return (avp->av_set_tim(ni, enable));
#endif /* ATH_SW_PSQ */
}
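/*
 * As with av_newstate, av_set_tim is the saved net80211 method and
 * ath_node_set_tim() is interposed in its place when the vap is
 * created (a sketch; only modes that provide an iv_set_tim get this,
 * which is why the NULL check above is needed):
 *
 *	avp->av_set_tim = vap->iv_set_tim;
 *	vap->iv_set_tim = ath_node_set_tim;
 */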
/*
 * Set or update the TIM from the software queue.
 *
 * Check the software queue depth before attempting to acquire any
 * locks; that avoids taking the lock needlessly.  Then, re-check
 * afterwards to ensure nothing has changed in the meantime.
 *
 * set:   This is designed to be called from the TX path, after
 *        a frame has been queued, to see if the swq depth is > 0.
 *
 * clear: This is designed to be called from the buffer completion point
 *        (right now it's ath_tx_default_comp()) where the state of
 *        a software queue has changed.
 *
 * It makes sense to place it at buffer free / completion rather
 * than after each software queue operation, as there's no real
 * point in churning the TIM bit as the last frames in the software
 * queue are transmitted.  If they fail and we retry them, we'd
 * just be setting the TIM bit again anyway.
 */
void
ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
    int enable)
{
#ifdef ATH_SW_PSQ
	struct ath_node *an;
	struct ath_vap *avp;

	/* Don't do this for broadcast/etc frames */
	if (ni == NULL)
		return;

	an = ATH_NODE(ni);
	avp = ATH_VAP(ni->ni_vap);

	/*
	 * And for operating modes without the TIM handler set, let's
	 * just skip those.
	 */
	if (avp->av_set_tim == NULL)
		return;

	ATH_TX_LOCK_ASSERT(sc);

	if (enable) {
		if (an->an_is_powersave &&
		    an->an_tim_set == 0 &&
		    an->an_swq_depth != 0) {
			DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
			    "%s: %s: swq_depth>0, tim_set=0, set!\n",
			    __func__,
			    ath_hal_ether_sprintf(ni->ni_macaddr));
			an->an_tim_set = 1;
			(void) avp->av_set_tim(ni, 1);
		}
	} else {
		/*
		 * Don't bother grabbing the lock unless the queue is empty.
		 */
		if (an->an_swq_depth != 0)
			return;

		if (an->an_is_powersave &&
		    an->an_stack_psq == 0 &&
		    an->an_tim_set == 1 &&
		    an->an_swq_depth == 0) {
			DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
			    "%s: %s: swq_depth=0, tim_set=1, psq_set=0,"
			    " clear!\n",
			    __func__,
			    ath_hal_ether_sprintf(ni->ni_macaddr));
			an->an_tim_set = 0;
			(void) avp->av_set_tim(ni, 0);
		}
	}
#else
	return;
#endif /* ATH_SW_PSQ */
}
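/*
 * Sketch of the intended call sites for ath_tx_update_tim(), both
 * running with the TX lock held (see the ATH_TX_LOCK_ASSERT above);
 * the surrounding code is illustrative:
 *
 *	TX path, after queuing a frame to a node's software queue:
 *		ath_tx_update_tim(sc, ni, 1);
 *
 *	completion path (eg ath_tx_default_comp()), once the software
 *	queue for the node has drained:
 *		ath_tx_update_tim(sc, ni, 0);
 */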
#if 0
/*
 * Received a ps-poll frame from net80211.
 *
 * Here we get a chance to serve out a software-queued frame ourselves
 * before we punt it to net80211 to transmit us one itself - either
 * because there's traffic in the net80211 psq, or a NULL frame to
 * indicate there's nothing else.
 */
static void
ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m)
{
#ifdef ATH_SW_PSQ
	struct ath_node *an;
	struct ath_vap *avp;
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	int tid;

	/* Just paranoia */
	if (ni == NULL)
		return;

	/*
	 * Unassociated (temporary node) station.
	 */
	if (ni->ni_associd == 0)
		return;

	/*
	 * We do have an active node, so let's begin looking into it.
	 */
	an = ATH_NODE(ni);
	avp = ATH_VAP(ni->ni_vap);

	/*
	 * For now, we just call the original ps-poll method.
	 * Once we're ready to flip this on:
	 *
	 * + Set leak to 1, as no matter what we're going to have
	 *   to send a frame;
	 * + Check the software queue and if there's something in it,
	 *   schedule the highest TID that has traffic from this node.
	 *   Then make sure we kick the software scheduler to run so
	 *   it picks up said frame.
	 *
	 * That way whatever happens, we'll at least send _a_ frame
	 * to the given node.
	 *
	 * Again, yes, it's crappy QoS if the node has multiple
	 * TIDs worth of traffic - but let's get it working first
	 * before we optimise it.
	 *
	 * Also yes, there's definitely latency here - we're not
	 * direct dispatching to the hardware in this path (and
	 * we're likely being called from the packet receive path,
	 * so going back into TX may be a little hairy!) but again
	 * I'd like to get this working first before optimising
	 * turn-around time.
	 */

	ATH_TX_LOCK(sc);

	/*
	 * Legacy - we're called and the node isn't asleep.
	 * Immediately punt.
	 */
	if (! an->an_is_powersave) {
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: not in powersave?\n",
		    __func__,
		    ni->ni_macaddr,
		    ":");
		ATH_TX_UNLOCK(sc);
		if (avp->av_recv_pspoll)
			avp->av_recv_pspoll(ni, m);
		return;
	}

	/*
	 * We're in powersave.
	 *
	 * Leak a frame.
	 */
	an->an_leak_count = 1;

	/*
	 * Now, if there are no frames queued for the node, just punt
	 * to recv_pspoll.
	 *
	 * Don't bother checking if the TIM bit is set, we really
	 * only care if there are any frames here!
	 */
	if (an->an_swq_depth == 0) {
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: SWQ empty; punting to net80211\n",
		    __func__,
		    ni->ni_macaddr,
		    ":");
		if (avp->av_recv_pspoll)
			avp->av_recv_pspoll(ni, m);
		return;
	}

	/*
	 * Ok, let's schedule the highest TID that has traffic
	 * and then schedule something.
	 */
	for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) {
		struct ath_tid *atid = &an->an_tid[tid];
		/*
		 * No frames?  Skip.
		 */
		if (atid->axq_depth == 0)
			continue;
		ath_tx_tid_sched(sc, atid);
		/*
		 * XXX we could do a direct call to the TXQ
		 * scheduler code here to optimise latency
		 * at the expense of a REALLY deep callstack.
		 */
		ATH_TX_UNLOCK(sc);
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask);
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: leaking frame to TID %d\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    tid);
		return;
	}

	ATH_TX_UNLOCK(sc);

	/*
	 * XXX nothing in the TIDs at this point?  Eek.
	 */
	DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
	    "%s: %6D: TIDs empty, but ath_node showed traffic?!\n",
	    __func__,
	    ni->ni_macaddr,
	    ":");
	if (avp->av_recv_pspoll)
		avp->av_recv_pspoll(ni, m);
#else
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	if (avp->av_recv_pspoll)
		avp->av_recv_pspoll(ni, m);
#endif /* ATH_SW_PSQ */
}
#endif

MODULE_VERSION(if_ath, 1);
MODULE_DEPEND(if_ath, wlan, 1, 1, 1);	/* 802.11 media layer */
#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ)
MODULE_DEPEND(if_ath, alq, 1, 1, 1);
#endif