1 /*- 2 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 3. Neither the names of the above-listed copyright holders nor the names 16 * of any contributors may be used to endorse or promote products derived 17 * from this software without specific prior written permission. 18 * 19 * Alternatively, this software may be distributed under the terms of the 20 * GNU General Public License ("GPL") version 2 as published by the Free 21 * Software Foundation. 22 * 23 * NO WARRANTY 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 27 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 28 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 29 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 32 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 34 * THE POSSIBILITY OF SUCH DAMAGES. 
35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 /* 41 * Driver for the Atheros Wireless LAN controller. 42 * 43 * This software is derived from work of Atsushi Onoe; his contribution 44 * is greatly appreciated. 45 */ 46 47 #include "opt_inet.h" 48 #include "opt_ath.h" 49 50 #include <sys/param.h> 51 #include <sys/systm.h> 52 #include <sys/sysctl.h> 53 #include <sys/mbuf.h> 54 #include <sys/malloc.h> 55 #include <sys/lock.h> 56 #include <sys/mutex.h> 57 #include <sys/kernel.h> 58 #include <sys/socket.h> 59 #include <sys/sockio.h> 60 #include <sys/errno.h> 61 #include <sys/callout.h> 62 #include <sys/bus.h> 63 #include <sys/endian.h> 64 #include <sys/kthread.h> 65 #include <sys/taskqueue.h> 66 67 #include <machine/bus.h> 68 69 #include <net/if.h> 70 #include <net/if_dl.h> 71 #include <net/if_media.h> 72 #include <net/if_types.h> 73 #include <net/if_arp.h> 74 #include <net/ethernet.h> 75 #include <net/if_llc.h> 76 77 #include <net80211/ieee80211_var.h> 78 79 #include <net/bpf.h> 80 81 #ifdef INET 82 #include <netinet/in.h> 83 #include <netinet/if_ether.h> 84 #endif 85 86 #include <dev/ath/if_athvar.h> 87 #include <contrib/dev/ath/ah_desc.h> 88 #include <contrib/dev/ath/ah_devid.h> /* XXX for softled */ 89 90 #ifdef ATH_TX99_DIAG 91 #include <dev/ath/ath_tx99/ath_tx99.h> 92 #endif 93 94 /* unaligned little endian access */ 95 #define LE_READ_2(p) \ 96 ((u_int16_t) \ 97 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8))) 98 #define LE_READ_4(p) \ 99 ((u_int32_t) \ 100 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \ 101 (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24))) 102 103 enum { 104 ATH_LED_TX, 105 ATH_LED_RX, 106 ATH_LED_POLL, 107 }; 108 109 static void ath_init(void *); 110 static void ath_stop_locked(struct ifnet *); 111 static void ath_stop(struct ifnet *); 112 static void ath_start(struct ifnet *); 113 static int ath_reset(struct ifnet *); 114 static int ath_media_change(struct ifnet *); 115 static void 
ath_watchdog(struct ifnet *); 116 static int ath_ioctl(struct ifnet *, u_long, caddr_t); 117 static void ath_fatal_proc(void *, int); 118 static void ath_rxorn_proc(void *, int); 119 static void ath_bmiss_proc(void *, int); 120 static int ath_key_alloc(struct ieee80211com *, 121 const struct ieee80211_key *, 122 ieee80211_keyix *, ieee80211_keyix *); 123 static int ath_key_delete(struct ieee80211com *, 124 const struct ieee80211_key *); 125 static int ath_key_set(struct ieee80211com *, const struct ieee80211_key *, 126 const u_int8_t mac[IEEE80211_ADDR_LEN]); 127 static void ath_key_update_begin(struct ieee80211com *); 128 static void ath_key_update_end(struct ieee80211com *); 129 static void ath_mode_init(struct ath_softc *); 130 static void ath_setslottime(struct ath_softc *); 131 static void ath_updateslot(struct ifnet *); 132 static int ath_beaconq_setup(struct ath_hal *); 133 static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 134 static void ath_beacon_setup(struct ath_softc *, struct ath_buf *); 135 static void ath_beacon_proc(void *, int); 136 static void ath_bstuck_proc(void *, int); 137 static void ath_beacon_free(struct ath_softc *); 138 static void ath_beacon_config(struct ath_softc *); 139 static void ath_descdma_cleanup(struct ath_softc *sc, 140 struct ath_descdma *, ath_bufhead *); 141 static int ath_desc_alloc(struct ath_softc *); 142 static void ath_desc_free(struct ath_softc *); 143 static struct ieee80211_node *ath_node_alloc(struct ieee80211_node_table *); 144 static void ath_node_free(struct ieee80211_node *); 145 static u_int8_t ath_node_getrssi(const struct ieee80211_node *); 146 static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 147 static void ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, 148 struct ieee80211_node *ni, 149 int subtype, int rssi, u_int32_t rstamp); 150 static void ath_setdefantenna(struct ath_softc *, u_int); 151 static void ath_rx_proc(void *, int); 152 static void 
ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); 153 static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 154 static int ath_tx_setup(struct ath_softc *, int, int); 155 static int ath_wme_update(struct ieee80211com *); 156 static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); 157 static void ath_tx_cleanup(struct ath_softc *); 158 static int ath_tx_start(struct ath_softc *, struct ieee80211_node *, 159 struct ath_buf *, struct mbuf *); 160 static void ath_tx_proc_q0(void *, int); 161 static void ath_tx_proc_q0123(void *, int); 162 static void ath_tx_proc(void *, int); 163 static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 164 static void ath_draintxq(struct ath_softc *); 165 static void ath_stoprecv(struct ath_softc *); 166 static int ath_startrecv(struct ath_softc *); 167 static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 168 static void ath_next_scan(void *); 169 static void ath_calibrate(void *); 170 static int ath_newstate(struct ieee80211com *, enum ieee80211_state, int); 171 static void ath_setup_stationkey(struct ieee80211_node *); 172 static void ath_newassoc(struct ieee80211_node *, int); 173 static int ath_getchannels(struct ath_softc *, 174 HAL_REG_DOMAIN, HAL_CTRY_CODE, HAL_BOOL, HAL_BOOL); 175 static void ath_led_event(struct ath_softc *, int); 176 static void ath_update_txpow(struct ath_softc *); 177 178 static int ath_rate_setup(struct ath_softc *, u_int mode); 179 static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 180 181 static void ath_sysctlattach(struct ath_softc *); 182 static int ath_raw_xmit(struct ieee80211_node *, 183 struct mbuf *, const struct ieee80211_bpf_params *); 184 static void ath_bpfattach(struct ath_softc *); 185 static void ath_announce(struct ath_softc *); 186 187 SYSCTL_DECL(_hw_ath); 188 189 /* XXX validate sysctl values */ 190 static int ath_dwelltime = 200; /* 5 channels/second */ 191 SYSCTL_INT(_hw_ath, 
OID_AUTO, dwell, CTLFLAG_RW, &ath_dwelltime, 192 0, "channel dwell time (ms) for AP/station scanning"); 193 static int ath_calinterval = 30; /* calibrate every 30 secs */ 194 SYSCTL_INT(_hw_ath, OID_AUTO, calibrate, CTLFLAG_RW, &ath_calinterval, 195 0, "chip calibration interval (secs)"); 196 static int ath_outdoor = AH_TRUE; /* outdoor operation */ 197 SYSCTL_INT(_hw_ath, OID_AUTO, outdoor, CTLFLAG_RW, &ath_outdoor, 198 0, "outdoor operation"); 199 TUNABLE_INT("hw.ath.outdoor", &ath_outdoor); 200 static int ath_xchanmode = AH_TRUE; /* extended channel use */ 201 SYSCTL_INT(_hw_ath, OID_AUTO, xchanmode, CTLFLAG_RW, &ath_xchanmode, 202 0, "extended channel mode"); 203 TUNABLE_INT("hw.ath.xchanmode", &ath_xchanmode); 204 static int ath_countrycode = CTRY_DEFAULT; /* country code */ 205 SYSCTL_INT(_hw_ath, OID_AUTO, countrycode, CTLFLAG_RW, &ath_countrycode, 206 0, "country code"); 207 TUNABLE_INT("hw.ath.countrycode", &ath_countrycode); 208 static int ath_regdomain = 0; /* regulatory domain */ 209 SYSCTL_INT(_hw_ath, OID_AUTO, regdomain, CTLFLAG_RD, &ath_regdomain, 210 0, "regulatory domain"); 211 212 static int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ 213 SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf, 214 0, "rx buffers allocated"); 215 TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf); 216 static int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ 217 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf, 218 0, "tx buffers allocated"); 219 TUNABLE_INT("hw.ath.txbuf", &ath_txbuf); 220 221 #ifdef ATH_DEBUG 222 static int ath_debug = 0; 223 SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug, 224 0, "control debugging printfs"); 225 TUNABLE_INT("hw.ath.debug", &ath_debug); 226 enum { 227 ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ 228 ATH_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ 229 ATH_DEBUG_RECV = 0x00000004, /* basic recv operation */ 230 ATH_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ 231 
ATH_DEBUG_RATE = 0x00000010, /* rate control */ 232 ATH_DEBUG_RESET = 0x00000020, /* reset processing */ 233 ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */ 234 ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */ 235 ATH_DEBUG_WATCHDOG = 0x00000100, /* watchdog timeout */ 236 ATH_DEBUG_INTR = 0x00001000, /* ISR */ 237 ATH_DEBUG_TX_PROC = 0x00002000, /* tx ISR proc */ 238 ATH_DEBUG_RX_PROC = 0x00004000, /* rx ISR proc */ 239 ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */ 240 ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */ 241 ATH_DEBUG_KEYCACHE = 0x00020000, /* key cache management */ 242 ATH_DEBUG_STATE = 0x00040000, /* 802.11 state transitions */ 243 ATH_DEBUG_NODE = 0x00080000, /* node management */ 244 ATH_DEBUG_LED = 0x00100000, /* led management */ 245 ATH_DEBUG_FF = 0x00200000, /* fast frames */ 246 ATH_DEBUG_DFS = 0x00400000, /* DFS processing */ 247 ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */ 248 ATH_DEBUG_ANY = 0xffffffff 249 }; 250 #define IFF_DUMPPKTS(sc, m) \ 251 ((sc->sc_debug & (m)) || \ 252 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) 253 #define DPRINTF(sc, m, fmt, ...) do { \ 254 if (sc->sc_debug & (m)) \ 255 printf(fmt, __VA_ARGS__); \ 256 } while (0) 257 #define KEYPRINTF(sc, ix, hk, mac) do { \ 258 if (sc->sc_debug & ATH_DEBUG_KEYCACHE) \ 259 ath_keyprint(sc, __func__, ix, hk, mac); \ 260 } while (0) 261 static void ath_printrxbuf(const struct ath_buf *bf, u_int ix, int); 262 static void ath_printtxbuf(const struct ath_buf *bf, u_int qnum, u_int ix, int done); 263 #else 264 #define IFF_DUMPPKTS(sc, m) \ 265 ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) 266 #define DPRINTF(sc, m, fmt, ...) 
do {						\
	(void) sc;				\
} while (0)
#define	KEYPRINTF(sc, k, ix, mac) do {		\
	(void) sc;				\
} while (0)
#endif

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

/*
 * Device attach: locate and attach the HAL, reset the key cache,
 * collect the channel list, allocate tx/rx descriptors and h/w
 * transmit queues, wire up the net80211 callbacks and publish the
 * network interface.  Returns 0 on success or an errno on failure;
 * on failure sc_invalid is set so the isr ignores the hardware.
 */
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	/* reject a binary HAL built against a different driver ABI */
	if (ah->ah_abi != HAL_ABI_VERSION) {
		if_printf(ifp, "HAL ABI mismatch detected "
			"(HAL:0x%x != driver:0x%x)\n",
			ah->ah_abi, HAL_ABI_VERSION);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels.  The 802.11 layer
	 * is responsible for filtering this list based on settings
	 * like the phy mode.
	 */
	error = ath_getchannels(sc, ath_regdomain, ath_countrycode,
			ath_xchanmode != 0, ath_outdoor != 0);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	callout_init(&sc->sc_scan_ch, debug_mpsafenet ? CALLOUT_MPSAFE : 0);
	callout_init(&sc->sc_cal_ch, CALLOUT_MPSAFE);
	callout_init(&sc->sc_dfs_ch, CALLOUT_MPSAFE);

	ATH_TXBUF_LOCK_INIT(sc);

	/*
	 * NOTE(review): the taskqueue created here (and the callouts
	 * initialized above) are not released on the bad2/bad error
	 * paths below -- only ath_detach frees sc_tq.  Looks like a
	 * resource leak on attach failure; verify and fix upstream.
	 */
	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
	TASK_INIT(&sc->sc_rxorntask, 0, ath_rxorn_proc, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	ath_txq_init(sc, &sc->sc_mcastq, -1);	/* NB: s/w q, qnum not used */
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	if (sc->sc_softled) {
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
	}

	/* hook up the ifnet dispatch methods */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_watchdog = ath_watchdog;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	ic->ic_reset = ath_reset;
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_IBSS	/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP	/* hostap mode */
		| IEEE80211_C_MONITOR	/* monitor mode */
		| IEEE80211_C_AHDEMO	/* adhoc demo mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT	/* short slot time supported */
		| IEEE80211_C_WPA	/* capable of WPA1+WPA2 */
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_caps |= IEEE80211_C_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_caps |= IEEE80211_C_AES;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_caps |= IEEE80211_C_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_caps |= IEEE80211_C_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_caps |= IEEE80211_C_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_caps |= IEEE80211_C_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, ic->ic_myaddr);

	/* call MI attach routine. */
	ieee80211_ifattach(ic);
	sc->sc_opmode = ic->ic_opmode;
	/* override default methods; originals are saved so the
	 * driver wrappers can chain to them */
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	ic->ic_node_getrssi = ath_node_getrssi;
	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
	ic->ic_recv_mgmt = ath_recv_mgmt;
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = ath_newstate;
	ic->ic_crypto.cs_max_keyix = sc->sc_keymax;
	ic->ic_crypto.cs_key_alloc = ath_key_alloc;
	ic->ic_crypto.cs_key_delete = ath_key_delete;
	ic->ic_crypto.cs_key_set = ath_key_set;
	ic->ic_crypto.cs_key_update_begin = ath_key_update_begin;
	ic->ic_crypto.cs_key_update_end = ath_key_update_end;
	ic->ic_raw_xmit = ath_raw_xmit;
	/* complete initialization */
	ieee80211_media_init(ic, ath_media_change, ieee80211_media_status);

	ath_bpfattach(sc);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;	/* keep the isr from touching dead h/w */
	return error;
}

/*
 * Device detach: undo everything done by ath_attach.
 * Teardown ordering is important; see the note in the body.
 */
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
	bpfdetach(ifp);
	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(&sc->sc_ic);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	taskqueue_free(sc->sc_tq);
	ath_rate_detach(sc->sc_rc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);
	if_free(ifp);

	return 0;
}

/*
 * Power-management suspend hook: stop the interface.
 */
void
ath_suspend(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
}

/*
 * Power-management resume hook: re-initialize the hardware if the
 * interface was marked up, restart transmit if the driver was
 * running, and restore the soft-LED GPIO state.
 */
void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	if (ifp->if_flags & IFF_UP) {
		ath_init(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			ath_start(ifp);
	}
	if (sc->sc_softled) {
		ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin);
		ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
	}
}

/*
 * System shutdown hook: quiesce the hardware.
 */
void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
		return;
	if (!((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags &
	    IFF_DRV_RUNNING))) {
		/* interface is down: ack and squelch the interrupt */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	status &= sc->sc_imask;			/* discard unasked for bits */
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		ath_fatal_proc(sc, 0);
	} else if (status & HAL_INT_RXORN) {
		sc->sc_stats.ast_rxorn++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_rxorntask);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
			ath_beacon_proc(sc, 0);
		}
		if (status & HAL_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			sc->sc_rxlink = NULL;
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		/* rx/tx completion work is deferred to the taskqueue */
		if (status & HAL_INT_RX)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
		if (status & HAL_INT_TX)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			ath_hal_intrset(ah, sc->sc_imask);
		}
	}
}

/*
 * Deferred handler for fatal hardware errors: dump whatever
 * diagnostic state the hal provides, then reset the chip.
 */
static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	if (ath_hal_getfatalstate(sc->sc_ah, &state, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
			state[0], state[1] , state[2], state[3],
			state[4], state[5]);
	}
	ath_reset(ifp);
}

/*
 * Deferred handler for rx FIFO overrun: recover by resetting the chip.
 */
static void
ath_rxorn_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

	if_printf(ifp, "rx FIFO overrun; resetting\n");
	ath_reset(ifp);
}

/*
 * Deferred handler for beacon-miss interrupts; only expected in
 * station mode (enforced by the KASSERT below).
 */
static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
	KASSERT(ic->ic_opmode == IEEE80211_M_STA,
		("unexpect operating mode %u", ic->ic_opmode));
	if (ic->ic_state == IEEE80211_S_RUN) {
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* miss threshold in TSF units (beacon intervals are in TU,
		 * 1 TU = 1024 usec) */
		u_int bmisstimeout =
			ic->ic_bmissthreshold * ic->ic_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);
		/*
		 * Workaround phantom bmiss interrupts by sanity-checking
		 * the time of our last rx'd frame.  If it is within the
		 * beacon miss interval then ignore the interrupt.  If it's
		 * truly a bmiss we'll get another interrupt soon and that'll
		 * be dispatched up for processing.
		 */
		if (tsf - lastrx > bmisstimeout) {
			NET_LOCK_GIANT();
			ieee80211_beacon_miss(ic);
			NET_UNLOCK_GIANT();
		} else
			sc->sc_stats.ast_bmiss_phantom++;
	}
}

/*
 * Convert net80211 channel to a HAL channel with the flags
 * constrained to reflect the current operating mode and
 * the frequency possibly mapped for GSM channels.
 */
static void
ath_mapchan(struct ieee80211com *ic, HAL_CHANNEL *hc,
	const struct ieee80211_channel *chan)
{
#define	N(a)	(sizeof(a) / sizeof(a[0]))
	/* net80211 phy mode -> HAL channel flags; 0 entries are modes
	 * that must never reach here (enforced by the KASSERTs below) */
	static const u_int modeflags[] = {
		0,			/* IEEE80211_MODE_AUTO */
		CHANNEL_A,		/* IEEE80211_MODE_11A */
		CHANNEL_B,		/* IEEE80211_MODE_11B */
		CHANNEL_PUREG,		/* IEEE80211_MODE_11G */
		0,			/* IEEE80211_MODE_FH */
		CHANNEL_ST,		/* IEEE80211_MODE_TURBO_A */
		CHANNEL_108G		/* IEEE80211_MODE_TURBO_G */
	};
	enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan);

	KASSERT(mode < N(modeflags), ("unexpected phy mode %u", mode));
	KASSERT(modeflags[mode] != 0, ("mode %u undefined", mode));
	hc->channelFlags = modeflags[mode];
	if (IEEE80211_IS_CHAN_HALF(chan))
		hc->channelFlags |= CHANNEL_HALF;
	if (IEEE80211_IS_CHAN_QUARTER(chan))
		hc->channelFlags |= CHANNEL_QUARTER;

	/* GSM-band channels are mirrored into the 2.4GHz range for the hal */
	hc->channel = IEEE80211_IS_CHAN_GSM(chan) ?
		2422 + (922 - chan->ic_freq) : chan->ic_freq;
#undef N
}

/*
 * (Re)start the interface: reset the chip, restart the receive
 * engine, program the interrupt mask, and kick the net80211 state
 * machine.  Runs under ATH_LOCK; also the if_init entry point.
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_mapchan(ic, &sc->sc_curchan, ic->ic_curchan);
	if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		goto done;
	}

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);
	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	sc->sc_calinterval = 1;
	sc->sc_caltries = 0;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		goto done;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;
	ath_hal_intrset(ah, sc->sc_imask);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ic->ic_state = IEEE80211_S_INIT;

	/*
	 * The hardware should be ready to go now so it's safe
	 * to kick the 802.11 state machine as it's likely to
	 * immediately call back to us to send mgmt frames.
	 */
	ath_chan_change(sc, ic->ic_curchan);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
		if (ic->ic_roaming != IEEE80211_ROAMING_MANUAL)
			ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
	} else
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
done:
	ATH_UNLOCK(sc);
}

/*
 * Stop the interface with ATH_LOCK already held; tears down the
 * 802.11 state machine, interrupts, tx/rx machinery and beacon
 * resources.  Skips hardware touches when the device is gone
 * (sc_invalid).
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		ifp->if_timer = 0;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				callout_stop(&sc->sc_ledtimer);
				/* force LED to the "off" level */
				ath_hal_gpioset(ah, sc->sc_ledpin,
					!sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		IFQ_DRV_PURGE(&ifp->if_snd);
		ath_beacon_free(sc);
	}
}

/*
 * Stop the interface (if_stop entry point): take ATH_LOCK, do the
 * locked teardown, then put the chip into full sleep.
 */
static void
ath_stop(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;

	ATH_LOCK(sc);
	ath_stop_locked(ifp);
	if (!sc->sc_invalid) {
		/*
		 * Set the chip in full sleep mode.  Note that we are
		 * careful to do this only when bringing the interface
		 * completely to a stop.  When the chip is in this state
		 * it must be carefully woken up or references to
		 * registers in the PCI clock domain may freeze the bus
		 * (and system).  This varies by chip and is mostly an
		 * issue with newer parts that go to sleep more quickly.
		 */
		ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
	}
	ATH_UNLOCK(sc);
}

/*
 * Reset the hardware w/o losing operational state.  This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state.  Used to recover from various errors and
 * to reset or reload hardware state.
 */
static int
ath_reset(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Convert to a HAL channel description with the flags
	 * constrained to reflect the current operating mode.
	 */
	ath_mapchan(ic, &sc->sc_curchan, ic->ic_curchan);

	/* NB: quiesce tx/rx before reset; order matters */
	ath_hal_intrset(ah, 0);		/* disable interrupts */
	ath_draintxq(sc);		/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	/* NB: indicate channel change so we do a full reset */
	if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_TRUE, &status))
		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
			__func__, status);
	ath_update_txpow(sc);		/* update tx power state */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	sc->sc_calinterval = 1;		/* restart calibration schedule */
	sc->sc_caltries = 0;
	/*
	 * We may be doing a reset in response to an ioctl
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_chan_change(sc, ic->ic_curchan);
	if (ath_startrecv(sc) != 0)	/* restart recv */
		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
	if (ic->ic_state == IEEE80211_S_RUN)
		ath_beacon_config(sc);	/* restart beacons */
	ath_hal_intrset(ah, sc->sc_imask);

	ath_start(ifp);			/* restart xmit */
	return 0;
}

/*
 * Transmit entry point (if_start): drain the management queue and
 * the interface send queue, mapping each frame to a tx buffer and
 * handing it to ath_tx_start.  Management frames carry their node
 * reference in m_pkthdr.rcvif (see below); data frames look up the
 * destination node and are encapsulated here.
 */
static void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m;
	struct ieee80211_frame *wh;
	struct ether_header *eh;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		ATH_TXBUF_LOCK(sc);
		bf = STAILQ_FIRST(&sc->sc_txbuf);
		if (bf != NULL)
			STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
		if (bf == NULL) {
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n",
				__func__);
			sc->sc_stats.ast_tx_qstop++;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		/*
		 * Poll the management queue for frames; they
		 * have priority over normal data frames.
		 */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m == NULL) {
			/*
			 * No data frames go out unless we're associated.
			 */
			if (ic->ic_state != IEEE80211_S_RUN) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: discard data packet, state %s\n",
				    __func__,
				    ieee80211_state_name[ic->ic_state]);
				sc->sc_stats.ast_tx_discard++;
				ATH_TXBUF_LOCK(sc);
				STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
				ATH_TXBUF_UNLOCK(sc);
				break;
			}
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);	/* XXX: LOCK */
			if (m == NULL) {
				/* nothing queued; return the tx buffer */
				ATH_TXBUF_LOCK(sc);
				STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
				ATH_TXBUF_UNLOCK(sc);
				break;
			}
			/*
			 * Find the node for the destination so we can do
			 * things like power save and fast frames aggregation.
			 */
			if (m->m_len < sizeof(struct ether_header) &&
			    (m = m_pullup(m, sizeof(struct ether_header))) == NULL) {
				ic->ic_stats.is_tx_nobuf++;	/* XXX */
				ni = NULL;
				goto bad;
			}
			eh = mtod(m, struct ether_header *);
			ni = ieee80211_find_txnode(ic, eh->ether_dhost);
			if (ni == NULL) {
				/* NB: ieee80211_find_txnode does stat+msg */
				m_freem(m);
				goto bad;
			}
			if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
			    (m->m_flags & M_PWR_SAV) == 0) {
				/*
				 * Station in power save mode; pass the frame
				 * to the 802.11 layer and continue.  We'll get
				 * the frame back when the time is right.
				 */
				ieee80211_pwrsave(ic, ni, m);
				goto reclaim;
			}
			/* calculate priority so we can find the tx queue */
			if (ieee80211_classify(ic, m, ni)) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
					"%s: discard, classification failure\n",
					__func__);
				m_freem(m);
				goto bad;
			}
			ifp->if_opackets++;
			BPF_MTAP(ifp, m);
			/*
			 * Encapsulate the packet in prep for transmission.
			 */
			m = ieee80211_encap(ic, m, ni);
			if (m == NULL) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
					"%s: encapsulation failure\n",
					__func__);
				sc->sc_stats.ast_tx_encap++;
				goto bad;
			}
		} else {
			/*
			 * Hack!  The referenced node pointer is in the
			 * rcvif field of the packet header.  This is
			 * placed there by ieee80211_mgmt_output because
			 * we need to hold the reference with the frame
			 * and there's no other way (other than packet
			 * tags which we consider too expensive to use)
			 * to pass it along.
			 */
			ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
			m->m_pkthdr.rcvif = NULL;

			wh = mtod(m, struct ieee80211_frame *);
			if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
			    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
				/* fill time stamp */
				u_int64_t tsf;
				u_int32_t *tstamp;

				tsf = ath_hal_gettsf64(ah);
				/* XXX: adjust 100us delay to xmit */
				tsf += 100;
				/* timestamp field immediately follows the
				 * 802.11 header; stored little-endian */
				tstamp = (u_int32_t *)&wh[1];
				tstamp[0] = htole32(tsf & 0xffffffff);
				tstamp[1] = htole32(tsf >> 32);
			}
			sc->sc_stats.ast_tx_mgmt++;
		}

		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			ifp->if_oerrors++;
	reclaim:
			/* return the tx buffer and drop the node ref */
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			if (ni != NULL)
				ieee80211_free_node(ni);
			continue;
		}

		/* arm the watchdog: 5 ticks of sc_tx_timer */
		sc->sc_tx_timer = 5;
		ifp->if_timer = 1;
	}
}

/*
 * Media-change callback: let net80211 process the change and, when
 * it asks for a network reset, recompute the HAL operating mode and
 * reinitialize the device if it is up.
 */
static int
ath_media_change(struct ifnet *ifp)
{
#define	IS_UP(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	int error;

	error = ieee80211_media_change(ifp);
	if (error == ENETRESET) {
		struct ath_softc *sc = ifp->if_softc;
		struct ieee80211com *ic = &sc->sc_ic;

		if (ic->ic_opmode == IEEE80211_M_AHDEMO) {
			/*
			 * Adhoc demo mode is just ibss mode w/o beacons
			 * (mostly).  The hal knows nothing about it;
			 * tell it we're operating in ibss mode.
			 */
			sc->sc_opmode = HAL_M_IBSS;
		} else
			sc->sc_opmode = ic->ic_opmode;
		if (IS_UP(ifp))
			ath_init(ifp->if_softc);	/* XXX lose error */
		error = 0;
	}
	return error;
#undef IS_UP
}

#ifdef ATH_DEBUG
/*
 * Debug helper: dump a key cache entry (cipher name, key bytes,
 * mac address and, for TKIP, the MIC key material) to the console.
 * NB: ciphers[] is indexed by HAL_CIPHER_* values (hk->kv_type).
 */
static void
ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix,
	const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"AES-OCB",
		"AES-CCM",
		"CKIP",
		"TKIP",
		"CLR",
	};
	int i, n;

	printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
	for (i = 0, n = hk->kv_len; i < n; i++)
		printf("%02x", hk->kv_val[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->kv_type == HAL_CIPHER_TKIP) {
		printf(" %s ", sc->sc_splitmic ? "mic" : "rxmic");
		for (i = 0; i < sizeof(hk->kv_mic); i++)
			printf("%02x", hk->kv_mic[i]);
#if HAL_ABI_VERSION > 0x06052200
		/* newer hal ABI carries a separate tx MIC key */
		if (!sc->sc_splitmic) {
			printf(" txmic ");
			for (i = 0; i < sizeof(hk->kv_txmic); i++)
				printf("%02x", hk->kv_txmic[i]);
		}
#endif
	}
	printf("\n");
}
#endif

/*
 * Set a TKIP key into the hardware.  This handles the
 * potential distribution of key state to multiple key
 * cache slots for TKIP.
 */
static int
ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
	HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
#define	IEEE80211_KEY_XR	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
	static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
	struct ath_hal *ah = sc->sc_ah;

	KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
		("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
	if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
		/* key is used for both transmit and receive */
		if (sc->sc_splitmic) {
			/*
			 * TX key goes at first index, RX key at the rx index.
			 * The hal handles the MIC keys at index+64.
			 */
			memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
			KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
			if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
				return 0;

			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
			KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
			/* XXX delete tx key on failure? */
			return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
		} else {
			/*
			 * Room for both TX+RX MIC keys in one key cache
			 * slot, just set key at the first index; the hal
			 * will handle the reset.
			 */
			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
#if HAL_ABI_VERSION > 0x06052200
			memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
#endif
			KEYPRINTF(sc, k->wk_keyix, hk, mac);
			return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
		}
	} else if (k->wk_flags & IEEE80211_KEY_XR) {
		/*
		 * TX/RX key goes at first index.
		 * The hal handles the MIC keys at index+64.
		 */
		memcpy(hk->kv_mic, k->wk_flags & IEEE80211_KEY_XMIT ?
			k->wk_txmic : k->wk_rxmic, sizeof(hk->kv_mic));
		KEYPRINTF(sc, k->wk_keyix, hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
	}
	/* NB: key with neither XMIT nor RECV set is rejected */
	return 0;
#undef IEEE80211_KEY_XR
}

/*
 * Set a net80211 key into the hardware.
This handles the
 * potential distribution of key state to multiple key
 * cache slots for TKIP with hardware MIC support.
 */
static int
ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
	const u_int8_t mac0[IEEE80211_ADDR_LEN],
	struct ieee80211_node *bss)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* net80211 cipher number -> HAL cipher type */
	static const u_int8_t ciphermap[] = {
		HAL_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
		HAL_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
		HAL_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
		HAL_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
		(u_int8_t) -1,		/* 4 is not allocated */
		HAL_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
		HAL_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
	};
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	u_int8_t gmac[IEEE80211_ADDR_LEN];
	const u_int8_t *mac;
	HAL_KEYVAL hk;

	memset(&hk, 0, sizeof(hk));
	/*
	 * Software crypto uses a "clear key" so non-crypto
	 * state kept in the key cache are maintained and
	 * so that rx frames have an entry to match.
	 */
	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
		KASSERT(cip->ic_cipher < N(ciphermap),
			("invalid cipher type %u", cip->ic_cipher));
		hk.kv_type = ciphermap[cip->ic_cipher];
		hk.kv_len = k->wk_keylen;
		memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
	} else
		hk.kv_type = HAL_CIPHER_CLR;

	if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
		/*
		 * Group keys on hardware that supports multicast frame
		 * key search use a mac that is the sender's address with
		 * the high bit set instead of the app-specified address.
		 */
		IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
		gmac[0] |= 0x80;
		mac = gmac;
	} else
		mac = mac0;

	if (hk.kv_type == HAL_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
		/* h/w MIC: may need multiple key cache slots */
		return ath_keyset_tkip(sc, k, &hk, mac);
	} else {
		KEYPRINTF(sc, k->wk_keyix, &hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
	}
#undef N
}

/*
 * Allocate tx/rx key slots for TKIP.  We allocate two slots for
 * each key, one for decrypt/encrypt and the other for the MIC.
 * Split-mic variant: four slots total per key (tx at i, rx at i+32,
 * MICs implicitly at +64 of each).
 */
static u_int16_t
key_alloc_2pair(struct ath_softc *sc,
	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	KASSERT(sc->sc_splitmic, ("key cache !split"));
	/* XXX could optimize */
	/* only scan the first quarter; the rest is claimed by +32/+64 */
	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (isset(sc->sc_keymap, keyix+32) ||
			    isset(sc->sc_keymap, keyix+64) ||
			    isset(sc->sc_keymap, keyix+32+64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				/* NB: re-enters the shift loop above to try
				 * the next bit in this byte */
				goto again;
			}
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix+64);
			setbit(sc->sc_keymap, keyix+32);
			setbit(sc->sc_keymap, keyix+32+64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u %u,%u\n",
				__func__, keyix, keyix+64,
				keyix+32, keyix+32+64);
			*txkeyix = keyix;
			*rxkeyix = keyix+32;
			return 1;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return 0;
#undef N
}

/*
 * Allocate tx/rx key slots for TKIP.
We allocate two slots for
 * each key, one for decrypt/encrypt and the other for the MIC.
 * Non-split-mic variant: tx and rx share slot i, MIC at i+64.
 */
static u_int16_t
key_alloc_pair(struct ath_softc *sc,
	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	KASSERT(!sc->sc_splitmic, ("key cache split"));
	/* XXX could optimize */
	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			if (isset(sc->sc_keymap, keyix+64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				goto again;
			}
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix+64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u\n",
				__func__, keyix, keyix+64);
			*txkeyix = *rxkeyix = keyix;
			return 1;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return 0;
#undef N
}

/*
 * Allocate a single key cache slot.
 */
static int
key_alloc_single(struct ath_softc *sc,
	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	/* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
	for (i = 0; i < N(sc->sc_keymap); i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots are free.
			 */
			keyix = i*NBBY;
			while (b & 1)
				keyix++, b >>= 1;
			setbit(sc->sc_keymap, keyix);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
				__func__, keyix);
			*txkeyix = *rxkeyix = keyix;
			return 1;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
	return 0;
#undef N
}

/*
 * Allocate one or more key cache slots for a uniacst key.  The
 * key itself is needed only to identify the cipher.  For hardware
 * TKIP with split cipher+MIC keys we allocate two key cache slot
 * pairs so that we can setup separate TX and RX MIC keys.  Note
 * that the MIC key for a TKIP key at slot i is assumed by the
 * hardware to be at slot i+64.  This limits TKIP keys to the first
 * 64 entries.
 */
static int
ath_key_alloc(struct ieee80211com *ic, const struct ieee80211_key *k,
	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/*
	 * Group key allocation must be handled specially for
	 * parts that do not support multicast key cache search
	 * functionality.  For those parts the key id must match
	 * the h/w key index so lookups find the right key.  On
	 * parts w/ the key search facility we install the sender's
	 * mac address (with the high bit set) and let the hardware
	 * find the key w/o using the key id.  This is preferred as
	 * it permits us to support multiple users for adhoc and/or
	 * multi-station operation.
	 */
	if ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey) {
		if (!(&ic->ic_nw_keys[0] <= k &&
		      k < &ic->ic_nw_keys[IEEE80211_WEP_NKID])) {
			/* should not happen */
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return 0;
		}
		/*
		 * XXX we pre-allocate the global keys so
		 * have no way to check if they've already been allocated.
		 */
		*keyix = *rxkeyix = k - ic->ic_nw_keys;
		return 1;
	}

	/*
	 * We allocate two pair for TKIP when using the h/w to do
	 * the MIC.  For everything else, including software crypto,
	 * we allocate a single entry.  Note that s/w crypto requires
	 * a pass-through slot on the 5211 and 5212.  The 5210 does
	 * not support pass-through cache entries and we map all
	 * those requests to slot 0.
	 */
	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
		return key_alloc_single(sc, keyix, rxkeyix);
	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
		if (sc->sc_splitmic)
			return key_alloc_2pair(sc, keyix, rxkeyix);
		else
			return key_alloc_pair(sc, keyix, rxkeyix);
	} else {
		return key_alloc_single(sc, keyix, rxkeyix);
	}
}

/*
 * Delete an entry in the key cache allocated by ath_key_alloc.
 */
static int
ath_key_delete(struct ieee80211com *ic, const struct ieee80211_key *k)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	u_int keyix = k->wk_keyix;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);

	ath_hal_keyreset(ah, keyix);
	/*
	 * Handle split tx/rx keying required for TKIP with h/w MIC.
	 */
	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
		ath_hal_keyreset(ah, keyix+32);		/* RX key */
	if (keyix >= IEEE80211_WEP_NKID) {
		/*
		 * Don't touch keymap entries for global keys so
		 * they are never considered for dynamic allocation.
		 */
		clrbit(sc->sc_keymap, keyix);
		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
			clrbit(sc->sc_keymap, keyix+64);	/* TX key MIC */
			if (sc->sc_splitmic) {
				/* +32 for RX key, +32+64 for RX key MIC */
				clrbit(sc->sc_keymap, keyix+32);
				clrbit(sc->sc_keymap, keyix+32+64);
			}
		}
	}
	return 1;
}

/*
 * Set the key cache contents for the specified key.  Key cache
 * slot(s) must already have been allocated by ath_key_alloc.
 */
static int
ath_key_set(struct ieee80211com *ic, const struct ieee80211_key *k,
	const u_int8_t mac[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	return ath_keyset(sc, k, mac, ic->ic_bss);
}

/*
 * Block/unblock tx+rx processing while a key change is done.
 * We assume the caller serializes key management operations
 * so we only need to worry about synchronization with other
 * uses that originate in the driver.
 */
static void
ath_key_update_begin(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
#if 0
	tasklet_disable(&sc->sc_rxtq);
#endif
	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
}

/* Counterpart of ath_key_update_begin: release the send queue lock. */
static void
ath_key_update_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	IF_UNLOCK(&ifp->if_snd);
#if 0
	tasklet_enable(&sc->sc_rxtq);
#endif
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when scanning
 * o accept control frames:
 *   - when in monitor mode
 */
static u_int32_t
ath_calcrxfilter(struct ath_softc *sc, enum ieee80211_state state)
{
#define	RX_FILTER_PRESERVE	(HAL_RX_FILTER_PHYERR | HAL_RX_FILTER_PHYRADAR)
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t rfilt;

	rfilt = (ath_hal_getrxfilter(ah) & RX_FILTER_PRESERVE)
	      | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;
	if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
	    (ifp->if_flags & IFF_PROMISC))
		rfilt |= HAL_RX_FILTER_PROM;
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_IBSS ||
	    state == IEEE80211_S_SCAN)
		rfilt |= HAL_RX_FILTER_BEACON;
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		rfilt |= HAL_RX_FILTER_CONTROL;
	return rfilt;
#undef RX_FILTER_PRESERVE
}

/*
 * Program the rx filter, operating mode, mac address and
 * multicast filter into the hardware to match current state.
 */
static void
ath_mode_init(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t rfilt, mfilt[2], val;
	u_int8_t pos;
	struct ifmultiaddr *ifma;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc, ic->ic_state);
	ath_hal_setrxfilter(ah, rfilt);

	/* configure operational mode */
	ath_hal_setopmode(ah);

	/*
	 * Handle any link-level address change.  Note that we only
	 * need to force ic_myaddr; any other addresses are handled
	 * as a byproduct of the ifnet code marking the interface
	 * down then up.
	 *
	 * XXX should get from lladdr instead of arpcom but that's more work
	 */
	IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp));
	ath_hal_setmac(ah, ic->ic_myaddr);

	/* calculate and install multicast filter */
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		mfilt[0] = mfilt[1] = 0;
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			caddr_t dl;

			/* calculate XOR of eight 6bit values */
			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			val = LE_READ_4(dl + 0);
			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			val = LE_READ_4(dl + 3);
			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			pos &= 0x3f;
			/* pos indexes one bit of the 64-bit h/w hash filter */
			mfilt[pos / 32] |= (1 << (pos % 32));
		}
		IF_ADDR_UNLOCK(ifp);
	} else {
		mfilt[0] = mfilt[1] = ~0;
	}
	ath_hal_setmcastfilter(ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, MC filter %08x:%08x\n",
		__func__, rfilt, mfilt[0], mfilt[1]);
}

/*
 * Set the slot time based on the current setting.
 */
static void
ath_setslottime(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	u_int usec;

	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
		usec = 13;
	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
		usec = 21;
	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		/* honor short/long slot time only in 11g */
		/* XXX shouldn't honor on pure g or turbo g channel */
		if (ic->ic_flags & IEEE80211_F_SHSLOT)
			usec = HAL_SLOT_TIME_9;
		else
			usec = HAL_SLOT_TIME_20;
	} else
		usec = HAL_SLOT_TIME_9;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ?
"short" : "long", usec); 1885 1886 ath_hal_setslottime(ah, usec); 1887 sc->sc_updateslot = OK; 1888 } 1889 1890 /* 1891 * Callback from the 802.11 layer to update the 1892 * slot time based on the current setting. 1893 */ 1894 static void 1895 ath_updateslot(struct ifnet *ifp) 1896 { 1897 struct ath_softc *sc = ifp->if_softc; 1898 struct ieee80211com *ic = &sc->sc_ic; 1899 1900 /* 1901 * When not coordinating the BSS, change the hardware 1902 * immediately. For other operation we defer the change 1903 * until beacon updates have propagated to the stations. 1904 */ 1905 if (ic->ic_opmode == IEEE80211_M_HOSTAP) 1906 sc->sc_updateslot = UPDATE; 1907 else 1908 ath_setslottime(sc); 1909 } 1910 1911 /* 1912 * Setup a h/w transmit queue for beacons. 1913 */ 1914 static int 1915 ath_beaconq_setup(struct ath_hal *ah) 1916 { 1917 HAL_TXQ_INFO qi; 1918 1919 memset(&qi, 0, sizeof(qi)); 1920 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 1921 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 1922 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 1923 /* NB: for dynamic turbo, don't enable any other interrupts */ 1924 qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE; 1925 return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi); 1926 } 1927 1928 /* 1929 * Setup the transmit queue parameters for the beacon queue. 1930 */ 1931 static int 1932 ath_beaconq_config(struct ath_softc *sc) 1933 { 1934 #define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1) 1935 struct ieee80211com *ic = &sc->sc_ic; 1936 struct ath_hal *ah = sc->sc_ah; 1937 HAL_TXQ_INFO qi; 1938 1939 ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi); 1940 if (ic->ic_opmode == IEEE80211_M_HOSTAP) { 1941 /* 1942 * Always burst out beacon and CAB traffic. 1943 */ 1944 qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT; 1945 qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT; 1946 qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT; 1947 } else { 1948 struct wmeParams *wmep = 1949 &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE]; 1950 /* 1951 * Adhoc mode; important thing is to use 2x cwmin. 
1952 */ 1953 qi.tqi_aifs = wmep->wmep_aifsn; 1954 qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 1955 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 1956 } 1957 1958 if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) { 1959 device_printf(sc->sc_dev, "unable to update parameters for " 1960 "beacon hardware queue!\n"); 1961 return 0; 1962 } else { 1963 ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */ 1964 return 1; 1965 } 1966 #undef ATH_EXPONENT_TO_VALUE 1967 } 1968 1969 /* 1970 * Allocate and setup an initial beacon frame. 1971 */ 1972 static int 1973 ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) 1974 { 1975 struct ieee80211com *ic = ni->ni_ic; 1976 struct ath_buf *bf; 1977 struct mbuf *m; 1978 int error; 1979 1980 bf = STAILQ_FIRST(&sc->sc_bbuf); 1981 if (bf == NULL) { 1982 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: no dma buffers\n", __func__); 1983 sc->sc_stats.ast_be_nombuf++; /* XXX */ 1984 return ENOMEM; /* XXX */ 1985 } 1986 /* 1987 * NB: the beacon data buffer must be 32-bit aligned; 1988 * we assume the mbuf routines will return us something 1989 * with this alignment (perhaps should assert). 1990 */ 1991 m = ieee80211_beacon_alloc(ic, ni, &sc->sc_boff); 1992 if (m == NULL) { 1993 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: cannot get mbuf\n", 1994 __func__); 1995 sc->sc_stats.ast_be_nombuf++; 1996 return ENOMEM; 1997 } 1998 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, 1999 bf->bf_segs, &bf->bf_nseg, 2000 BUS_DMA_NOWAIT); 2001 if (error == 0) { 2002 bf->bf_m = m; 2003 bf->bf_node = ieee80211_ref_node(ni); 2004 } else { 2005 m_freem(m); 2006 } 2007 return error; 2008 } 2009 2010 /* 2011 * Setup the beacon frame for transmit. 
 */
static void
ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
{
#define	USE_SHPREAMBLE(_ic) \
	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
		== IEEE80211_F_SHPREAMBLE)
	struct ieee80211_node *ni = bf->bf_node;
	struct ieee80211com *ic = ni->ni_ic;
	struct mbuf *m = bf->bf_m;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	int flags, antenna;
	const HAL_RATE_TABLE *rt;
	u_int8_t rix, rate;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
		__func__, m, m->m_len);

	/* setup descriptors */
	ds = bf->bf_desc;

	/* beacons are never acked */
	flags = HAL_TXDESC_NOACK;
	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
		/*
		 * With VEOL support the descriptor is self-linked so the
		 * hardware retransmits it each beacon interval without
		 * software intervention.
		 */
		ds->ds_link = bf->bf_daddr;	/* self-linked */
		flags |= HAL_TXDESC_VEOL;
		/*
		 * Let hardware handle antenna switching.
		 */
		antenna = sc->sc_txantenna;
	} else {
		ds->ds_link = 0;
		/*
		 * Switch antenna every 4 beacons.
		 * XXX assumes two antenna
		 */
		antenna = sc->sc_txantenna != 0 ? sc->sc_txantenna
			: (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
	}

	KASSERT(bf->bf_nseg == 1,
		("multi-segment beacon frame; nseg %u", bf->bf_nseg));
	ds->ds_data = bf->bf_segs[0].ds_addr;
	/*
	 * Calculate rate code.
	 * XXX everything at min xmit rate
	 */
	rix = sc->sc_minrateix;
	rt = sc->sc_currates;
	rate = rt->info[rix].rateCode;
	if (USE_SHPREAMBLE(ic))
		rate |= rt->info[rix].shortPreamble;
	ath_hal_setuptxdesc(ah, ds
		, m->m_len + IEEE80211_CRC_LEN	/* frame length */
		, sizeof(struct ieee80211_frame)/* header length */
		, HAL_PKT_TYPE_BEACON		/* Atheros packet type */
		, ni->ni_txpower		/* txpower XXX */
		, rate, 1			/* series 0 rate/tries */
		, HAL_TXKEYIX_INVALID		/* no encryption */
		, antenna			/* antenna mode */
		, flags				/* no ack, veol for beacons */
		, 0				/* rts/cts rate */
		, 0				/* rts/cts duration */
	);
	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	ath_hal_filltxdesc(ah, ds
		, roundup(m->m_len, 4)		/* buffer length */
		, AH_TRUE			/* first segment */
		, AH_TRUE			/* last segment */
		, ds				/* first descriptor */
	);
#undef USE_SHPREAMBLE
}

/*
 * Append the contents of src to dst; both queues
 * are assumed to be locked.
 */
static void
ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
{
	STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
	/* dst inherits src's tail link pointer; src is left empty */
	dst->axq_link = src->axq_link;
	src->axq_link = NULL;
	dst->axq_depth += src->axq_depth;
	src->axq_depth = 0;
}

/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 */
static void
ath_beacon_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
	/*
	 * NOTE(review): bf is dereferenced here for ni before the
	 * bf == NULL check below; if sc_bbuf can ever be empty when
	 * this task runs this is a NULL dereference — confirm.
	 */
	struct ieee80211_node *ni = bf->bf_node;
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_txq *cabq = sc->sc_cabq;
	struct mbuf *m;
	int ncabq, nmcastq, error, otherant;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
		__func__, pending);

	/* no beacons in station/monitor mode or without a staged frame */
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_MONITOR ||
	    bf == NULL || bf->bf_m == NULL) {
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_flags=%x bf=%p bf_m=%p\n",
			__func__, ic->ic_flags, bf, bf ? bf->bf_m : NULL);
		return;
	}
	/*
	 * Check if the previous beacon has gone out.  If
	 * not don't try to post another, skip this period
	 * and wait for the next.  Missed beacons indicate
	 * a problem and should not occur.  If we miss too
	 * many consecutive beacons reset the device.
	 */
	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
		sc->sc_bmisscount++;
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: missed %u consecutive beacons\n",
			__func__, sc->sc_bmisscount);
		if (sc->sc_bmisscount > 3)		/* NB: 3 is a guess */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
		return;
	}
	if (sc->sc_bmisscount != 0) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: resume beacon xmit after %u misses\n",
			__func__, sc->sc_bmisscount);
		sc->sc_bmisscount = 0;
	}

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	m = bf->bf_m;
	nmcastq = sc->sc_mcastq.axq_depth;
	ncabq = ath_hal_numtxpending(ah, cabq->axq_qnum);
	if (ieee80211_beacon_update(ic, bf->bf_node, &sc->sc_boff, m, ncabq+nmcastq)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(ic->ic_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return;
		}
	}
	if (ncabq && (sc->sc_boff.bo_tim[4] & 1)) {
		/*
		 * CABQ traffic from the previous DTIM is still pending.
		 * This is ok for now but when there are multiple vap's
		 * and we are using staggered beacons we'll want to drain
		 * the cabq before loading frames for the different vap.
		 */
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: cabq did not drain, mcastq %u cabq %u/%u\n",
		    __func__, nmcastq, ncabq, cabq->axq_depth);
		sc->sc_stats.ast_cabq_busy++;
	}

	/*
	 * Handle slot time change when a non-ERP station joins/leaves
	 * an 11g network.  The 802.11 layer notifies us via callback,
	 * we mark updateslot, then wait one beacon before effecting
	 * the change.  This gives associated stations at least one
	 * beacon interval to note the state change.
	 */
	/* XXX locking */
	if (sc->sc_updateslot == UPDATE)
		sc->sc_updateslot = COMMIT;	/* commit next beacon */
	else if (sc->sc_updateslot == COMMIT)
		ath_setslottime(sc);		/* commit change to h/w */

	/*
	 * Check recent per-antenna transmit statistics and flip
	 * the default antenna if noticeably more frames went out
	 * on the non-default antenna.
	 * XXX assumes 2 anntenae
	 */
	otherant = sc->sc_defant & 1 ? 2 : 1;
	if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
		ath_setdefantenna(sc, otherant);
	sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;

	/*
	 * Construct tx descriptor.
	 */
	ath_beacon_setup(sc, bf);

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
		DPRINTF(sc, ATH_DEBUG_ANY,
			"%s: beacon queue %u did not stop?\n",
			__func__, sc->sc_bhalq);
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/*
	 * Enable the CAB queue before the beacon queue to
	 * insure cab frames are triggered by this beacon.
	 */
	if (sc->sc_boff.bo_tim_len && (sc->sc_boff.bo_tim[4] & 1)) {
		/* NB: only at DTIM */
		ATH_TXQ_LOCK(cabq);
		ATH_TXQ_LOCK(&sc->sc_mcastq);
		if (nmcastq) {
			struct ath_buf *bfm;

			/*
			 * Move frames from the s/w mcast q to the h/w cab q.
			 */
			bfm = STAILQ_FIRST(&sc->sc_mcastq.axq_q);
			if (cabq->axq_link != NULL) {
				/* chain onto the existing cabq tail */
				*cabq->axq_link = bfm->bf_daddr;
			} else
				ath_hal_puttxbuf(ah, cabq->axq_qnum,
					bfm->bf_daddr);
			ath_txqmove(cabq, &sc->sc_mcastq);

			sc->sc_stats.ast_cabq_xmit += nmcastq;
		}
		/* NB: gated by beacon so safe to start here */
		ath_hal_txstart(ah, cabq->axq_qnum);
		ATH_TXQ_UNLOCK(cabq);
		ATH_TXQ_UNLOCK(&sc->sc_mcastq);
	}
	ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
	ath_hal_txstart(ah, sc->sc_bhalq);
	DPRINTF(sc, ATH_DEBUG_BEACON_PROC,
		"%s: TXDP[%u] = %p (%p)\n", __func__,
		sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc);

	sc->sc_stats.ast_be_xmit++;
}

/*
 * Reset the hardware after detecting beacons have stopped.
2266 */ 2267 static void 2268 ath_bstuck_proc(void *arg, int pending) 2269 { 2270 struct ath_softc *sc = arg; 2271 struct ifnet *ifp = sc->sc_ifp; 2272 2273 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", 2274 sc->sc_bmisscount); 2275 ath_reset(ifp); 2276 } 2277 2278 /* 2279 * Reclaim beacon resources. 2280 */ 2281 static void 2282 ath_beacon_free(struct ath_softc *sc) 2283 { 2284 struct ath_buf *bf; 2285 2286 STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) { 2287 if (bf->bf_m != NULL) { 2288 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 2289 m_freem(bf->bf_m); 2290 bf->bf_m = NULL; 2291 } 2292 if (bf->bf_node != NULL) { 2293 ieee80211_free_node(bf->bf_node); 2294 bf->bf_node = NULL; 2295 } 2296 } 2297 } 2298 2299 /* 2300 * Configure the beacon and sleep timers. 2301 * 2302 * When operating as an AP this resets the TSF and sets 2303 * up the hardware to notify us when we need to issue beacons. 2304 * 2305 * When operating in station mode this sets up the beacon 2306 * timers according to the timestamp of the last received 2307 * beacon and the current TSF, configures PCF and DTIM 2308 * handling, programs the sleep registers so the hardware 2309 * will wakeup in time to receive beacons, and configures 2310 * the beacon miss handling so we'll receive a BMISS 2311 * interrupt when we stop seeing beacons from the AP 2312 * we've associated with. 
 */
static void
ath_beacon_config(struct ath_softc *sc)
{
	/* convert a 64-bit TSF (hi/lo halves, usecs) to time units (1024us) */
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
#define	FUDGE	2
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	u_int32_t nexttbtt, intval, tsftu;
	u_int64_t tsf;

	/* extract tstamp from last beacon and convert to TU */
	nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
			     LE_READ_4(ni->ni_tstamp.data));
	/* NB: the beacon interval is kept internally in TU's */
	intval = ni->ni_intval & HAL_BEACON_PERIOD;
	if (nexttbtt == 0)		/* e.g. for ap mode */
		nexttbtt = intval;
	else if (intval)		/* NB: can be 0 for monitor mode */
		nexttbtt = roundup(nexttbtt, intval);
	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
		__func__, nexttbtt, intval, ni->ni_intval);
	if (ic->ic_opmode == IEEE80211_M_STA) {
		HAL_BEACON_STATE bs;
		int dtimperiod, dtimcount;
		int cfpperiod, cfpcount;

		/*
		 * Setup dtim and cfp parameters according to
		 * last beacon we received (which may be none).
		 */
		dtimperiod = ni->ni_dtim_period;
		if (dtimperiod <= 0)		/* NB: 0 if not known */
			dtimperiod = 1;
		dtimcount = ni->ni_dtim_count;
		if (dtimcount >= dtimperiod)	/* NB: sanity check */
			dtimcount = 0;		/* XXX? */
		cfpperiod = 1;			/* NB: no PCF support yet */
		cfpcount = 0;
		/*
		 * Pull nexttbtt forward to reflect the current
		 * TSF and calculate dtim+cfp state for the result.
		 */
		tsf = ath_hal_gettsf64(ah);
		tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
		do {
			nexttbtt += intval;
			if (--dtimcount < 0) {
				dtimcount = dtimperiod - 1;
				if (--cfpcount < 0)
					cfpcount = cfpperiod - 1;
			}
		} while (nexttbtt < tsftu);
		memset(&bs, 0, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = dtimperiod*intval;
		bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
		bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
		bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
		bs.bs_cfpmaxduration = 0;
#if 0
		/*
		 * The 802.11 layer records the offset to the DTIM
		 * bitmap while receiving beacons; use it here to
		 * enable h/w detection of our AID being marked in
		 * the bitmap vector (to indicate frames for us are
		 * pending at the AP).
		 * XXX do DTIM handling in s/w to WAR old h/w bugs
		 * XXX enable based on h/w rev for newer chips
		 */
		bs.bs_timoffset = ni->ni_timoff;
#endif
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt.  The configuration
		 * is specified in ms, so we need to convert that to
		 * TU's and then calculate based on the beacon interval.
		 * Note that we clamp the result to at most 10 beacons.
		 */
		bs.bs_bmissthreshold = ic->ic_bmissthreshold;
		if (bs.bs_bmissthreshold > 10)
			bs.bs_bmissthreshold = 10;
		else if (bs.bs_bmissthreshold <= 0)
			bs.bs_bmissthreshold = 1;

		/*
		 * Calculate sleep duration.  The configuration is
		 * given in ms.  We insure a multiple of the beacon
		 * period is used.  Also, if the sleep duration is
		 * greater than the DTIM period then it makes senses
		 * to make it a multiple of that.
		 *
		 * XXX fixed at 100ms
		 */
		bs.bs_sleepduration =
			roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
		if (bs.bs_sleepduration > bs.bs_dtimperiod)
			bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);

		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
			, __func__
			, tsf, tsftu
			, bs.bs_intval
			, bs.bs_nexttbtt
			, bs.bs_dtimperiod
			, bs.bs_nextdtim
			, bs.bs_bmissthreshold
			, bs.bs_sleepduration
			, bs.bs_cfpperiod
			, bs.bs_cfpmaxduration
			, bs.bs_cfpnext
			, bs.bs_timoffset
		);
		/* program the timers with interrupts blocked */
		ath_hal_intrset(ah, 0);
		ath_hal_beacontimers(ah, &bs);
		sc->sc_imask |= HAL_INT_BMISS;
		ath_hal_intrset(ah, sc->sc_imask);
	} else {
		ath_hal_intrset(ah, 0);
		if (nexttbtt == intval)
			intval |= HAL_BEACON_RESET_TSF;
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * In IBSS mode enable the beacon timers but only
			 * enable SWBA interrupts if we need to manually
			 * prepare beacon frames.  Otherwise we use a
			 * self-linked tx descriptor and let the hardware
			 * deal with things.
			 */
			intval |= HAL_BEACON_ENA;
			if (!sc->sc_hasveol)
				sc->sc_imask |= HAL_INT_SWBA;
			if ((intval & HAL_BEACON_RESET_TSF) == 0) {
				/*
				 * Pull nexttbtt forward to reflect
				 * the current TSF.
				 */
				tsf = ath_hal_gettsf64(ah);
				tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
				do {
					nexttbtt += intval;
				} while (nexttbtt < tsftu);
			}
			ath_beaconq_config(sc);
		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
			/*
			 * In AP mode we enable the beacon timers and
			 * SWBA interrupts to prepare beacon frames.
2465 */ 2466 intval |= HAL_BEACON_ENA; 2467 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ 2468 ath_beaconq_config(sc); 2469 } 2470 ath_hal_beaconinit(ah, nexttbtt, intval); 2471 sc->sc_bmisscount = 0; 2472 ath_hal_intrset(ah, sc->sc_imask); 2473 /* 2474 * When using a self-linked beacon descriptor in 2475 * ibss mode load it once here. 2476 */ 2477 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) 2478 ath_beacon_proc(sc, 0); 2479 } 2480 sc->sc_syncbeacon = 0; 2481 #undef FUDGE 2482 #undef TSF_TO_TU 2483 } 2484 2485 static void 2486 ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2487 { 2488 bus_addr_t *paddr = (bus_addr_t*) arg; 2489 KASSERT(error == 0, ("error %u on bus_dma callback", error)); 2490 *paddr = segs->ds_addr; 2491 } 2492 2493 static int 2494 ath_descdma_setup(struct ath_softc *sc, 2495 struct ath_descdma *dd, ath_bufhead *head, 2496 const char *name, int nbuf, int ndesc) 2497 { 2498 #define DS2PHYS(_dd, _ds) \ 2499 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2500 struct ifnet *ifp = sc->sc_ifp; 2501 struct ath_desc *ds; 2502 struct ath_buf *bf; 2503 int i, bsize, error; 2504 2505 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 2506 __func__, name, nbuf, ndesc); 2507 2508 dd->dd_name = name; 2509 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; 2510 2511 /* 2512 * Setup DMA descriptor area. 
2513 */ 2514 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 2515 PAGE_SIZE, 0, /* alignment, bounds */ 2516 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2517 BUS_SPACE_MAXADDR, /* highaddr */ 2518 NULL, NULL, /* filter, filterarg */ 2519 dd->dd_desc_len, /* maxsize */ 2520 1, /* nsegments */ 2521 dd->dd_desc_len, /* maxsegsize */ 2522 BUS_DMA_ALLOCNOW, /* flags */ 2523 NULL, /* lockfunc */ 2524 NULL, /* lockarg */ 2525 &dd->dd_dmat); 2526 if (error != 0) { 2527 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 2528 return error; 2529 } 2530 2531 /* allocate descriptors */ 2532 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 2533 if (error != 0) { 2534 if_printf(ifp, "unable to create dmamap for %s descriptors, " 2535 "error %u\n", dd->dd_name, error); 2536 goto fail0; 2537 } 2538 2539 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 2540 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 2541 &dd->dd_dmamap); 2542 if (error != 0) { 2543 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 2544 "error %u\n", nbuf * ndesc, dd->dd_name, error); 2545 goto fail1; 2546 } 2547 2548 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 2549 dd->dd_desc, dd->dd_desc_len, 2550 ath_load_cb, &dd->dd_desc_paddr, 2551 BUS_DMA_NOWAIT); 2552 if (error != 0) { 2553 if_printf(ifp, "unable to map %s descriptors, error %u\n", 2554 dd->dd_name, error); 2555 goto fail2; 2556 } 2557 2558 ds = dd->dd_desc; 2559 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 2560 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 2561 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 2562 2563 /* allocate rx buffers */ 2564 bsize = sizeof(struct ath_buf) * nbuf; 2565 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 2566 if (bf == NULL) { 2567 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 2568 dd->dd_name, bsize); 2569 goto fail3; 2570 } 2571 dd->dd_bufptr = bf; 2572 2573 STAILQ_INIT(head); 2574 for (i 
= 0; i < nbuf; i++, bf++, ds += ndesc) { 2575 bf->bf_desc = ds; 2576 bf->bf_daddr = DS2PHYS(dd, ds); 2577 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 2578 &bf->bf_dmamap); 2579 if (error != 0) { 2580 if_printf(ifp, "unable to create dmamap for %s " 2581 "buffer %u, error %u\n", dd->dd_name, i, error); 2582 ath_descdma_cleanup(sc, dd, head); 2583 return error; 2584 } 2585 STAILQ_INSERT_TAIL(head, bf, bf_list); 2586 } 2587 return 0; 2588 fail3: 2589 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2590 fail2: 2591 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2592 fail1: 2593 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2594 fail0: 2595 bus_dma_tag_destroy(dd->dd_dmat); 2596 memset(dd, 0, sizeof(*dd)); 2597 return error; 2598 #undef DS2PHYS 2599 } 2600 2601 static void 2602 ath_descdma_cleanup(struct ath_softc *sc, 2603 struct ath_descdma *dd, ath_bufhead *head) 2604 { 2605 struct ath_buf *bf; 2606 struct ieee80211_node *ni; 2607 2608 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2609 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2610 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2611 bus_dma_tag_destroy(dd->dd_dmat); 2612 2613 STAILQ_FOREACH(bf, head, bf_list) { 2614 if (bf->bf_m) { 2615 m_freem(bf->bf_m); 2616 bf->bf_m = NULL; 2617 } 2618 if (bf->bf_dmamap != NULL) { 2619 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 2620 bf->bf_dmamap = NULL; 2621 } 2622 ni = bf->bf_node; 2623 bf->bf_node = NULL; 2624 if (ni != NULL) { 2625 /* 2626 * Reclaim node reference. 
			 */
			ieee80211_free_node(ni);
		}
	}

	STAILQ_INIT(head);
	free(dd->dd_bufptr, M_ATHDEV);
	memset(dd, 0, sizeof(*dd));
}

/*
 * Allocate the rx, tx and beacon descriptor areas.  On any
 * failure the areas already set up are torn down again; returns
 * 0 or an errno from ath_descdma_setup.
 */
static int
ath_desc_alloc(struct ath_softc *sc)
{
	int error;

	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
			"rx", ath_rxbuf, 1);
	if (error != 0)
		return error;

	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
			"tx", ath_txbuf, ATH_TXDESC);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
		return error;
	}

	error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
			"beacon", 1, 1);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
		return error;
	}
	return 0;
}

/*
 * Free whichever descriptor areas were successfully set up
 * (dd_desc_len != 0 marks an initialized area).
 */
static void
ath_desc_free(struct ath_softc *sc)
{

	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * net80211 node allocation hook: allocate an ath_node with
 * trailing space for the rate-control module's private state.
 * Returns NULL on allocation failure.
 */
static struct ieee80211_node *
ath_node_alloc(struct ieee80211_node_table *nt)
{
	struct ieee80211com *ic = nt->nt_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
	struct ath_node *an;

	an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
	if (an == NULL) {
		/* XXX stat+msg */
		return NULL;
	}
	an->an_avgrssi = ATH_RSSI_DUMMY_MARKER;
	ath_rate_node_init(sc, an);

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
	return &an->an_node;
}

/*
 * net80211 node free hook: release rate-control state then chain
 * to the original (saved) free routine.
 */
static void
ath_node_free(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc
	*sc = ic->ic_ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);

	ath_rate_node_cleanup(sc, ATH_NODE(ni));
	sc->sc_node_free(ni);
}

/*
 * Return the node's filtered rssi clamped to [0, 127], falling back
 * to the value recorded by the 802.11 layer when no samples have
 * been accumulated yet.
 */
static u_int8_t
ath_node_getrssi(const struct ieee80211_node *ni)
{
	/* round when de-scaling the exponentially-filtered value */
#define	HAL_EP_RND(x, mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
	u_int32_t avgrssi = ATH_NODE_CONST(ni)->an_avgrssi;
	int32_t rssi;

	/*
	 * When only one frame is received there will be no state in
	 * avgrssi so fallback on the value recorded by the 802.11 layer.
	 */
	if (avgrssi != ATH_RSSI_DUMMY_MARKER)
		rssi = HAL_EP_RND(avgrssi, HAL_RSSI_EP_MULTIPLIER);
	else
		rssi = ni->ni_rssi;
	return rssi < 0 ? 0 : rssi > 127 ? 127 : rssi;
#undef HAL_EP_RND
}

/*
 * (Re)initialize a receive buffer: attach a cluster mbuf if the
 * buffer has none, program a self-linked rx descriptor, and append
 * it to the hardware rx chain.  Returns 0 or an errno.
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;	/* link to self */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}

/*
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the specified TSF.
 */
static __inline u_int64_t
ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf)
{
	/*
	 * If the low 15 bits of the TSF have already wrapped past
	 * rstamp, step back one 15-bit epoch before splicing.
	 * NOTE(review): assumes rstamp fits in 15 bits — confirm
	 * against the rx descriptor format.
	 */
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return ((tsf &~ 0x7fff) | rstamp);
}

/*
 * Intercept management frames to collect beacon rssi data
 * and to do ibss merges.
 */
static void
ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m,
	struct ieee80211_node *ni,
	int subtype, int rssi, u_int32_t rstamp)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	sc->sc_recv_mgmt(ic, m, ni, subtype, rssi, rstamp);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics for use by the hal */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
		if (sc->sc_syncbeacon &&
		    ni == ic->ic_bss && ic->ic_state == IEEE80211_S_RUN) {
			/*
			 * Resync beacon timers using the tsf of the beacon
			 * frame we just received.
			 */
			ath_beacon_config(sc);
		}
		/* fall thru... */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (ic->ic_opmode == IEEE80211_M_IBSS &&
		    ic->ic_state == IEEE80211_S_RUN) {
			u_int64_t tsf = ath_extend_tsf(rstamp,
				ath_hal_gettsf64(sc->sc_ah));
			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change it's bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
				DPRINTF(sc, ATH_DEBUG_STATE,
				    "ibss merge, rstamp %u tsf %ju "
				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
				    (uintmax_t)ni->ni_tstamp.tsf);
				(void) ieee80211_ibss_merge(ni);
			}
		}
		break;
	}
}

/*
 * Set the default antenna and reset the diversity bookkeeping.
 */
static void
ath_setdefantenna(struct ath_softc *sc, u_int antenna)
{
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath_hal_setdefantenna(ah, antenna);
	if (sc->sc_defant != antenna)
		sc->sc_stats.ast_ant_defswitch++;
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}

/*
 * Fill in the radiotap rx header from the rx status and hand the
 * frame to bpf.  Returns 0 if the frame was discarded as a runt,
 * 1 if it was tapped.
 */
static int
ath_rx_tap(struct ath_softc *sc, struct mbuf *m,
	const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
	u_int8_t rix;

	KASSERT(sc->sc_drvbpf != NULL, ("no tap"));

	/*
	 * Discard anything shorter than an ack or cts.
	 */
	if (m->m_pkthdr.len < IEEE80211_ACK_LEN) {
		DPRINTF(sc, ATH_DEBUG_RECV, "%s: runt packet %d\n",
			__func__, m->m_pkthdr.len);
		sc->sc_stats.ast_rx_tooshort++;
		return 0;
	}
	sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(rs->rs_tstamp, tsf));
	rix = rs->rs_rate;
	sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
	if (rs->rs_status & HAL_RXERR_CRC)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
	/* XXX propagate other error flags from descriptor */
	sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
	sc->sc_rx_th.wr_antsignal = rs->rs_rssi + nf;
	sc->sc_rx_th.wr_antnoise = nf;
	sc->sc_rx_th.wr_antenna = rs->rs_antenna;

	bpf_mtap2(sc->sc_drvbpf, &sc->sc_rx_th, sc->sc_rx_th_len, m);

	return 1;
}

/*
 * Receive task: drain completed rx descriptors, dispatch frames
 * to net80211, and recycle buffers back onto the rx chain.
 */
static void
ath_rx_proc(void *arg, int npending)
{
	/* map a descriptor bus address back to its virtual address */
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_softc *sc = arg;
	struct ath_buf *bf;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct ath_rx_status *rs;
	struct mbuf *m;
	struct ieee80211_node *ni;
	struct ath_node *an;
	int len, type, ngood;
	u_int phyerr;
	HAL_STATUS status;
	int16_t nf;
	u_int64_t tsf;

	NET_LOCK_GIANT();		/* XXX */

	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
	ngood = 0;
	nf = ath_hal_getchannoise(ah, &sc->sc_curchan);
	tsf = ath_hal_gettsf64(ah);
	do {
		bf = STAILQ_FIRST(&sc->sc_rxbuf);
		if (bf == NULL) {		/* NB: shouldn't happen */
			if_printf(ifp, "%s: no buffer!\n", __func__);
			break;
		}
		m = bf->bf_m;
		if (m == NULL) {		/* NB: shouldn't happen */
			/*
			 * If mbuf allocation failed previously there
			 * will be no mbuf; try again to re-populate it.
			 */
			/* XXX make debug msg */
			if_printf(ifp, "%s: no mbuf!\n", __func__);
			STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
			goto rx_next;
		}
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			break;
		}
		/* XXX sync descriptor memory */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		rs = &bf->bf_status.ds_rxstat;
		status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(bf, 0, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS)
			break;
		STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
		if (rs->rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms.  If not in monitor mode,
			 * discard the frame.
			 */
			if (ic->ic_opmode != IEEE80211_M_MONITOR) {
				sc->sc_stats.ast_rx_toobig++;
				goto rx_next;
			}
			/* fall thru for monitor mode handling... */
		} else if (rs->rs_status != 0) {
			if (rs->rs_status & HAL_RXERR_CRC)
				sc->sc_stats.ast_rx_crcerr++;
			if (rs->rs_status & HAL_RXERR_FIFO)
				sc->sc_stats.ast_rx_fifoerr++;
			if (rs->rs_status & HAL_RXERR_PHY) {
				sc->sc_stats.ast_rx_phyerr++;
				phyerr = rs->rs_phyerr & 0x1f;
				sc->sc_stats.ast_rx_phy[phyerr]++;
				goto rx_next;
			}
			if (rs->rs_status & HAL_RXERR_DECRYPT) {
				/*
				 * Decrypt error.  If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it.  This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
					goto rx_accept;
				sc->sc_stats.ast_rx_badcrypt++;
			}
			if (rs->rs_status & HAL_RXERR_MIC) {
				sc->sc_stats.ast_rx_badmic++;
				/*
				 * Do minimal work required to hand off
				 * the 802.11 header for notifcation.
				 */
				/* XXX frag's and qos frames */
				len = rs->rs_datalen;
				if (len >= sizeof (struct ieee80211_frame)) {
					bus_dmamap_sync(sc->sc_dmat,
					    bf->bf_dmamap,
					    BUS_DMASYNC_POSTREAD);
					ieee80211_notify_michael_failure(ic,
					    mtod(m, struct ieee80211_frame *),
					    sc->sc_splitmic ?
					        rs->rs_keyix-32 : rs->rs_keyix
					);
				}
			}
			ifp->if_ierrors++;
			/*
			 * When a tap is present pass error frames
			 * that have been requested.  By default we
			 * pass decrypt+mic errors but others may be
			 * interesting (e.g. crc).
			 */
			if (bpf_peers_present(sc->sc_drvbpf) &&
			    (rs->rs_status & sc->sc_monpass)) {
				bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
				    BUS_DMASYNC_POSTREAD);
				/* NB: bpf needs the mbuf length setup */
				len = rs->rs_datalen;
				m->m_pkthdr.len = m->m_len = len;
				(void) ath_rx_tap(sc, m, rs, tsf, nf);
			}
			/* XXX pass MIC errors up for s/w reclaculation */
			goto rx_next;
		}
rx_accept:
		/*
		 * Sync and unmap the frame.  At this point we're
		 * committed to passing the mbuf somewhere so clear
		 * bf_m; this means a new mbuf must be allocated
		 * when the rx descriptor is setup again to receive
		 * another frame.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		bf->bf_m = NULL;

		m->m_pkthdr.rcvif = ifp;
		len = rs->rs_datalen;
		m->m_pkthdr.len = m->m_len = len;

		sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;

		if (bpf_peers_present(sc->sc_drvbpf) &&
		    !ath_rx_tap(sc, m, rs, tsf, nf)) {
			m_freem(m);		/* XXX reclaim */
			goto rx_next;
		}

		/*
		 * From this point on we assume the frame is at least
		 * as large as ieee80211_frame_min; verify that.
		 */
		if (len < IEEE80211_MIN_LEN) {
			DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n",
				__func__, len);
			sc->sc_stats.ast_rx_tooshort++;
			m_freem(m);
			goto rx_next;
		}

		if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
			ieee80211_dump_pkt(mtod(m, caddr_t), len,
				   sc->sc_hwmap[rs->rs_rate].ieeerate,
				   rs->rs_rssi);
		}

		/* strip the FCS before handing up */
		m_adj(m, -IEEE80211_CRC_LEN);

		/*
		 * Locate the node for sender, track state, and then
		 * pass the (referenced) node up to the 802.11 layer
		 * for its use.
		 */
		ni = ieee80211_find_rxnode_withkey(ic,
			mtod(m, const struct ieee80211_frame_min *),
			rs->rs_keyix == HAL_RXKEYIX_INVALID ?
				IEEE80211_KEYIX_NONE : rs->rs_keyix);
		/*
		 * Track rx rssi and do any rx antenna management.
		 */
		an = ATH_NODE(ni);
		ATH_RSSI_LPF(an->an_avgrssi, rs->rs_rssi);
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
		/*
		 * Send frame up for processing.
		 */
		type = ieee80211_input(ic, m, ni, rs->rs_rssi, rs->rs_tstamp);
		ieee80211_free_node(ni);
		if (sc->sc_diversity) {
			/*
			 * When using fast diversity, change the default rx
			 * antenna if diversity chooses the other antenna 3
			 * times in a row.
			 */
			if (sc->sc_defant != rs->rs_antenna) {
				if (++sc->sc_rxotherant >= 3)
					ath_setdefantenna(sc, rs->rs_antenna);
			} else
				sc->sc_rxotherant = 0;
		}
		if (sc->sc_softled) {
			/*
			 * Blink for any data frame.  Otherwise do a
			 * heartbeat-style blink when idle.  The latter
			 * is mainly for station mode where we depend on
			 * periodic beacon frames to trigger the poll event.
		 */
		if (type == IEEE80211_FC0_TYPE_DATA) {
			sc->sc_rxrate = rs->rs_rate;
			ath_led_event(sc, ATH_LED_RX);
		} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
			ath_led_event(sc, ATH_LED_POLL);
	}
	/*
	 * Arrange to update the last rx timestamp only for
	 * frames from our ap when operating in station mode.
	 * This assumes the rx key is always setup when associated.
	 */
	if (ic->ic_opmode == IEEE80211_M_STA &&
	    rs->rs_keyix != HAL_RXKEYIX_INVALID)
		ngood++;
rx_next:
		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	} while (ath_rxbuf_init(sc, bf) == 0);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
	if (ngood)
		sc->sc_lastrx = tsf;

	NET_UNLOCK_GIANT();		/* XXX */
#undef PA2DESC
}

/*
 * Reset the driver-side state of a transmit queue and record its
 * hardware queue number.  The frame list is emptied, the descriptor
 * link pointer and periodic-interrupt counter are cleared, and the
 * per-queue lock is (re)initialized via ATH_TXQ_LOCK_INIT.
 */
static void
ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
{
	txq->axq_qnum = qnum;
	txq->axq_depth = 0;
	txq->axq_intrcnt = 0;
	txq->axq_link = NULL;
	STAILQ_INIT(&txq->axq_q);
	ATH_TXQ_LOCK_INIT(sc, txq);
}

/*
 * Setup a h/w transmit queue of the requested type/subtype.
 * Returns a pointer to the corresponding sc_txq[] slot, or NULL
 * when the hal cannot provide a queue (normal on parts with few
 * tx queues) or returns an out-of-range queue number.  A slot is
 * initialized only on first use (tracked by the sc_txqsetup bitmask),
 * so repeated requests that map to the same h/w queue share state.
 */
static struct ath_txq *
ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	/* Defer AIFS/CW selection to the hal's per-queue defaults. */
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
	qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= N(sc->sc_txq)) {
		device_printf(sc->sc_dev,
			"hal qnum %u out of range, max %zu!\n",
			qnum, N(sc->sc_txq));
		/* Hand the unusable queue back to the hal. */
		ath_hal_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
		sc->sc_txqsetup |= 1<<qnum;
	}
	return &sc->sc_txq[qnum];
#undef N
}

/*
 * Setup a hardware data transmit queue for the specified
 * access control.  The hal may not support all requested
 * queues in which case it will return a reference to a
 * previously setup queue.  We record the mapping from ac's
 * to h/w queues for use by ath_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 * Returns 1 on success, 0 when the ac index is out of range
 * or no h/w queue could be obtained.
 */
static int
ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct ath_txq *txq;

	if (ac >= N(sc->sc_ac2q)) {
		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
			ac, N(sc->sc_ac2q));
		return 0;
	}
	txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->sc_ac2q[ac] = txq;
		return 1;
	} else
		return 0;
#undef N
}

/*
 * Update WME parameters for a transmit queue.
 */
/*
 * Push the current 802.11 WME channel parameters for access class
 * 'ac' into the corresponding hardware transmit queue.  Returns 1
 * on success, 0 if the hal rejected the new queue properties.
 */
static int
ath_txq_update(struct ath_softc *sc, int ac)
{
/* WME stores cwmin/cwmax as log2 exponents; hardware wants values. */
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
/* wmep_txopLimit is in 32us units (<<5 converts to microseconds). */
#define	ATH_TXOP_TO_US(v)		(v<<5)
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	/* Read-modify-write: keep any hal-maintained fields intact. */
	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
	qi.tqi_aifs = wmep->wmep_aifsn;
	qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
	qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);

	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
		device_printf(sc->sc_dev, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;
	} else {
		ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
		return 1;
	}
#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
}

/*
 * Callback from the 802.11 layer to update WME parameters.
 * Returns 0 on success or EIO if any access class failed to update.
 */
static int
ath_wme_update(struct ieee80211com *ic)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	return !ath_txq_update(sc, WME_AC_BE) ||
	    !ath_txq_update(sc, WME_AC_BK) ||
	    !ath_txq_update(sc, WME_AC_VI) ||
	    !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
}

/*
 * Reclaim resources for a setup queue: release the h/w queue back
 * to the hal, destroy the per-queue lock, and clear the queue's bit
 * in the sc_txqsetup mask so ATH_TXQ_SETUP no longer reports it.
 */
static void
ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{

	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	ATH_TXQ_LOCK_DESTROY(txq);
	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
}

/*
 * Reclaim all tx queue resources.
 */
static void
ath_tx_cleanup(struct ath_softc *sc)
{
	int i;

	ATH_TXBUF_LOCK_DESTROY(sc);
	/* Tear down only the queues that were actually set up. */
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
	/* The s/w-only mcast queue has a lock but no h/w queue. */
	ATH_TXQ_LOCK_DESTROY(&sc->sc_mcastq);
}

/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 */
static struct mbuf *
ath_defrag(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		/* Copy n into m's trailing space when m is writable. */
		if ((m->m_flags & M_RDONLY) == 0 &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
				n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
		("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
				n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return NULL;
}

/*
 * Return h/w rate index for an IEEE rate (w/o basic rate bit).
 * Falls back to index 0 (the lowest rate) when no entry matches.
 */
static int
ath_tx_findrix(const HAL_RATE_TABLE *rt, int rate)
{
	int i;

	for (i = 0; i < rt->rateCount; i++)
		if ((rt->info[i].dot11Rate & IEEE80211_RATE_VAL) == rate)
			return i;
	return 0;		/* NB: lowest rate */
}

/*
 * Map the mbuf chain m0 into bf's DMA map for transmit.  If the
 * chain needs more than ATH_TXDESC segments it is defragmented via
 * ath_defrag and reloaded.  On success the (possibly replaced) mbuf
 * is stored in bf->bf_m and 0 is returned; on any failure the mbuf
 * is freed and an errno is returned (caller must not touch m0).
 */
static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/* Force the defrag path below by marking nseg too large. */
		bf->bf_nseg = ATH_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		m_freem(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_TXDESC) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = ath_defrag(m0, M_DONTWAIT, ATH_TXDESC);
		if (m == NULL) {
			m_freem(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			m_freem(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		m_freem(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Complete the buffer's descriptor chain and hand the frame to the
 * hardware (or, for the s/w mcast queue, just link it in so the SWBA
 * handler can move it to the CAB queue later).
 */
static void
ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds, *ds0;
	int i;

	/*
	 * Fillin the remainder of the descriptor info.
	 */
	ds0 = ds = bf->bf_desc;
	for (i = 0; i < bf->bf_nseg; i++, ds++) {
		ds->ds_data = bf->bf_segs[i].ds_addr;
		/* Terminate the chain at the last segment. */
		if (i == bf->bf_nseg - 1)
			ds->ds_link = 0;
		else
			ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
		ath_hal_filltxdesc(ah, ds
			, bf->bf_segs[i].ds_len	/* segment length */
			, i == 0		/* first segment */
			, i == bf->bf_nseg - 1	/* last segment */
			, ds0			/* first descriptor */
		);
		DPRINTF(sc, ATH_DEBUG_XMIT,
			"%s: %d: %08x %08x %08x %08x %08x %08x\n",
			__func__, i, ds->ds_link, ds->ds_data,
			ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
	}
	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TXQ_LOCK(txq);
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	if (txq != &sc->sc_mcastq) {
		if (txq->axq_link == NULL) {
			/* Queue was idle: point the h/w at this buffer. */
			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: TXDP[%u] = %p (%p) depth %d\n", __func__,
			    txq->axq_qnum, (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
		} else {
			/* Chain onto the previous frame's last descriptor. */
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
		}
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
		ath_hal_txstart(ah, txq->axq_qnum);
	} else {
		/* S/w mcast queue: link only, no h/w start. */
		if (txq->axq_link != NULL)
			*txq->axq_link = bf->bf_daddr;
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
	}
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Prepare and transmit a raw 802.11 frame: encrypt if needed, map it
 * for DMA, select rate/queue/flags from the frame type, build the tx
 * descriptor chain, and hand it to the hardware via ath_tx_handoff.
 * Returns 0 on success or an errno; on failure m0 has been freed.
 */
static int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
	struct mbuf *m0)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, ismrr;
	int keyix, hdrlen, pktlen, try0;
	u_int8_t rix, txrate, ctsrate;
	u_int8_t cix = 0xff;		/* NB: silence compiler */
	struct ath_desc *ds;
	struct ath_txq *txq;
	struct ieee80211_frame *wh;
	u_int subtype, flags, ctsduration;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame.  The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ic, ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			m_freem(m0);
			return EIO;
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len above will
		 * account for it.  Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		hdrlen += cip->ic_header;
		pktlen += cip->ic_header + cip->ic_trailer;
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
			pktlen += cip->ic_miclen;
		keyix = k->wk_keyix;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		keyix = ni->ni_ucastkey.wk_keyix;
		if (keyix == IEEE80211_KEYIX_NONE)
			keyix = HAL_TXKEYIX_INVALID;
	} else
		keyix = HAL_TXKEYIX_INVALID;

	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	ismrr = 0;				/* default no multi-rate retry*/
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		rix = sc->sc_minrateix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		/* NB: force all management frames to highest queue */
		if (ni->ni_flags & IEEE80211_NODE_QOS) {
			/* NB: force all management frames to highest queue */
			pri = WME_AC_VO;
		} else
			pri = WME_AC_BE;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = sc->sc_minrateix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		/* NB: force all ctl frames to highest queue */
		if (ni->ni_flags & IEEE80211_NODE_QOS) {
			/* NB: force all ctl frames to highest queue */
			pri = WME_AC_VO;
		} else
			pri = WME_AC_BE;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;	/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * otherwise consult the rate control module for the
		 * rate to use.
		 */
		if (ismcast) {
			/*
			 * Check mcast rate setting in case it's changed.
			 * XXX move out of fastpath
			 */
			if (ic->ic_mcast_rate != sc->sc_mcastrate) {
				sc->sc_mcastrix =
					ath_tx_findrix(rt, ic->ic_mcast_rate);
				sc->sc_mcastrate = ic->ic_mcast_rate;
			}
			rix = sc->sc_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else {
			ath_rate_findrate(sc, an, shortPreamble, pktlen,
				&rix, &try0, &txrate);
			sc->sc_txrate = txrate;		/* for LED blinking */
			if (try0 != ATH_TXMAXTRY)
				ismrr = 1;
		}
		pri = M_WME_GETAC(m0);
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		m_freem(m0);
		return EIO;
	}
	txq = sc->sc_ac2q[pri];

	/*
	 * When servicing one or more stations in power-save mode
	 * (or) if there is some mcast data waiting on the mcast
	 * queue (to prevent out of order delivery) multicast
	 * frames must be buffered until after the beacon.
	 */
	if (ismcast && (ic->ic_ps_sta || sc->sc_mcastq.axq_depth)) {
		txq = &sc->sc_mcastq;
		/* XXX? more bit in 802.11 frame header */
	}

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > ic->ic_rtsthreshold) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		cix = rt->info[rix].controlRate;
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS.  Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
			flags |= HAL_TXDESC_RTSENA;
		else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
			flags |= HAL_TXDESC_CTSENA;
		cix = rt->info[sc->sc_protrix].controlRate;
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		/*
		 * XXX not right with fragmentation.
		 */
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}

	/*
	 * Calculate RTS/CTS rate and duration if needed.
	 */
	ctsduration = 0;
	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
		/*
		 * CTS transmit rate is derived from the transmit rate
		 * by looking in the h/w rate table.  We must also factor
		 * in whether or not a short preamble is to be used.
		 */
		/* NB: cix is set above where RTS/CTS is enabled */
		KASSERT(cix != 0xff, ("cix not setup"));
		ctsrate = rt->info[cix].rateCode;
		/*
		 * Compute the transmit duration based on the frame
		 * size and the size of an ACK frame.  We call into the
		 * HAL to do the computation since it depends on the
		 * characteristics of the actual PHY being used.
		 *
		 * NB: CTS is assumed the same size as an ACK so we can
		 *     use the precalculated ACK durations.
		 */
		if (shortPreamble) {
			ctsrate |= rt->info[cix].shortPreamble;
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].spAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_TRUE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].spAckDuration;
		} else {
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].lpAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_FALSE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].lpAckDuration;
		}
		/*
		 * Must disable multi-rate retry when using RTS/CTS.
		 */
		ismrr = 0;
		try0 = ATH_TXMGTTRY;		/* XXX */
	} else
		ctsrate = 0;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(mtod(m0, caddr_t), m0->m_len,
			sc->sc_hwmap[txrate].ieeerate, -1);

	/* Tap outgoing frame for raw and radiotap bpf listeners. */
	if (bpf_peers_present(ic->ic_rawbpf))
		bpf_mtap(ic->ic_rawbpf, m0);
	if (bpf_peers_present(sc->sc_drvbpf)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags;
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		bpf_mtap2(sc->sc_drvbpf,
			&sc->sc_tx_th, sc->sc_tx_th_len, m0);
	}

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor.  We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt.  We periodically mark descriptors in this
	 * way to insure timely replenishing of the supply needed
	 * for sending frames.  Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done but if done too aggressively can cause senders to
	 * backup.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 *     dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	/* XXX check return value? */
	ath_hal_setuptxdesc(ah, ds
		, pktlen		/* packet length */
		, hdrlen		/* header length */
		, atype			/* Atheros packet type */
		, ni->ni_txpower	/* txpower */
		, txrate, try0		/* series 0 rate/tries */
		, keyix			/* key cache index */
		, sc->sc_txantenna	/* antenna mode */
		, flags			/* flags */
		, ctsrate		/* rts/cts rate */
		, ctsduration		/* rts/cts duration */
	);
	bf->bf_flags = flags;
	/*
	 * Setup the multi-rate retry state only when we're
	 * going to use it.  This assumes ath_hal_setuptxdesc
	 * initializes the descriptors (so we don't have to)
	 * when the hardware supports multi-rate retry and
	 * we don't use it.
	 */
	if (ismrr)
		ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix);

	ath_tx_handoff(sc, txq, bf);
	return 0;
}

/*
 * Process completed xmit descriptors from the specified queue.
 * Completed buffers are unmapped, their mbufs freed, and the buffers
 * returned to the free list.  Returns the number of frames that were
 * ack'd (used by callers to refresh the bmiss-workaround timestamp).
 */
static int
ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_buf *bf;
	struct ath_desc *ds, *ds0;
	struct ath_tx_status *ts;
	struct ieee80211_node *ni;
	struct ath_node *an;
	int sr, lr, pri, nacked;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
		__func__, txq->axq_qnum,
		(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);
	nacked = 0;
	for (;;) {
		ATH_TXQ_LOCK(txq);
		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
		bf = STAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		/* Status is reported in the frame's last descriptor. */
		ds0 = &bf->bf_desc[0];
		ds = &bf->bf_desc[bf->bf_nseg - 1];
		ts = &bf->bf_status.ds_txstat;
		status = ath_hal_txprocdesc(ah, ds, ts);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(bf, txq->axq_qnum, 0, status == HAL_OK);
#endif
		/* Stop at the first frame the h/w hasn't finished. */
		if (status == HAL_EINPROGRESS) {
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
		if (txq->axq_depth == 0)
			txq->axq_link = NULL;
		ATH_TXQ_UNLOCK(txq);

		ni = bf->bf_node;
		if (ni != NULL) {
			an = ATH_NODE(ni);
			if (ts->ts_status == 0) {
				/* Successful transmit: update statistics. */
				u_int8_t txant = ts->ts_antenna;
				sc->sc_stats.ast_ant_tx[txant]++;
				sc->sc_ant_tx[txant]++;
				if (ts->ts_rate & HAL_TXSTAT_ALTRATE)
					sc->sc_stats.ast_tx_altrate++;
				sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
				ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
					ts->ts_rssi);
				pri = M_WME_GETAC(bf->bf_m);
				if (pri >= WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				/* Node is clearly alive; reset inactivity. */
				ni->ni_inact = ni->ni_inact_reload;
			} else {
				if (ts->ts_status & HAL_TXERR_XRETRY)
					sc->sc_stats.ast_tx_xretries++;
				if (ts->ts_status & HAL_TXERR_FIFO)
					sc->sc_stats.ast_tx_fifoerr++;
				if (ts->ts_status & HAL_TXERR_FILT)
					sc->sc_stats.ast_tx_filtered++;
			}
			sr = ts->ts_shortretry;
			lr = ts->ts_longretry;
			sc->sc_stats.ast_tx_shortretry += sr;
			sc->sc_stats.ast_tx_longretry += lr;
			/*
			 * Hand the descriptor to the rate control algorithm.
			 */
			if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
			    (bf->bf_flags & HAL_TXDESC_NOACK) == 0) {
				/*
				 * If frame was ack'd update the last rx time
				 * used to workaround phantom bmiss interrupts.
				 */
				if (ts->ts_status == 0)
					nacked++;
				ath_rate_tx_complete(sc, an, bf);
			}
			/*
			 * Reclaim reference to node.
			 *
			 * NB: the node may be reclaimed here if, for example
			 *     this is a DEAUTH message that was sent and the
			 *     node was timed out due to inactivity.
			 */
			ieee80211_free_node(ni);
		}
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_node = NULL;

		/* Return the buffer to the free pool. */
		ATH_TXBUF_LOCK(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
	return nacked;
}

/*
 * Ask the hal whether h/w queue qnum raised a tx interrupt;
 * nonzero means the queue needs servicing.
 */
static __inline int
txqactive(struct ath_hal *ah, int qnum)
{
	u_int32_t txqs = 1<<qnum;
	ath_hal_gettxintrtxqs(ah, &txqs);
	return (txqs & (1<<qnum));
}

/*
 * Deferred processing of transmit interrupt; special-cased
 * for a single hardware transmit queue (e.g. 5210 and 5211).
 */
static void
ath_tx_proc_q0(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

	/* NB: sc_lastrx is updated only when a frame was ack'd (nacked != 0) */
	if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]))
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
		ath_tx_processq(sc, sc->sc_cabq);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_tx_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, ATH_LED_TX);

	/* kick the output path in case frames were blocked on OACTIVE */
	ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt; special-cased
 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
 */
static void
ath_tx_proc_q0123(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int nacked;

	/*
	 * Process each active queue.
	 */
	nacked = 0;
	if (txqactive(sc->sc_ah, 0))
		nacked += ath_tx_processq(sc, &sc->sc_txq[0]);
	if (txqactive(sc->sc_ah, 1))
		nacked += ath_tx_processq(sc, &sc->sc_txq[1]);
	if (txqactive(sc->sc_ah, 2))
		nacked += ath_tx_processq(sc, &sc->sc_txq[2]);
	if (txqactive(sc->sc_ah, 3))
		nacked += ath_tx_processq(sc, &sc->sc_txq[3]);
	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
		ath_tx_processq(sc, sc->sc_cabq);
	if (nacked)
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_tx_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, ATH_LED_TX);

	ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt.
 */
static void
ath_tx_proc(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int i, nacked;

	/*
	 * Process each active queue.
	 */
	nacked = 0;
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
			nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
	if (nacked)
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_tx_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, ATH_LED_TX);

	ath_start(ifp);
}

/*
 * Reclaim all buffers queued on the specified tx queue:
 * unload DMA maps, free mbufs, and release node references.
 */
static void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
#ifdef ATH_DEBUG
	struct ath_hal *ah = sc->sc_ah;
#endif
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block ath_tx_tasklet
	 */
	for (ix = 0;; ix++) {
		ATH_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			txq->axq_link = NULL;
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
		ATH_TXQ_UNLOCK(txq);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RESET) {
			ath_printtxbuf(bf, txq->axq_qnum, ix,
				ath_hal_txprocdesc(ah, bf->bf_desc,
				    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(mtod(bf->bf_m, caddr_t),
				bf->bf_m->m_len, 0, -1);
		}
#endif /* ATH_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
		/* return the buffer to the free list */
		ATH_TXBUF_LOCK(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
}

/*
 * Stop tx DMA on the specified hardware queue.
 */
static void
ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
	    __func__, txq->axq_qnum,
	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
	    txq->axq_link);
	(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
}

/*
 * Drain the transmit queues and reclaim resources.
 */
static void
ath_draintxq(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	/* XXX return value */
	if (!sc->sc_invalid) {
		/* don't touch the hardware if marked invalid */
		DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
		    __func__, sc->sc_bhalq,
		    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
		    NULL);
		(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
			if (ATH_TXQ_SETUP(sc, i))
				ath_tx_stopdma(sc, &sc->sc_txq[i]);
	}
	/* NB: DMA is stopped above before buffers are reclaimed here */
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->sc_txq[i]);
	ath_tx_draintxq(sc, &sc->sc_mcastq);
#ifdef ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET) {
		struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
		if (bf != NULL && bf->bf_m != NULL) {
			ath_printtxbuf(bf, sc->sc_bhalq, 0,
				ath_hal_txprocdesc(ah, bf->bf_desc,
				    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(mtod(bf->bf_m, caddr_t),
				bf->bf_m->m_len, 0, -1);
		}
	}
#endif /* ATH_DEBUG */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_tx_timer = 0;
}

/*
 * Disable the receive h/w in preparation for a reset.
 */
static void
ath_stoprecv(struct ath_softc *sc)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_hal *ah = sc->sc_ah;

	ath_hal_stoppcurecv(ah);	/* disable PCU */
	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
	DELAY(3000);			/* 3ms is long enough for 1 frame */
#ifdef ATH_DEBUG
	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
		struct ath_buf *bf;
		u_int ix;

		printf("%s: rx queue %p, link %p\n", __func__,
			(caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
		ix = 0;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			struct ath_desc *ds = bf->bf_desc;
			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
				ath_printrxbuf(bf, ix, status == HAL_OK);
			ix++;
		}
	}
#endif
	sc->sc_rxlink = NULL;		/* just in case */
#undef PA2DESC
}

/*
 * Enable the receive h/w following a reset.
 * Returns 0 on success or the error from ath_rxbuf_init.
 */
static int
ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	sc->sc_rxlink = NULL;
	/* (re)initialize every rx buffer's descriptor and dma map */
	STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
		int error = ath_rxbuf_init(sc, bf);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_RECV,
				"%s: ath_rxbuf_init failed %d\n",
				__func__, error);
			return error;
		}
	}

	/* NOTE(review): assumes sc_rxbuf is non-empty; bf is not
	 * checked for NULL before dereference — confirm callers
	 * guarantee rx buffers were allocated. */
	bf = STAILQ_FIRST(&sc->sc_rxbuf);
	ath_hal_putrxbuf(ah, bf->bf_daddr);
	ath_hal_rxena(ah);		/* enable recv descriptors */
	ath_mode_init(sc);		/* set filters, etc. */
	ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */
	return 0;
}

/*
 * Update internal state after a channel change.
 */
static void
ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	enum ieee80211_phymode mode;
	u_int16_t flags;

	/*
	 * Change channels and update the h/w rate map
	 * if we're switching; e.g. 11a to 11b/g.
	 */
	if (IEEE80211_IS_CHAN_HALF(chan))
		mode = IEEE80211_MODE_HALF;
	else if (IEEE80211_IS_CHAN_QUARTER(chan))
		mode = IEEE80211_MODE_QUARTER;
	else
		mode = ieee80211_chan2mode(ic, chan);
	if (mode != sc->sc_curmode)
		ath_setcurmode(sc, mode);
	/*
	 * Update BPF state.  NB: ethereal et. al. don't handle
	 * merged flags well so pick a unique mode for their use.
	 */
	if (IEEE80211_IS_CHAN_A(chan))
		flags = IEEE80211_CHAN_A;
	/* XXX 11g schizophrenia */
	else if (IEEE80211_IS_CHAN_ANYG(chan))
		flags = IEEE80211_CHAN_G;
	else
		flags = IEEE80211_CHAN_B;
	if (IEEE80211_IS_CHAN_T(chan))
		flags |= IEEE80211_CHAN_TURBO;
	if (IEEE80211_IS_CHAN_HALF(chan))
		flags |= IEEE80211_CHAN_HALF;
	if (IEEE80211_IS_CHAN_QUARTER(chan))
		flags |= IEEE80211_CHAN_QUARTER;
	/* record channel settings in the radiotap headers */
	sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq =
		htole16(chan->ic_freq);
	sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags =
		htole16(flags);
}

/*
 * Poll for a channel clear indication; this is required
 * for channels requiring DFS and not previously visited
 * and/or with a recent radar detection.
4363 */ 4364 static void 4365 ath_dfswait(void *arg) 4366 { 4367 struct ath_softc *sc = arg; 4368 struct ath_hal *ah = sc->sc_ah; 4369 HAL_CHANNEL hchan; 4370 4371 ath_hal_radar_wait(ah, &hchan); 4372 DPRINTF(sc, ATH_DEBUG_DFS, "%s: radar_wait %u/%x/%x\n", 4373 __func__, hchan.channel, hchan.channelFlags, hchan.privFlags); 4374 4375 if (hchan.privFlags & CHANNEL_INTERFERENCE) { 4376 if_printf(sc->sc_ifp, 4377 "channel %u/0x%x/0x%x has interference\n", 4378 hchan.channel, hchan.channelFlags, hchan.privFlags); 4379 return; 4380 } 4381 if ((hchan.privFlags & CHANNEL_DFS) == 0) { 4382 /* XXX should not happen */ 4383 return; 4384 } 4385 if (hchan.privFlags & CHANNEL_DFS_CLEAR) { 4386 sc->sc_curchan.privFlags |= CHANNEL_DFS_CLEAR; 4387 sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4388 if_printf(sc->sc_ifp, 4389 "channel %u/0x%x/0x%x marked clear\n", 4390 hchan.channel, hchan.channelFlags, hchan.privFlags); 4391 } else 4392 callout_reset(&sc->sc_dfs_ch, 2 * hz, ath_dfswait, sc); 4393 } 4394 4395 /* 4396 * Set/change channels. If the channel is really being changed, 4397 * it's done by reseting the chip. To accomplish this we must 4398 * first cleanup any pending DMA, then restart stuff after a la 4399 * ath_init. 4400 */ 4401 static int 4402 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 4403 { 4404 struct ath_hal *ah = sc->sc_ah; 4405 struct ieee80211com *ic = &sc->sc_ic; 4406 HAL_CHANNEL hchan; 4407 4408 /* 4409 * Convert to a HAL channel description with 4410 * the flags constrained to reflect the current 4411 * operating mode. 
4412 */ 4413 ath_mapchan(ic, &hchan, chan); 4414 4415 DPRINTF(sc, ATH_DEBUG_RESET, 4416 "%s: %u (%u MHz, hal flags 0x%x) -> %u (%u MHz, hal flags 0x%x)\n", 4417 __func__, 4418 ath_hal_mhz2ieee(ah, sc->sc_curchan.channel, 4419 sc->sc_curchan.channelFlags), 4420 sc->sc_curchan.channel, sc->sc_curchan.channelFlags, 4421 ath_hal_mhz2ieee(ah, hchan.channel, hchan.channelFlags), 4422 hchan.channel, hchan.channelFlags); 4423 if (hchan.channel != sc->sc_curchan.channel || 4424 hchan.channelFlags != sc->sc_curchan.channelFlags) { 4425 HAL_STATUS status; 4426 4427 /* 4428 * To switch channels clear any pending DMA operations; 4429 * wait long enough for the RX fifo to drain, reset the 4430 * hardware at the new frequency, and then re-enable 4431 * the relevant bits of the h/w. 4432 */ 4433 ath_hal_intrset(ah, 0); /* disable interrupts */ 4434 ath_draintxq(sc); /* clear pending tx frames */ 4435 ath_stoprecv(sc); /* turn off frame recv */ 4436 if (!ath_hal_reset(ah, sc->sc_opmode, &hchan, AH_TRUE, &status)) { 4437 if_printf(ic->ic_ifp, "%s: unable to reset " 4438 "channel %u (%u Mhz, flags 0x%x hal flags 0x%x)\n", 4439 __func__, ieee80211_chan2ieee(ic, chan), 4440 chan->ic_freq, chan->ic_flags, hchan.channelFlags); 4441 return EIO; 4442 } 4443 sc->sc_curchan = hchan; 4444 ath_update_txpow(sc); /* update tx power state */ 4445 sc->sc_diversity = ath_hal_getdiversity(ah); 4446 sc->sc_calinterval = 1; 4447 sc->sc_caltries = 0; 4448 4449 /* 4450 * Re-enable rx framework. 4451 */ 4452 if (ath_startrecv(sc) != 0) { 4453 if_printf(ic->ic_ifp, 4454 "%s: unable to restart recv logic\n", __func__); 4455 return EIO; 4456 } 4457 4458 /* 4459 * Change channels and update the h/w rate map 4460 * if we're switching; e.g. 11a to 11b/g. 4461 */ 4462 ic->ic_ibss_chan = chan; 4463 ath_chan_change(sc, chan); 4464 4465 /* 4466 * Handle DFS required waiting period to determine 4467 * if channel is clear of radar traffic. 
4468 */ 4469 if (ic->ic_opmode == IEEE80211_M_HOSTAP) { 4470 #define DFS_AND_NOT_CLEAR(_c) \ 4471 (((_c)->privFlags & (CHANNEL_DFS | CHANNEL_DFS_CLEAR)) == CHANNEL_DFS) 4472 if (DFS_AND_NOT_CLEAR(&sc->sc_curchan)) { 4473 if_printf(sc->sc_ifp, 4474 "wait for DFS clear channel signal\n"); 4475 /* XXX stop sndq */ 4476 sc->sc_ifp->if_drv_flags |= IFF_DRV_OACTIVE; 4477 callout_reset(&sc->sc_dfs_ch, 4478 2 * hz, ath_dfswait, sc); 4479 } else 4480 callout_stop(&sc->sc_dfs_ch); 4481 #undef DFS_NOT_CLEAR 4482 } 4483 4484 /* 4485 * Re-enable interrupts. 4486 */ 4487 ath_hal_intrset(ah, sc->sc_imask); 4488 } 4489 return 0; 4490 } 4491 4492 static void 4493 ath_next_scan(void *arg) 4494 { 4495 struct ath_softc *sc = arg; 4496 struct ieee80211com *ic = &sc->sc_ic; 4497 4498 if (ic->ic_state == IEEE80211_S_SCAN) 4499 ieee80211_next_scan(ic); 4500 } 4501 4502 /* 4503 * Periodically recalibrate the PHY to account 4504 * for temperature/environment changes. 4505 */ 4506 static void 4507 ath_calibrate(void *arg) 4508 { 4509 struct ath_softc *sc = arg; 4510 struct ath_hal *ah = sc->sc_ah; 4511 HAL_BOOL iqCalDone; 4512 4513 sc->sc_stats.ast_per_cal++; 4514 4515 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 4516 /* 4517 * Rfgain is out of bounds, reset the chip 4518 * to load new gain values. 4519 */ 4520 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 4521 "%s: rfgain change\n", __func__); 4522 sc->sc_stats.ast_per_rfgain++; 4523 ath_reset(sc->sc_ifp); 4524 } 4525 if (!ath_hal_calibrate(ah, &sc->sc_curchan, &iqCalDone)) { 4526 DPRINTF(sc, ATH_DEBUG_ANY, 4527 "%s: calibration of channel %u failed\n", 4528 __func__, sc->sc_curchan.channel); 4529 sc->sc_stats.ast_per_calfail++; 4530 } 4531 /* 4532 * Calibrate noise floor data again in case of change. 4533 */ 4534 ath_hal_process_noisefloor(ah); 4535 /* 4536 * Poll more frequently when the IQ calibration is in 4537 * progress to speedup loading the final settings. 
4538 * We temper this aggressive polling with an exponential 4539 * back off after 4 tries up to ath_calinterval. 4540 */ 4541 if (iqCalDone || sc->sc_calinterval >= ath_calinterval) { 4542 sc->sc_caltries = 0; 4543 sc->sc_calinterval = ath_calinterval; 4544 } else if (sc->sc_caltries > 4) { 4545 sc->sc_caltries = 0; 4546 sc->sc_calinterval <<= 1; 4547 if (sc->sc_calinterval > ath_calinterval) 4548 sc->sc_calinterval = ath_calinterval; 4549 } 4550 KASSERT(0 < sc->sc_calinterval && sc->sc_calinterval <= ath_calinterval, 4551 ("bad calibration interval %u", sc->sc_calinterval)); 4552 4553 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 4554 "%s: next +%u (%siqCalDone tries %u)\n", __func__, 4555 sc->sc_calinterval, iqCalDone ? "" : "!", sc->sc_caltries); 4556 sc->sc_caltries++; 4557 callout_reset(&sc->sc_cal_ch, sc->sc_calinterval * hz, 4558 ath_calibrate, sc); 4559 } 4560 4561 static int 4562 ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) 4563 { 4564 struct ifnet *ifp = ic->ic_ifp; 4565 struct ath_softc *sc = ifp->if_softc; 4566 struct ath_hal *ah = sc->sc_ah; 4567 struct ieee80211_node *ni; 4568 int i, error; 4569 const u_int8_t *bssid; 4570 u_int32_t rfilt; 4571 static const HAL_LED_STATE leds[] = { 4572 HAL_LED_INIT, /* IEEE80211_S_INIT */ 4573 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 4574 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 4575 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 4576 HAL_LED_RUN, /* IEEE80211_S_RUN */ 4577 }; 4578 4579 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 4580 ieee80211_state_name[ic->ic_state], 4581 ieee80211_state_name[nstate]); 4582 4583 callout_stop(&sc->sc_scan_ch); 4584 callout_stop(&sc->sc_cal_ch); 4585 callout_stop(&sc->sc_dfs_ch); 4586 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 4587 4588 if (nstate == IEEE80211_S_INIT) { 4589 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4590 /* 4591 * NB: disable interrupts so we don't rx frames. 
4592 */ 4593 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); 4594 /* 4595 * Notify the rate control algorithm. 4596 */ 4597 ath_rate_newstate(sc, nstate); 4598 goto done; 4599 } 4600 ni = ic->ic_bss; 4601 error = ath_chan_set(sc, ic->ic_curchan); 4602 if (error != 0) 4603 goto bad; 4604 rfilt = ath_calcrxfilter(sc, nstate); 4605 if (nstate == IEEE80211_S_SCAN) 4606 bssid = ifp->if_broadcastaddr; 4607 else 4608 bssid = ni->ni_bssid; 4609 ath_hal_setrxfilter(ah, rfilt); 4610 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s\n", 4611 __func__, rfilt, ether_sprintf(bssid)); 4612 4613 if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) 4614 ath_hal_setassocid(ah, bssid, ni->ni_associd); 4615 else 4616 ath_hal_setassocid(ah, bssid, 0); 4617 if (ic->ic_flags & IEEE80211_F_PRIVACY) { 4618 for (i = 0; i < IEEE80211_WEP_NKID; i++) 4619 if (ath_hal_keyisvalid(ah, i)) 4620 ath_hal_keysetmac(ah, i, bssid); 4621 } 4622 4623 /* 4624 * Notify the rate control algorithm so rates 4625 * are setup should ath_beacon_alloc be called. 4626 */ 4627 ath_rate_newstate(sc, nstate); 4628 4629 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 4630 /* nothing to do */; 4631 } else if (nstate == IEEE80211_S_RUN) { 4632 DPRINTF(sc, ATH_DEBUG_STATE, 4633 "%s(RUN): ic_flags=0x%08x iv=%d bssid=%s " 4634 "capinfo=0x%04x chan=%d\n" 4635 , __func__ 4636 , ic->ic_flags 4637 , ni->ni_intval 4638 , ether_sprintf(ni->ni_bssid) 4639 , ni->ni_capinfo 4640 , ieee80211_chan2ieee(ic, ic->ic_curchan)); 4641 4642 switch (ic->ic_opmode) { 4643 case IEEE80211_M_HOSTAP: 4644 case IEEE80211_M_IBSS: 4645 /* 4646 * Allocate and setup the beacon frame. 4647 * 4648 * Stop any previous beacon DMA. This may be 4649 * necessary, for example, when an ibss merge 4650 * causes reconfiguration; there will be a state 4651 * transition from RUN->RUN that means we may 4652 * be called with beacon transmission active. 
4653 */ 4654 ath_hal_stoptxdma(ah, sc->sc_bhalq); 4655 ath_beacon_free(sc); 4656 error = ath_beacon_alloc(sc, ni); 4657 if (error != 0) 4658 goto bad; 4659 /* 4660 * If joining an adhoc network defer beacon timer 4661 * configuration to the next beacon frame so we 4662 * have a current TSF to use. Otherwise we're 4663 * starting an ibss/bss so there's no need to delay. 4664 */ 4665 if (ic->ic_opmode == IEEE80211_M_IBSS && 4666 ic->ic_bss->ni_tstamp.tsf != 0) 4667 sc->sc_syncbeacon = 1; 4668 else 4669 ath_beacon_config(sc); 4670 break; 4671 case IEEE80211_M_STA: 4672 /* 4673 * Allocate a key cache slot to the station. 4674 */ 4675 if ((ic->ic_flags & IEEE80211_F_PRIVACY) == 0 && 4676 sc->sc_hasclrkey && 4677 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) 4678 ath_setup_stationkey(ni); 4679 /* 4680 * Defer beacon timer configuration to the next 4681 * beacon frame so we have a current TSF to use 4682 * (any TSF collected when scanning is likely old). 4683 */ 4684 sc->sc_syncbeacon = 1; 4685 break; 4686 default: 4687 break; 4688 } 4689 4690 /* 4691 * Let the hal process statistics collected during a 4692 * scan so it can provide calibrated noise floor data. 4693 */ 4694 ath_hal_process_noisefloor(ah); 4695 /* 4696 * Reset rssi stats; maybe not the best place... 4697 */ 4698 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 4699 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 4700 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 4701 } else { 4702 ath_hal_intrset(ah, 4703 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 4704 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4705 } 4706 done: 4707 /* 4708 * Invoke the parent method to complete the work. 4709 */ 4710 error = sc->sc_newstate(ic, nstate, arg); 4711 /* 4712 * Finally, start any timers. 
4713 */ 4714 if (nstate == IEEE80211_S_RUN) { 4715 /* start periodic recalibration timer */ 4716 callout_reset(&sc->sc_cal_ch, sc->sc_calinterval * hz, 4717 ath_calibrate, sc); 4718 } else if (nstate == IEEE80211_S_SCAN) { 4719 /* start ap/neighbor scan timer */ 4720 callout_reset(&sc->sc_scan_ch, (ath_dwelltime * hz) / 1000, 4721 ath_next_scan, sc); 4722 } 4723 bad: 4724 return error; 4725 } 4726 4727 /* 4728 * Allocate a key cache slot to the station so we can 4729 * setup a mapping from key index to node. The key cache 4730 * slot is needed for managing antenna state and for 4731 * compression when stations do not use crypto. We do 4732 * it uniliaterally here; if crypto is employed this slot 4733 * will be reassigned. 4734 */ 4735 static void 4736 ath_setup_stationkey(struct ieee80211_node *ni) 4737 { 4738 struct ieee80211com *ic = ni->ni_ic; 4739 struct ath_softc *sc = ic->ic_ifp->if_softc; 4740 ieee80211_keyix keyix, rxkeyix; 4741 4742 if (!ath_key_alloc(ic, &ni->ni_ucastkey, &keyix, &rxkeyix)) { 4743 /* 4744 * Key cache is full; we'll fall back to doing 4745 * the more expensive lookup in software. Note 4746 * this also means no h/w compression. 4747 */ 4748 /* XXX msg+statistic */ 4749 } else { 4750 /* XXX locking? */ 4751 ni->ni_ucastkey.wk_keyix = keyix; 4752 ni->ni_ucastkey.wk_rxkeyix = rxkeyix; 4753 /* NB: this will create a pass-thru key entry */ 4754 ath_keyset(sc, &ni->ni_ucastkey, ni->ni_macaddr, ic->ic_bss); 4755 } 4756 } 4757 4758 /* 4759 * Setup driver-specific state for a newly associated node. 4760 * Note that we're called also on a re-associate, the isnew 4761 * param tells us if this is the first time or not. 
4762 */ 4763 static void 4764 ath_newassoc(struct ieee80211_node *ni, int isnew) 4765 { 4766 struct ieee80211com *ic = ni->ni_ic; 4767 struct ath_softc *sc = ic->ic_ifp->if_softc; 4768 4769 ath_rate_newassoc(sc, ATH_NODE(ni), isnew); 4770 if (isnew && 4771 (ic->ic_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey) { 4772 KASSERT(ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE, 4773 ("new assoc with a unicast key already setup (keyix %u)", 4774 ni->ni_ucastkey.wk_keyix)); 4775 ath_setup_stationkey(ni); 4776 } 4777 } 4778 4779 static int 4780 ath_getchannels(struct ath_softc *sc, 4781 HAL_REG_DOMAIN rd, HAL_CTRY_CODE cc, HAL_BOOL outdoor, HAL_BOOL xchanmode) 4782 { 4783 #define COMPAT \ 4784 (CHANNEL_ALL_NOTURBO|CHANNEL_PASSIVE|CHANNEL_HALF|CHANNEL_QUARTER) 4785 #define IS_CHAN_PUBLIC_SAFETY(_c) \ 4786 (((_c)->channelFlags & CHANNEL_5GHZ) && \ 4787 ((_c)->channel > 4940 && (_c)->channel < 4990)) 4788 struct ieee80211com *ic = &sc->sc_ic; 4789 struct ifnet *ifp = sc->sc_ifp; 4790 struct ath_hal *ah = sc->sc_ah; 4791 HAL_CHANNEL *chans; 4792 int i, ix, nchan; 4793 u_int32_t regdomain; 4794 4795 chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), 4796 M_TEMP, M_NOWAIT); 4797 if (chans == NULL) { 4798 if_printf(ifp, "unable to allocate channel table\n"); 4799 return ENOMEM; 4800 } 4801 if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan, 4802 NULL, 0, NULL, cc, HAL_MODE_ALL, outdoor, xchanmode)) { 4803 ath_hal_getregdomain(ah, ®domain); 4804 if_printf(ifp, "unable to collect channel list from hal; " 4805 "regdomain likely %u country code %u\n", regdomain, cc); 4806 free(chans, M_TEMP); 4807 return EINVAL; 4808 } 4809 4810 /* 4811 * Convert HAL channels to ieee80211 ones and insert 4812 * them in the table according to their channel number. 
4813 */ 4814 memset(ic->ic_channels, 0, sizeof(ic->ic_channels)); 4815 for (i = 0; i < nchan; i++) { 4816 HAL_CHANNEL *c = &chans[i]; 4817 u_int16_t flags; 4818 4819 /* 4820 * XXX we're not ready to handle the ieee number mapping 4821 * for public safety channels as they overlap with any 4822 * 2GHz channels; for now use a non-public safety 4823 * numbering that is non-overlapping. 4824 */ 4825 ix = ath_hal_mhz2ieee(ah, c->channel, c->channelFlags); 4826 if (IS_CHAN_PUBLIC_SAFETY(c)) 4827 ix += 37; /* XXX */ 4828 if (ix > IEEE80211_CHAN_MAX) { 4829 if_printf(ifp, "bad hal channel %d (%u/%x) ignored\n", 4830 ix, c->channel, c->channelFlags); 4831 continue; 4832 } 4833 if (ix < 0) { 4834 /* XXX can't handle stuff <2400 right now */ 4835 if (bootverbose) 4836 if_printf(ifp, "hal channel %d (%u/%x) " 4837 "cannot be handled; ignored\n", 4838 ix, c->channel, c->channelFlags); 4839 continue; 4840 } 4841 if (bootverbose) 4842 if_printf(ifp, "hal channel %u/%x -> %u\n", 4843 c->channel, c->channelFlags, ix); 4844 /* 4845 * Calculate net80211 flags; most are compatible 4846 * but some need massaging. Note the static turbo 4847 * conversion can be removed once net80211 is updated 4848 * to understand static vs. dynamic turbo. 4849 */ 4850 flags = c->channelFlags & COMPAT; 4851 if (c->channelFlags & CHANNEL_STURBO) 4852 flags |= IEEE80211_CHAN_TURBO; 4853 if (ath_hal_isgsmsku(ah)) { 4854 /* remap to true frequencies */ 4855 c->channel = 922 + (2422 - c->channel); 4856 flags |= IEEE80211_CHAN_GSM; 4857 ix = ieee80211_mhz2ieee(c->channel, flags); 4858 } 4859 if (ic->ic_channels[ix].ic_freq == 0) { 4860 ic->ic_channels[ix].ic_freq = c->channel; 4861 ic->ic_channels[ix].ic_flags = flags; 4862 } else { 4863 /* channels overlap; e.g. 
11g and 11b */ 4864 ic->ic_channels[ix].ic_flags |= flags; 4865 } 4866 } 4867 free(chans, M_TEMP); 4868 ath_hal_getregdomain(ah, &sc->sc_regdomain); 4869 ath_hal_getcountrycode(ah, &sc->sc_countrycode); 4870 sc->sc_xchanmode = xchanmode; 4871 sc->sc_outdoor = outdoor; 4872 return 0; 4873 #undef IS_CHAN_PUBLIC_SAFETY 4874 #undef COMPAT 4875 } 4876 4877 static void 4878 ath_led_done(void *arg) 4879 { 4880 struct ath_softc *sc = arg; 4881 4882 sc->sc_blinking = 0; 4883 } 4884 4885 /* 4886 * Turn the LED off: flip the pin and then set a timer so no 4887 * update will happen for the specified duration. 4888 */ 4889 static void 4890 ath_led_off(void *arg) 4891 { 4892 struct ath_softc *sc = arg; 4893 4894 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); 4895 callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc); 4896 } 4897 4898 /* 4899 * Blink the LED according to the specified on/off times. 4900 */ 4901 static void 4902 ath_led_blink(struct ath_softc *sc, int on, int off) 4903 { 4904 DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off); 4905 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon); 4906 sc->sc_blinking = 1; 4907 sc->sc_ledoff = off; 4908 callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc); 4909 } 4910 4911 static void 4912 ath_led_event(struct ath_softc *sc, int event) 4913 { 4914 4915 sc->sc_ledevent = ticks; /* time of last event */ 4916 if (sc->sc_blinking) /* don't interrupt active blink */ 4917 return; 4918 switch (event) { 4919 case ATH_LED_POLL: 4920 ath_led_blink(sc, sc->sc_hwmap[0].ledon, 4921 sc->sc_hwmap[0].ledoff); 4922 break; 4923 case ATH_LED_TX: 4924 ath_led_blink(sc, sc->sc_hwmap[sc->sc_txrate].ledon, 4925 sc->sc_hwmap[sc->sc_txrate].ledoff); 4926 break; 4927 case ATH_LED_RX: 4928 ath_led_blink(sc, sc->sc_hwmap[sc->sc_rxrate].ledon, 4929 sc->sc_hwmap[sc->sc_rxrate].ledoff); 4930 break; 4931 } 4932 } 4933 4934 static void 4935 ath_update_txpow(struct ath_softc *sc) 4936 { 4937 struct ieee80211com *ic = 
&sc->sc_ic; 4938 struct ath_hal *ah = sc->sc_ah; 4939 u_int32_t txpow; 4940 4941 if (sc->sc_curtxpow != ic->ic_txpowlimit) { 4942 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); 4943 /* read back in case value is clamped */ 4944 ath_hal_gettxpowlimit(ah, &txpow); 4945 ic->ic_txpowlimit = sc->sc_curtxpow = txpow; 4946 } 4947 /* 4948 * Fetch max tx power level for status requests. 4949 */ 4950 ath_hal_getmaxtxpow(sc->sc_ah, &txpow); 4951 ic->ic_bss->ni_txpower = txpow; 4952 } 4953 4954 static int 4955 ath_rate_setup(struct ath_softc *sc, u_int mode) 4956 { 4957 struct ath_hal *ah = sc->sc_ah; 4958 const HAL_RATE_TABLE *rt; 4959 4960 switch (mode) { 4961 case IEEE80211_MODE_11A: 4962 rt = ath_hal_getratetable(ah, HAL_MODE_11A); 4963 break; 4964 case IEEE80211_MODE_HALF: 4965 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 4966 break; 4967 case IEEE80211_MODE_QUARTER: 4968 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 4969 break; 4970 case IEEE80211_MODE_11B: 4971 rt = ath_hal_getratetable(ah, HAL_MODE_11B); 4972 break; 4973 case IEEE80211_MODE_11G: 4974 rt = ath_hal_getratetable(ah, HAL_MODE_11G); 4975 break; 4976 case IEEE80211_MODE_TURBO_A: 4977 /* XXX until static/dynamic turbo is fixed */ 4978 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 4979 break; 4980 case IEEE80211_MODE_TURBO_G: 4981 rt = ath_hal_getratetable(ah, HAL_MODE_108G); 4982 break; 4983 default: 4984 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 4985 __func__, mode); 4986 return 0; 4987 } 4988 sc->sc_rates[mode] = rt; 4989 return (rt != NULL); 4990 } 4991 4992 static void 4993 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 4994 { 4995 #define N(a) (sizeof(a)/sizeof(a[0])) 4996 /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 4997 static const struct { 4998 u_int rate; /* tx/rx 802.11 rate */ 4999 u_int16_t timeOn; /* LED on time (ms) */ 5000 u_int16_t timeOff; /* LED off time (ms) */ 5001 } blinkrates[] = { 5002 { 108, 40, 10 }, 5003 { 96, 
44, 11 }, 5004 { 72, 50, 13 }, 5005 { 48, 57, 14 }, 5006 { 36, 67, 16 }, 5007 { 24, 80, 20 }, 5008 { 22, 100, 25 }, 5009 { 18, 133, 34 }, 5010 { 12, 160, 40 }, 5011 { 10, 200, 50 }, 5012 { 6, 240, 58 }, 5013 { 4, 267, 66 }, 5014 { 2, 400, 100 }, 5015 { 0, 500, 130 }, 5016 /* XXX half/quarter rates */ 5017 }; 5018 const HAL_RATE_TABLE *rt; 5019 int i, j; 5020 5021 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 5022 rt = sc->sc_rates[mode]; 5023 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 5024 for (i = 0; i < rt->rateCount; i++) 5025 sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i; 5026 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 5027 for (i = 0; i < 32; i++) { 5028 u_int8_t ix = rt->rateCodeToIndex[i]; 5029 if (ix == 0xff) { 5030 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 5031 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 5032 continue; 5033 } 5034 sc->sc_hwmap[i].ieeerate = 5035 rt->info[ix].dot11Rate & IEEE80211_RATE_VAL; 5036 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 5037 if (rt->info[ix].shortPreamble || 5038 rt->info[ix].phy == IEEE80211_T_OFDM) 5039 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 5040 /* NB: receive frames include FCS */ 5041 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags | 5042 IEEE80211_RADIOTAP_F_FCS; 5043 /* setup blink rate table to avoid per-packet lookup */ 5044 for (j = 0; j < N(blinkrates)-1; j++) 5045 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 5046 break; 5047 /* NB: this uses the last entry if the rate isn't found */ 5048 /* XXX beware of overlow */ 5049 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 5050 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; 5051 } 5052 sc->sc_currates = rt; 5053 sc->sc_curmode = mode; 5054 /* 5055 * All protection frames are transmited at 2Mb/s for 5056 * 11g, otherwise at 1Mb/s. 
 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(rt, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(rt, 2*1);
	/* rate index used to send management frames */
	sc->sc_minrateix = 0;
	/*
	 * Setup multicast rate state.
	 */
	/* XXX layering violation */
	sc->sc_mcastrix = ath_tx_findrix(rt, sc->sc_ic.ic_mcast_rate);
	sc->sc_mcastrate = sc->sc_ic.ic_mcast_rate;
	/* NB: caller is responsible for resetting rate control state */
#undef N
}

#ifdef ATH_DEBUG
/*
 * Dump the state of a receive descriptor chain to the console, one
 * entry per hardware descriptor.  When "done" is set a status marker
 * is appended: " *" for a clean completion, " !" for an error.
 */
static void
ath_printrxbuf(const struct ath_buf *bf, u_int ix, int done)
{
	const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
	const struct ath_desc *ds;
	int i;

	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
		printf("R[%2u] (DS.V:%p DS.P:%p) L:%08x D:%08x%s\n"
		    " %08x %08x %08x %08x\n",
		    ix, ds, (const struct ath_desc *)bf->bf_daddr + i,
		    ds->ds_link, ds->ds_data,
		    !done ? "" : (rs->rs_status == 0) ? " *" : " !",
		    ds->ds_ctl0, ds->ds_ctl1,
		    ds->ds_hw[0], ds->ds_hw[1]);
	}
}

/*
 * Dump the state of a transmit descriptor chain to the console.
 * NOTE(review): "F:04%x" below looks like it was meant to be "F:%04x"
 * (zero-padded flags); confirm before changing — this is debug output
 * and tools may pattern-match on it.
 */
static void
ath_printtxbuf(const struct ath_buf *bf, u_int qnum, u_int ix, int done)
{
	const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
	const struct ath_desc *ds;
	int i;

	printf("Q%u[%3u]", qnum, ix);
	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
		printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:04%x%s\n"
		    " %08x %08x %08x %08x %08x %08x\n",
		    ds, (const struct ath_desc *)bf->bf_daddr + i,
		    ds->ds_link, ds->ds_data, bf->bf_flags,
		    !done ? "" : (ts->ts_status == 0) ?
			" *" : " !",
		    ds->ds_ctl0, ds->ds_ctl1,
		    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]);
	}
}
#endif /* ATH_DEBUG */

/*
 * Interface watchdog: ticks down sc_tx_timer and, on a transmit
 * timeout, resets the hardware and counts the error; always chains
 * to the net80211 watchdog.
 */
static void
ath_watchdog(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	ifp->if_timer = 0;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	if (sc->sc_tx_timer) {
		if (--sc->sc_tx_timer == 0) {
			if_printf(ifp, "device timeout\n");
			ath_reset(ifp);
			ifp->if_oerrors++;
			sc->sc_stats.ast_watchdog++;
		} else
			ifp->if_timer = 1;	/* re-arm for next tick */
	}
	ieee80211_watchdog(ic);
}

#ifdef ATH_DIAGAPI
/*
 * Diagnostic interface to the HAL.  This is used by various
 * tools to do things like retrieve register contents for
 * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int id = ad->ad_id & ATH_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = ad->ad_in_size;
	u_int32_t outsize = ad->ad_out_size;
	int error = 0;

	if (ad->ad_id & ATH_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(ad->ad_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (ad->ad_id & ATH_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		/* never copy out more than the caller asked for */
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, ad->ad_out_data,
			    ad->ad_out_size);
	} else {
		error = EINVAL;
	}
bad:
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
#endif /* ATH_DIAGAPI */

/*
 * Handle ioctl requests: interface flag changes, multicast filter
 * updates, driver statistics export, the diagnostic API, and
 * pass-through to net80211 for everything else.  Called with the
 * softc unlocked; takes ATH_LOCK for the duration (dropped around
 * operations that may fault or re-enter).
 */
static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	ATH_LOCK(sc);
	switch (cmd) {
	case SIOCSIFFLAGS:
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid && ic->ic_bss != NULL)
				ath_init(sc);	/* XXX lose error */
		} else
			ath_stop_locked(ifp);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * The upper layer has already installed/removed
		 * the multicast address(es), just recalculate the
		 * multicast filter for the card.
		 */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			ath_mode_init(sc);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_rx_rssi = ieee80211_getrssi(ic);
		sc->sc_stats.ast_rx_noise =
			ath_hal_getchannoise(sc->sc_ah, &sc->sc_curchan);
		sc->sc_stats.ast_tx_rate = sc->sc_hwmap[sc->sc_txrate].ieeerate;
		ATH_UNLOCK(sc);
		/*
		 * NB: Drop the softc lock in case of a page fault;
		 * we'll accept any potential inconsistency in the
		 * statistics.  The alternative is to copy the data
		 * to a local structure.
		 */
		return copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		/* drop the lock around the user copyin/copyout */
		ATH_UNLOCK(sc);
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		ATH_LOCK(sc);
		break;
#endif
	default:
		error = ieee80211_ioctl(ic, cmd, data);
		if (error == ENETRESET) {
			if (IS_RUNNING(ifp) &&
			    ic->ic_roaming != IEEE80211_ROAMING_MANUAL)
				ath_init(sc);	/* XXX lose error */
			error = 0;
		}
		if (error == ERESTART)
			error = IS_RUNNING(ifp) ?
			    ath_reset(ifp) : 0;
		break;
	}
	ATH_UNLOCK(sc);
	return error;
#undef IS_RUNNING
}

/* sysctl handler: get/set the 802.11 slot time (us) through the HAL */
static int
ath_sysctl_slottime(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int slottime = ath_hal_getslottime(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &slottime, 0, req);
	if (error || !req->newptr)	/* error or read-only access */
		return error;
	return !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0;
}

/* sysctl handler: get/set the 802.11 ACK timeout (us) through the HAL */
static int
ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int acktimeout = ath_hal_getacktimeout(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &acktimeout, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? EINVAL : 0;
}

/* sysctl handler: get/set the 802.11 CTS timeout (us) through the HAL */
static int
ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int ctstimeout = ath_hal_getctstimeout(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &ctstimeout, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ?
	    EINVAL : 0;
}

/*
 * sysctl handler: enable/disable software LED blinking.  When
 * enabling, reconfigure the LED GPIO pin as an output (handles a
 * changed sc_ledpin) and drive it to the inactive level.
 */
static int
ath_sysctl_softled(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	int softled = sc->sc_softled;
	int error;

	error = sysctl_handle_int(oidp, &softled, 0, req);
	if (error || !req->newptr)
		return error;
	softled = (softled != 0);	/* canonicalize to 0/1 */
	if (softled != sc->sc_softled) {
		if (softled) {
			/* NB: handle any sc_ledpin change */
			ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin);
			ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin,
				!sc->sc_ledon);
		}
		sc->sc_softled = softled;
	}
	return 0;
}

/* sysctl handler: get/set the tx antenna switch configuration */
static int
ath_sysctl_txantenna(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int txantenna = ath_hal_getantennaswitch(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &txantenna, 0, req);
	if (!error && req->newptr) {
		/* XXX assumes 2 antenna ports */
		if (txantenna < HAL_ANT_VARIABLE || txantenna > HAL_ANT_FIXED_B)
			return EINVAL;
		ath_hal_setantennaswitch(sc->sc_ah, txantenna);
		/*
		 * NB: with the switch locked this isn't meaningful,
		 *     but set it anyway so things like radiotap get
		 *     consistent info in their data.
		 */
		sc->sc_txantenna = txantenna;
	}
	return error;
}

/* sysctl handler: get/set the default/rx antenna */
static int
ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int defantenna = ath_hal_getdefantenna(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &defantenna, 0, req);
	if (!error && req->newptr)
		ath_hal_setdefantenna(sc->sc_ah, defantenna);
	return error;
}

/* sysctl handler: enable/disable antenna diversity */
static int
ath_sysctl_diversity(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int diversity = ath_hal_getdiversity(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &diversity, 0, req);
	if (error || !req->newptr)
		return error;
	if (!ath_hal_setdiversity(sc->sc_ah, diversity))
		return EINVAL;
	sc->sc_diversity = diversity;	/* cache for the rx path */
	return 0;
}

/* sysctl handler: get/set the h/w diagnostic control bits */
static int
ath_sysctl_diag(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int32_t diag;
	int error;

	if (!ath_hal_getdiag(sc->sc_ah, &diag))
		return EINVAL;
	error = sysctl_handle_int(oidp, &diag, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_setdiag(sc->sc_ah, diag) ? EINVAL : 0;
}

/*
 * sysctl handler: get/set tx power scaling; a change only takes
 * effect after a chip reset, done here when the interface is running.
 */
static int
ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t scale;
	int error;

	ath_hal_gettpscale(sc->sc_ah, &scale);
	error = sysctl_handle_int(oidp, &scale, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL :
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) ?
	    ath_reset(ifp) : 0;
}

/* sysctl handler: enable/disable per-packet transmit power control */
static int
ath_sysctl_tpc(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int tpc = ath_hal_gettpc(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &tpc, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_settpc(sc->sc_ah, tpc) ? EINVAL : 0;
}

/*
 * sysctl handler: change the RF kill switch state; a reset is
 * required for a changed state to take effect on a running interface.
 */
static int
ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	u_int rfkill = ath_hal_getrfkill(ah);
	int error;

	error = sysctl_handle_int(oidp, &rfkill, 0, req);
	if (error || !req->newptr)
		return error;
	if (rfkill == ath_hal_getrfkill(ah))	/* unchanged */
		return 0;
	if (!ath_hal_setrfkill(ah, rfkill))
		return EINVAL;
	return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
}

/*
 * sysctl handler: get/set the RF silent configuration and cache the
 * GPIO pin/polarity encoded in it.
 */
static int
ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int rfsilent;
	int error;

	ath_hal_getrfsilent(sc->sc_ah, &rfsilent);
	error = sysctl_handle_int(oidp, &rfsilent, 0, req);
	if (error || !req->newptr)
		return error;
	if (!ath_hal_setrfsilent(sc->sc_ah, rfsilent))
		return EINVAL;
	/*
	 * NOTE(review): pin appears to be encoded in bits 2-4 and the
	 * polarity in bit 1 of the HAL value — confirm against the HAL
	 * rfsilent definition.
	 */
	sc->sc_rfsilentpin = rfsilent & 0x1c;
	sc->sc_rfsilentpol = (rfsilent & 0x2) != 0;
	return 0;
}

/*
 * sysctl handler: change the country code, recompute the channel
 * list, and reinitialize the net80211 media state.
 */
static int
ath_sysctl_countrycode(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int32_t cc = sc->sc_countrycode;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	error = sysctl_handle_int(oidp, &cc, 0, req);
	if (error || !req->newptr)
		return error;
	error = ath_getchannels(sc, sc->sc_regdomain, cc,
			sc->sc_outdoor, sc->sc_xchanmode);
	if (error != 0)
		return error;
	ieee80211_media_init(ic, ath_media_change, ieee80211_media_status);
	/* setcurmode? */
	return 0;
}

/*
 * sysctl handler: change the regulatory domain, recompute the channel
 * list, and reinitialize the net80211 media state.
 */
static int
ath_sysctl_regdomain(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int32_t rd = sc->sc_regdomain;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	error = sysctl_handle_int(oidp, &rd, 0, req);
	if (error || !req->newptr)
		return error;
	if (!ath_hal_setregdomain(sc->sc_ah, rd))
		return EINVAL;
	error = ath_getchannels(sc, rd, sc->sc_countrycode,
			sc->sc_outdoor, sc->sc_xchanmode);
	if (error != 0)
		return error;
	ieee80211_media_init(ic, ath_media_change, ieee80211_media_status);
	/* setcurmode? */
	return 0;
}

/* sysctl handler: get/set the tx power used for ACK frames */
static int
ath_sysctl_tpack(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int32_t tpack;
	int error;

	ath_hal_gettpack(sc->sc_ah, &tpack);
	error = sysctl_handle_int(oidp, &tpack, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_settpack(sc->sc_ah, tpack) ? EINVAL : 0;
}

/* sysctl handler: get/set the tx power used for CTS frames */
static int
ath_sysctl_tpcts(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int32_t tpcts;
	int error;

	ath_hal_gettpcts(sc->sc_ah, &tpcts);
	error = sysctl_handle_int(oidp, &tpcts, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_settpcts(sc->sc_ah, tpcts) ?
	    EINVAL : 0;
}

/*
 * Attach the driver's sysctl tree: country/regdomain, debug, 802.11
 * timing, LED, antenna, diagnostic, and tx power knobs.  TPC and
 * RF-silent entries are only created when the hardware supports them.
 */
static void
ath_sysctlattach(struct ath_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
	struct ath_hal *ah = sc->sc_ah;

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"countrycode", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_countrycode, "I", "country code");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"regdomain", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_regdomain, "I", "EEPROM regdomain code");
#ifdef ATH_DEBUG
	sc->sc_debug = ath_debug;	/* seed from the global default */
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->sc_debug, 0,
		"control debugging printfs");
#endif
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_slottime, "I", "802.11 slot time (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_softled, "I", "enable/disable software LED support");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledpin", CTLFLAG_RW, &sc->sc_ledpin, 0,
		"GPIO pin connected to LED");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledon", CTLFLAG_RW, &sc->sc_ledon, 0,
		"setting to turn LED on");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0,
		"idle time for inactivity LED (ticks)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"txantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_txantenna, "I", "antenna switch");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_rxantenna, "I", "default/rx antenna");
	if (ath_hal_hasdiversity(ah))
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_diversity, "I", "antenna diversity");
	sc->sc_txintrperiod = ATH_TXINTR_PERIOD;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0,
		"tx descriptor batching");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_diag, "I", "h/w diagnostic control");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_tpscale, "I", "tx power scaling");
	if (ath_hal_hastpc(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpc, "I", "enable/disable per-packet TPC");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpack", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpack, "I", "tx power for ack frames");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpcts", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpcts, "I", "tx power for cts frames");
	}
	if (ath_hal_hasrfsilent(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"rfsilent", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_rfsilent, "I", "h/w RF silent config");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_rfkill, "I", "enable/disable RF kill switch");
	}
	sc->sc_monpass = HAL_RXERR_DECRYPT | HAL_RXERR_MIC;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"monpass",
		CTLFLAG_RW, &sc->sc_monpass, 0,
		"mask of error frames to pass when monitoring");
}

/*
 * Attach the radiotap bpf tap and initialize the constant fields of
 * the tx/rx radiotap headers.
 */
static void
ath_bpfattach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	bpfattach2(ifp, DLT_IEEE802_11_RADIO,
		sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th),
		&sc->sc_drvbpf);
	/*
	 * Initialize constant fields.
	 * XXX make header lengths a multiple of 32-bits so subsequent
	 *     headers are properly aligned; this is a kludge to keep
	 *     certain applications happy.
	 *
	 * NB: the channel is setup each time we transition to the
	 *     RUN state to avoid filling it in for each frame.
	 */
	sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t));
	sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len);
	sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT);

	sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t));
	sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len);
	sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT);
}

/*
 * Start transmission of a raw (injected) frame using the explicit
 * transmit parameters supplied by the caller: rates/retries for up to
 * four series, rts/cts, preamble, tx power, and antenna.  The frame
 * is handed directly to the hardware queue for its priority (or to
 * the multicast queue when buffering behind the beacon is required).
 */
static int
ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
	struct ath_buf *bf, struct mbuf *m0,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	int error, ismcast, ismrr;
	int hdrlen, pktlen, try0, txantenna;
	u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3;
	struct ath_txq *txq;
	struct ieee80211_frame *wh;
	u_int flags, ctsduration;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	struct ath_desc *ds;
	u_int pri;

	wh = mtod(m0, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	/* XXX honor IEEE80211_BPF_DATAPAD */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;

	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	m0 = bf->bf_m;			/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);
	bf->bf_node = ni;		/* NB: held reference */

	flags = HAL_TXDESC_CLRDMASK;	/* XXX needed for crypto errs */
	flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
	if (params->ibp_flags & IEEE80211_BPF_RTS)
		flags |= HAL_TXDESC_RTSENA;
	else if (params->ibp_flags & IEEE80211_BPF_CTS)
		flags |= HAL_TXDESC_CTSENA;
	/* XXX leave ismcast to injector? */
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
		flags |= HAL_TXDESC_NOACK;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = ath_tx_findrix(rt, params->ibp_rate0);
	txrate = rt->info[rix].rateCode;
	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
		txrate |= rt->info[rix].shortPreamble;
	sc->sc_txrate = txrate;
	try0 = params->ibp_try0;
	ismrr = (params->ibp_try1 != 0);	/* multi-rate retry? */
	txantenna = params->ibp_pri >> 2;
	if (txantenna == 0)			/* XXX? */
		txantenna = sc->sc_txantenna;
	ctsduration = 0;
	if (flags & (HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA)) {
		cix = ath_tx_findrix(rt, params->ibp_ctsrate);
		ctsrate = rt->info[cix].rateCode;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) {
			ctsrate |= rt->info[cix].shortPreamble;
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].spAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_TRUE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].spAckDuration;
		} else {
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].lpAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_FALSE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].lpAckDuration;
		}
		ismrr = 0;			/* XXX */
	} else
		ctsrate = 0;
	pri = params->ibp_pri & 3;
	/*
	 * NB: we mark all packets as type PSPOLL so the h/w won't
	 * set the sequence number, duration, etc.
	 */
	atype = HAL_PKT_TYPE_PSPOLL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(mtod(m0, caddr_t), m0->m_len,
			sc->sc_hwmap[txrate].ieeerate, -1);

	if (bpf_peers_present(ic->ic_rawbpf))
		bpf_mtap(ic->ic_rawbpf, m0);
	if (bpf_peers_present(sc->sc_drvbpf)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		bpf_mtap2(sc->sc_drvbpf,
			&sc->sc_tx_th, sc->sc_tx_th_len, m0);
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	ds = bf->bf_desc;
	/* XXX check return value? */
	ath_hal_setuptxdesc(ah, ds
		, pktlen		/* packet length */
		, hdrlen		/* header length */
		, atype			/* Atheros packet type */
		, params->ibp_power	/* txpower */
		, txrate, try0		/* series 0 rate/tries */
		, HAL_TXKEYIX_INVALID	/* key cache index */
		, txantenna		/* antenna mode */
		, flags			/* flags */
		, ctsrate		/* rts/cts rate */
		, ctsduration		/* rts/cts duration */
	);
	bf->bf_flags = flags;

	if (ismrr) {
		/* fill in the retry-series rates (0 = series unused) */
		rix = ath_tx_findrix(rt, params->ibp_rate1);
		rate1 = rt->info[rix].rateCode;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
			rate1 |= rt->info[rix].shortPreamble;
		if (params->ibp_try2) {
			rix = ath_tx_findrix(rt, params->ibp_rate2);
			rate2 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate2 |= rt->info[rix].shortPreamble;
		} else
			rate2 = 0;
		if (params->ibp_try3) {
			rix = ath_tx_findrix(rt, params->ibp_rate3);
			rate3 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate3 |= rt->info[rix].shortPreamble;
		} else
			rate3 = 0;
		ath_hal_setupxtxdesc(ah, ds
			, rate1, params->ibp_try1	/* series 1 */
			, rate2, params->ibp_try2	/* series 2 */
			, rate3, params->ibp_try3	/* series 3 */
		);
	}

	/*
	 * When servicing one or more stations in power-save mode
	 * (or) if there is some mcast data waiting on the mcast
	 * queue (to prevent out of order delivery) multicast
	 * frames must be buffered until after the beacon.
	 */
	txq = sc->sc_ac2q[pri];
	if (ismcast && (ic->ic_ps_sta || sc->sc_mcastq.axq_depth))
		txq = &sc->sc_mcastq;
	ath_tx_handoff(sc, txq, bf);
	return 0;
}

/*
 * Raw-frame transmit entry point.  Grabs a tx buffer and dispatches
 * to ath_tx_start (legacy path, frame contents interpreted) or
 * ath_tx_raw_start (explicit bpf transmit parameters supplied).
 */
static int
ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
	const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_buf *bf;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
		/*
		 * NOTE(review): unlike the "bad" path below, ni is not
		 * released on this path (nor on the ENOBUFS path) —
		 * confirm who owns the node reference on error return.
		 */
		m_freem(m);
		return ENETDOWN;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 */
	ATH_TXBUF_LOCK(sc);
	bf = STAILQ_FIRST(&sc->sc_txbuf);
	if (bf != NULL)
		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
	ATH_TXBUF_UNLOCK(sc);
	if (bf == NULL) {
		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n",
			__func__);
		sc->sc_stats.ast_tx_qstop++;
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		m_freem(m);
		return ENOBUFS;
	}

	ifp->if_opackets++;
	sc->sc_stats.ast_tx_raw++;

	if (params == NULL) {
		/*
		 * Legacy path; interpret frame contents to decide
		 * precisely how to send the frame.
		 */
		if (ath_tx_start(sc, ni, bf, m))
			goto bad;
	} else {
		/*
		 * Caller supplied explicit parameters to use in
		 * sending the frame.
		 */
		if (ath_tx_raw_start(sc, ni, bf, m, params))
			goto bad;
	}
	sc->sc_tx_timer = 5;	/* arm the transmit watchdog */
	ifp->if_timer = 1;

	return 0;
bad:
	/* reclaim the tx buffer and the node reference */
	ifp->if_oerrors++;
	ATH_TXBUF_LOCK(sc);
	STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
	ATH_TXBUF_UNLOCK(sc);
	ieee80211_free_node(ni);
	return EIO;		/* XXX */
}

/*
 * Announce various information on device/driver attach.
5888 */ 5889 static void 5890 ath_announce(struct ath_softc *sc) 5891 { 5892 #define HAL_MODE_DUALBAND (HAL_MODE_11A|HAL_MODE_11B) 5893 struct ifnet *ifp = sc->sc_ifp; 5894 struct ath_hal *ah = sc->sc_ah; 5895 u_int modes, cc; 5896 5897 if_printf(ifp, "mac %d.%d phy %d.%d", 5898 ah->ah_macVersion, ah->ah_macRev, 5899 ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 5900 /* 5901 * Print radio revision(s). We check the wireless modes 5902 * to avoid falsely printing revs for inoperable parts. 5903 * Dual-band radio revs are returned in the 5Ghz rev number. 5904 */ 5905 ath_hal_getcountrycode(ah, &cc); 5906 modes = ath_hal_getwirelessmodes(ah, cc); 5907 if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) { 5908 if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev) 5909 printf(" 5ghz radio %d.%d 2ghz radio %d.%d", 5910 ah->ah_analog5GhzRev >> 4, 5911 ah->ah_analog5GhzRev & 0xf, 5912 ah->ah_analog2GhzRev >> 4, 5913 ah->ah_analog2GhzRev & 0xf); 5914 else 5915 printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4, 5916 ah->ah_analog5GhzRev & 0xf); 5917 } else 5918 printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4, 5919 ah->ah_analog5GhzRev & 0xf); 5920 printf("\n"); 5921 if (bootverbose) { 5922 int i; 5923 for (i = 0; i <= WME_AC_VO; i++) { 5924 struct ath_txq *txq = sc->sc_ac2q[i]; 5925 if_printf(ifp, "Use hw queue %u for %s traffic\n", 5926 txq->axq_qnum, ieee80211_wme_acnames[i]); 5927 } 5928 if_printf(ifp, "Use hw queue %u for CAB traffic\n", 5929 sc->sc_cabq->axq_qnum); 5930 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 5931 } 5932 if (ath_rxbuf != ATH_RXBUF) 5933 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); 5934 if (ath_txbuf != ATH_TXBUF) 5935 if_printf(ifp, "using %u tx buffers\n", ath_txbuf); 5936 #undef HAL_MODE_DUALBAND 5937 } 5938