1 /* $OpenBSD: ath.c,v 1.125 2023/11/10 15:51:20 bluhm Exp $ */
2 /* $NetBSD: ath.c,v 1.37 2004/08/18 21:59:39 dyoung Exp $ */
3
4 /*-
5 * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16 * redistribution must be conditioned upon including a substantially
17 * similar Disclaimer requirement for further binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * NO WARRANTY
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
26 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
27 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
28 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
31 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGES.
34 */
35
36 /*
37 * Driver for the Atheros Wireless LAN controller.
38 *
39 * This software is derived from work of Atsushi Onoe; his contribution
40 * is greatly appreciated. It has been modified for OpenBSD to use an
41 * open source HAL instead of the original binary-only HAL.
42 */
43
44 #include "bpfilter.h"
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/mbuf.h>
49 #include <sys/malloc.h>
50 #include <sys/lock.h>
51 #include <sys/kernel.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/device.h>
55 #include <sys/errno.h>
56 #include <sys/timeout.h>
57 #include <sys/gpio.h>
58 #include <sys/endian.h>
59
60 #include <machine/bus.h>
61
62 #include <net/if.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #if NBPFILTER > 0
66 #include <net/bpf.h>
67 #endif
68 #include <netinet/in.h>
69 #include <netinet/if_ether.h>
70
71 #include <net80211/ieee80211_var.h>
72 #include <net80211/ieee80211_rssadapt.h>
73
74 #include <dev/pci/pcidevs.h>
75 #include <dev/gpio/gpiovar.h>
76
77 #include <dev/ic/athvar.h>
78
79 int ath_init(struct ifnet *);
80 int ath_init1(struct ath_softc *);
81 int ath_intr1(struct ath_softc *);
82 void ath_stop(struct ifnet *);
83 void ath_start(struct ifnet *);
84 void ath_reset(struct ath_softc *, int);
85 int ath_media_change(struct ifnet *);
86 void ath_watchdog(struct ifnet *);
87 int ath_ioctl(struct ifnet *, u_long, caddr_t);
88 void ath_fatal_proc(void *, int);
89 void ath_rxorn_proc(void *, int);
90 void ath_bmiss_proc(void *, int);
91 int ath_initkeytable(struct ath_softc *);
92 void ath_mcastfilter_accum(caddr_t, u_int32_t (*)[2]);
93 void ath_mcastfilter_compute(struct ath_softc *, u_int32_t (*)[2]);
94 u_int32_t ath_calcrxfilter(struct ath_softc *);
95 void ath_mode_init(struct ath_softc *);
96 #ifndef IEEE80211_STA_ONLY
97 int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
98 void ath_beacon_proc(void *, int);
99 void ath_beacon_free(struct ath_softc *);
100 #endif
101 void ath_beacon_config(struct ath_softc *);
102 int ath_desc_alloc(struct ath_softc *);
103 void ath_desc_free(struct ath_softc *);
104 struct ieee80211_node *ath_node_alloc(struct ieee80211com *);
105 struct mbuf *ath_getmbuf(int, int, u_int);
106 void ath_node_free(struct ieee80211com *, struct ieee80211_node *);
107 void ath_node_copy(struct ieee80211com *,
108 struct ieee80211_node *, const struct ieee80211_node *);
109 u_int8_t ath_node_getrssi(struct ieee80211com *,
110 const struct ieee80211_node *);
111 int ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
112 void ath_rx_proc(void *, int);
113 int ath_tx_start(struct ath_softc *, struct ieee80211_node *,
114 struct ath_buf *, struct mbuf *);
115 void ath_tx_proc(void *, int);
116 int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
117 void ath_draintxq(struct ath_softc *);
118 void ath_stoprecv(struct ath_softc *);
119 int ath_startrecv(struct ath_softc *);
120 void ath_next_scan(void *);
121 int ath_set_slot_time(struct ath_softc *);
122 void ath_calibrate(void *);
123 void ath_ledstate(struct ath_softc *, enum ieee80211_state);
124 int ath_newstate(struct ieee80211com *, enum ieee80211_state, int);
125 void ath_newassoc(struct ieee80211com *,
126 struct ieee80211_node *, int);
127 int ath_getchannels(struct ath_softc *, HAL_BOOL outdoor,
128 HAL_BOOL xchanmode);
129 int ath_rate_setup(struct ath_softc *sc, u_int mode);
130 void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
131 void ath_rssadapt_updatenode(void *, struct ieee80211_node *);
132 void ath_rssadapt_updatestats(void *);
133 #ifndef IEEE80211_STA_ONLY
134 void ath_recv_mgmt(struct ieee80211com *, struct mbuf *,
135 struct ieee80211_node *, struct ieee80211_rxinfo *, int);
136 #endif
137 void ath_disable(struct ath_softc *);
138
139 int ath_gpio_attach(struct ath_softc *, u_int16_t);
140 int ath_gpio_pin_read(void *, int);
141 void ath_gpio_pin_write(void *, int, int);
142 void ath_gpio_pin_ctl(void *, int, int);
143
144 #ifdef AR_DEBUG
145 void ath_printrxbuf(struct ath_buf *, int);
146 void ath_printtxbuf(struct ath_buf *, int);
147 int ath_debug = 0;
148 #endif
149
150 int ath_dwelltime = 200; /* 5 channels/second */
151 int ath_calinterval = 30; /* calibrate every 30 secs */
152 int ath_outdoor = AH_TRUE; /* outdoor operation */
153 int ath_xchanmode = AH_TRUE; /* enable extended channels */
154 int ath_softcrypto = 1; /* 1=enable software crypto */
155
156 struct cfdriver ath_cd = {
157 NULL, "ath", DV_IFNET
158 };
159
160 int
161 ath_activate(struct device *self, int act)
162 {
163 struct ath_softc *sc = (struct ath_softc *)self;
164 struct ifnet *ifp = &sc->sc_ic.ic_if;
165
166 switch (act) {
167 case DVACT_SUSPEND:
168 if (ifp->if_flags & IFF_RUNNING) {
169 ath_stop(ifp);
170 if (sc->sc_power != NULL)
171 (*sc->sc_power)(sc, act);
172 }
173 break;
174 case DVACT_RESUME:
175 if (ifp->if_flags & IFF_UP) {
176 ath_init(ifp);
177 if (ifp->if_flags & IFF_RUNNING)
178 ath_start(ifp);
179 }
180 break;
181 }
182 return 0;
183 }
184
185 int
186 ath_enable(struct ath_softc *sc)
187 {
188 if (ATH_IS_ENABLED(sc) == 0) {
189 if (sc->sc_enable != NULL && (*sc->sc_enable)(sc) != 0) {
190 printf("%s: device enable failed\n",
191 sc->sc_dev.dv_xname);
192 return (EIO);
193 }
194 sc->sc_flags |= ATH_ENABLED;
195 }
196 return (0);
197 }
198
199 void
200 ath_disable(struct ath_softc *sc)
201 {
202 if (!ATH_IS_ENABLED(sc))
203 return;
204 if (sc->sc_disable != NULL)
205 (*sc->sc_disable)(sc);
206 sc->sc_flags &= ~ATH_ENABLED;
207 }
208
209 int
210 ath_attach(u_int16_t devid, struct ath_softc *sc)
211 {
212 struct ieee80211com *ic = &sc->sc_ic;
213 struct ifnet *ifp = &ic->ic_if;
214 struct ath_hal *ah;
215 HAL_STATUS status;
216 HAL_TXQ_INFO qinfo;
217 int error = 0, i;
218
219 DPRINTF(ATH_DEBUG_ANY, ("%s: devid 0x%x\n", __func__, devid));
220
221 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
222 sc->sc_flags &= ~ATH_ATTACHED; /* make sure that it's not attached */
223
224 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
225 sc->sc_pcie, &status);
226 if (ah == NULL) {
227 printf("%s: unable to attach hardware; HAL status %d\n",
228 ifp->if_xname, status);
229 error = ENXIO;
230 goto bad;
231 }
232 if (ah->ah_abi != HAL_ABI_VERSION) {
233 printf("%s: HAL ABI mismatch detected (0x%x != 0x%x)\n",
234 ifp->if_xname, ah->ah_abi, HAL_ABI_VERSION);
235 error = ENXIO;
236 goto bad;
237 }
238
239 if (ah->ah_single_chip == AH_TRUE) {
240 printf("%s: AR%s %u.%u phy %u.%u rf %u.%u", ifp->if_xname,
241 ar5k_printver(AR5K_VERSION_DEV, devid),
242 ah->ah_mac_version, ah->ah_mac_revision,
243 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf,
244 ah->ah_radio_5ghz_revision >> 4,
245 ah->ah_radio_5ghz_revision & 0xf);
246 } else {
247 printf("%s: AR%s %u.%u phy %u.%u", ifp->if_xname,
248 ar5k_printver(AR5K_VERSION_VER, ah->ah_mac_srev),
249 ah->ah_mac_version, ah->ah_mac_revision,
250 ah->ah_phy_revision >> 4, ah->ah_phy_revision & 0xf);
251 printf(" rf%s %u.%u",
252 ar5k_printver(AR5K_VERSION_RAD, ah->ah_radio_5ghz_revision),
253 ah->ah_radio_5ghz_revision >> 4,
254 ah->ah_radio_5ghz_revision & 0xf);
255 if (ah->ah_radio_2ghz_revision != 0) {
256 printf(" rf%s %u.%u",
257 ar5k_printver(AR5K_VERSION_RAD,
258 ah->ah_radio_2ghz_revision),
259 ah->ah_radio_2ghz_revision >> 4,
260 ah->ah_radio_2ghz_revision & 0xf);
261 }
262 }
263 if (ah->ah_ee_version == AR5K_EEPROM_VERSION_4_7)
264 printf(" eeprom 4.7");
265 else
266 printf(" eeprom %1x.%1x", ah->ah_ee_version >> 12,
267 ah->ah_ee_version & 0xff);
268
269 #if 0
270 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_UNSUPP ||
271 ah->ah_radio_2ghz_revision >= AR5K_SREV_RAD_UNSUPP) {
272 printf(": RF radio not supported\n");
273 error = EOPNOTSUPP;
274 goto bad;
275 }
276 #endif
277
278 sc->sc_ah = ah;
279 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */
280
281 /*
282 * Get regulation domain either stored in the EEPROM or defined
283 * as the default value. Some devices are known to have broken
284 * regulation domain values in their EEPROM.
285 */
286 ath_hal_get_regdomain(ah, &ah->ah_regdomain);
287
288 /*
289 * Construct channel list based on the current regulation domain.
290 */
291 error = ath_getchannels(sc, ath_outdoor, ath_xchanmode);
292 if (error != 0)
293 goto bad;
294
295 /*
296 * Setup rate tables for all potential media types.
297 */
298 ath_rate_setup(sc, IEEE80211_MODE_11A);
299 ath_rate_setup(sc, IEEE80211_MODE_11B);
300 ath_rate_setup(sc, IEEE80211_MODE_11G);
301
302 error = ath_desc_alloc(sc);
303 if (error != 0) {
304 printf(": failed to allocate descriptors: %d\n", error);
305 goto bad;
306 }
307 timeout_set(&sc->sc_scan_to, ath_next_scan, sc);
308 timeout_set(&sc->sc_cal_to, ath_calibrate, sc);
309 timeout_set(&sc->sc_rssadapt_to, ath_rssadapt_updatestats, sc);
310
311 ATH_TASK_INIT(&sc->sc_txtask, ath_tx_proc, sc);
312 ATH_TASK_INIT(&sc->sc_rxtask, ath_rx_proc, sc);
313 ATH_TASK_INIT(&sc->sc_rxorntask, ath_rxorn_proc, sc);
314 ATH_TASK_INIT(&sc->sc_fataltask, ath_fatal_proc, sc);
315 ATH_TASK_INIT(&sc->sc_bmisstask, ath_bmiss_proc, sc);
316 #ifndef IEEE80211_STA_ONLY
317 ATH_TASK_INIT(&sc->sc_swbatask, ath_beacon_proc, sc);
318 #endif
319
320 /*
321 * For now just pre-allocate one data queue and one
322 * beacon queue. Note that the HAL handles resetting
323 * them at the needed time. Eventually we'll want to
324 * allocate more tx queues for splitting management
325 * frames and for QOS support.
326 */
327 sc->sc_bhalq = ath_hal_setup_tx_queue(ah, HAL_TX_QUEUE_BEACON, NULL);
328 if (sc->sc_bhalq == (u_int) -1) {
329 printf(": unable to setup a beacon xmit queue!\n");
330 goto bad2;
331 }
332
333 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) {
334 bzero(&qinfo, sizeof(qinfo));
335 qinfo.tqi_type = HAL_TX_QUEUE_DATA;
336 qinfo.tqi_subtype = i; /* should be mapped to WME types */
337 sc->sc_txhalq[i] = ath_hal_setup_tx_queue(ah,
338 HAL_TX_QUEUE_DATA, &qinfo);
339 if (sc->sc_txhalq[i] == (u_int) -1) {
340 printf(": unable to setup a data xmit queue %u!\n", i);
341 goto bad2;
342 }
343 }
344
345 ifp->if_softc = sc;
346 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
347 ifp->if_start = ath_start;
348 ifp->if_watchdog = ath_watchdog;
349 ifp->if_ioctl = ath_ioctl;
350 ifq_init_maxlen(&ifp->if_snd, ATH_TXBUF * ATH_TXDESC);
351
352 ic->ic_softc = sc;
353 ic->ic_newassoc = ath_newassoc;
354 /* XXX not right but it's not used anywhere important */
355 ic->ic_phytype = IEEE80211_T_OFDM;
356 ic->ic_opmode = IEEE80211_M_STA;
357 ic->ic_caps = IEEE80211_C_WEP /* wep supported */
358 | IEEE80211_C_PMGT /* power management */
359 #ifndef IEEE80211_STA_ONLY
360 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
361 | IEEE80211_C_HOSTAP /* hostap mode */
362 #endif
363 | IEEE80211_C_MONITOR /* monitor mode */
364 | IEEE80211_C_SHSLOT /* short slot time supported */
365 | IEEE80211_C_SHPREAMBLE; /* short preamble supported */
366 if (ath_softcrypto)
367 ic->ic_caps |= IEEE80211_C_RSN; /* wpa/rsn supported */
368
369 /*
370 * Not all chips have the VEOL support we want to use with
371 * IBSS beacon; check here for it.
372 */
373 sc->sc_veol = ath_hal_has_veol(ah);
374
375 /* get mac address from hardware */
376 ath_hal_get_lladdr(ah, ic->ic_myaddr);
377
378 if_attach(ifp);
379
380 /* call MI attach routine. */
381 ieee80211_ifattach(ifp);
382
383 /* override default methods */
384 ic->ic_node_alloc = ath_node_alloc;
385 sc->sc_node_free = ic->ic_node_free;
386 ic->ic_node_free = ath_node_free;
387 sc->sc_node_copy = ic->ic_node_copy;
388 ic->ic_node_copy = ath_node_copy;
389 ic->ic_node_getrssi = ath_node_getrssi;
390 sc->sc_newstate = ic->ic_newstate;
391 ic->ic_newstate = ath_newstate;
392 #ifndef IEEE80211_STA_ONLY
393 sc->sc_recv_mgmt = ic->ic_recv_mgmt;
394 ic->ic_recv_mgmt = ath_recv_mgmt;
395 #endif
396 ic->ic_max_rssi = AR5K_MAX_RSSI;
397 bcopy(etherbroadcastaddr, sc->sc_broadcast_addr, IEEE80211_ADDR_LEN);
398
399 /* complete initialization */
400 ieee80211_media_init(ifp, ath_media_change, ieee80211_media_status);
401
402 #if NBPFILTER > 0
403 bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
404 sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
405
406 sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
407 bzero(&sc->sc_rxtapu, sc->sc_rxtap_len);
408 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
409 sc->sc_rxtap.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT);
410
411 sc->sc_txtap_len = sizeof(sc->sc_txtapu);
412 bzero(&sc->sc_txtapu, sc->sc_txtap_len);
413 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
414 sc->sc_txtap.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT);
415 #endif
416
417 sc->sc_flags |= ATH_ATTACHED;
418
419 /*
420 * Print regulation domain and the mac address. The regulation domain
421 * will be marked with a * if the EEPROM value has been overwritten.
422 */
423 printf(", %s%s, address %s\n",
424 ieee80211_regdomain2name(ah->ah_regdomain),
425 ah->ah_regdomain != ah->ah_regdomain_hw ? "*" : "",
426 ether_sprintf(ic->ic_myaddr));
427
428 if (ath_gpio_attach(sc, devid) == 0)
429 sc->sc_flags |= ATH_GPIO;
430
431 return 0;
432 bad2:
433 ath_desc_free(sc);
434 bad:
435 if (ah)
436 ath_hal_detach(ah);
437 sc->sc_invalid = 1;
438 return error;
439 }
440
441 int
442 ath_detach(struct ath_softc *sc, int flags)
443 {
444 struct ifnet *ifp = &sc->sc_ic.ic_if;
445 int s;
446
447 if ((sc->sc_flags & ATH_ATTACHED) == 0)
448 return (0);
449
450 config_detach_children(&sc->sc_dev, flags);
451
452 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags %x\n", __func__, ifp->if_flags));
453
454 timeout_del(&sc->sc_scan_to);
455 timeout_del(&sc->sc_cal_to);
456 timeout_del(&sc->sc_rssadapt_to);
457
458 s = splnet();
459 ath_stop(ifp);
460 ath_desc_free(sc);
461 ath_hal_detach(sc->sc_ah);
462
463 ieee80211_ifdetach(ifp);
464 if_detach(ifp);
465
466 splx(s);
467
468 return 0;
469 }
470
471 int
472 ath_intr(void *arg)
473 {
474 return ath_intr1((struct ath_softc *)arg);
475 }
476
477 int
478 ath_intr1(struct ath_softc *sc)
479 {
480 struct ieee80211com *ic = &sc->sc_ic;
481 struct ifnet *ifp = &ic->ic_if;
482 struct ath_hal *ah = sc->sc_ah;
483 HAL_INT status;
484
485 if (sc->sc_invalid) {
486 /*
487 * The hardware is not ready/present, don't touch anything.
488 * Note this can happen early on if the IRQ is shared.
489 */
490 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid; ignored\n", __func__));
491 return 0;
492 }
493 if (!ath_hal_is_intr_pending(ah)) /* shared irq, not for us */
494 return 0;
495 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) {
496 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n",
497 __func__, ifp->if_flags));
498 ath_hal_get_isr(ah, &status); /* clear ISR */
499 ath_hal_set_intr(ah, 0); /* disable further intr's */
500 return 1; /* XXX */
501 }
502 ath_hal_get_isr(ah, &status); /* NB: clears ISR too */
503 DPRINTF(ATH_DEBUG_INTR, ("%s: status 0x%x\n", __func__, status));
504 status &= sc->sc_imask; /* discard unasked for bits */
505 if (status & HAL_INT_FATAL) {
506 sc->sc_stats.ast_hardware++;
507 ath_hal_set_intr(ah, 0); /* disable intr's until reset */
508 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_fataltask);
509 } else if (status & HAL_INT_RXORN) {
510 sc->sc_stats.ast_rxorn++;
511 ath_hal_set_intr(ah, 0); /* disable intr's until reset */
512 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxorntask);
513 } else if (status & HAL_INT_MIB) {
514 DPRINTF(ATH_DEBUG_INTR,
515 ("%s: resetting MIB counters\n", __func__));
516 sc->sc_stats.ast_mib++;
517 ath_hal_update_mib_counters(ah, &sc->sc_mib_stats);
518 } else {
519 if (status & HAL_INT_RXEOL) {
520 /*
521 * NB: the hardware should re-read the link when
522 * RXE bit is written, but it doesn't work at
523 * least on older hardware revs.
524 */
525 sc->sc_stats.ast_rxeol++;
526 sc->sc_rxlink = NULL;
527 }
528 if (status & HAL_INT_TXURN) {
529 sc->sc_stats.ast_txurn++;
530 /* bump tx trigger level */
531 ath_hal_update_tx_triglevel(ah, AH_TRUE);
532 }
533 if (status & HAL_INT_RX)
534 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_rxtask);
535 if (status & HAL_INT_TX)
536 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_txtask);
537 if (status & HAL_INT_SWBA)
538 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_swbatask);
539 if (status & HAL_INT_BMISS) {
540 sc->sc_stats.ast_bmiss++;
541 ATH_TASK_RUN_OR_ENQUEUE(&sc->sc_bmisstask);
542 }
543 }
544 return 1;
545 }
546
547 void
548 ath_fatal_proc(void *arg, int pending)
549 {
550 struct ath_softc *sc = arg;
551 struct ieee80211com *ic = &sc->sc_ic;
552 struct ifnet *ifp = &ic->ic_if;
553
554 if (ifp->if_flags & IFF_DEBUG)
555 printf("%s: hardware error; resetting\n", ifp->if_xname);
556 ath_reset(sc, 1);
557 }
558
559 void
560 ath_rxorn_proc(void *arg, int pending)
561 {
562 struct ath_softc *sc = arg;
563 struct ieee80211com *ic = &sc->sc_ic;
564 struct ifnet *ifp = &ic->ic_if;
565
566 if (ifp->if_flags & IFF_DEBUG)
567 printf("%s: rx FIFO overrun; resetting\n", ifp->if_xname);
568 ath_reset(sc, 1);
569 }
570
571 void
572 ath_bmiss_proc(void *arg, int pending)
573 {
574 struct ath_softc *sc = arg;
575 struct ieee80211com *ic = &sc->sc_ic;
576
577 DPRINTF(ATH_DEBUG_ANY, ("%s: pending %u\n", __func__, pending));
578 if (ic->ic_opmode != IEEE80211_M_STA)
579 return;
580 if (ic->ic_state == IEEE80211_S_RUN) {
581 /*
582 * Rather than go directly to scan state, try to
583 * reassociate first. If that fails then the state
584 * machine will drop us into scanning after timing
585 * out waiting for a probe response.
586 */
587 ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1);
588 }
589 }
590
591 int
592 ath_init(struct ifnet *ifp)
593 {
594 return ath_init1((struct ath_softc *)ifp->if_softc);
595 }
596
597 int
598 ath_init1(struct ath_softc *sc)
599 {
600 struct ieee80211com *ic = &sc->sc_ic;
601 struct ifnet *ifp = &ic->ic_if;
602 struct ieee80211_node *ni;
603 enum ieee80211_phymode mode;
604 struct ath_hal *ah = sc->sc_ah;
605 HAL_STATUS status;
606 HAL_CHANNEL hchan;
607 int error = 0, s;
608
609 DPRINTF(ATH_DEBUG_ANY, ("%s: if_flags 0x%x\n",
610 __func__, ifp->if_flags));
611
612 if ((error = ath_enable(sc)) != 0)
613 return error;
614
615 s = splnet();
616 /*
617 * Stop anything previously setup. This is safe
618 * whether this is the first time through or not.
619 */
620 ath_stop(ifp);
621
622 /*
623 * Reset the link layer address to the latest value.
624 */
625 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
626 ath_hal_set_lladdr(ah, ic->ic_myaddr);
627
628 /*
629 * The basic interface to setting the hardware in a good
630 * state is ``reset''. On return the hardware is known to
631 * be powered up and with interrupts disabled. This must
632 * be followed by initialization of the appropriate bits
633 * and then setup of the interrupt mask.
634 */
635 hchan.channel = ic->ic_ibss_chan->ic_freq;
636 hchan.channelFlags = ic->ic_ibss_chan->ic_flags;
637 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)) {
638 printf("%s: unable to reset hardware; hal status %u\n",
639 ifp->if_xname, status);
640 error = EIO;
641 goto done;
642 }
643 ath_set_slot_time(sc);
644
645 if ((error = ath_initkeytable(sc)) != 0) {
646 printf("%s: unable to reset the key cache\n",
647 ifp->if_xname);
648 goto done;
649 }
650
651 if ((error = ath_startrecv(sc)) != 0) {
652 printf("%s: unable to start recv logic\n", ifp->if_xname);
653 goto done;
654 }
655
656 /*
657 * Enable interrupts.
658 */
659 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
660 | HAL_INT_RXEOL | HAL_INT_RXORN
661 | HAL_INT_FATAL | HAL_INT_GLOBAL;
662 #ifndef IEEE80211_STA_ONLY
663 if (ic->ic_opmode == IEEE80211_M_HOSTAP)
664 sc->sc_imask |= HAL_INT_MIB;
665 #endif
666 ath_hal_set_intr(ah, sc->sc_imask);
667
668 ifp->if_flags |= IFF_RUNNING;
669 ic->ic_state = IEEE80211_S_INIT;
670
671 /*
672 * The hardware should be ready to go now so it's safe
673 * to kick the 802.11 state machine as it's likely to
674 * immediately call back to us to send mgmt frames.
675 */
676 ni = ic->ic_bss;
677 ni->ni_chan = ic->ic_ibss_chan;
678 mode = ieee80211_chan2mode(ic, ni->ni_chan);
679 if (mode != sc->sc_curmode)
680 ath_setcurmode(sc, mode);
681 if (ic->ic_opmode != IEEE80211_M_MONITOR) {
682 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
683 } else {
684 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
685 }
686 done:
687 splx(s);
688 return error;
689 }
690
691 void
692 ath_stop(struct ifnet *ifp)
693 {
694 struct ieee80211com *ic = (struct ieee80211com *) ifp;
695 struct ath_softc *sc = ifp->if_softc;
696 struct ath_hal *ah = sc->sc_ah;
697 int s;
698
699 DPRINTF(ATH_DEBUG_ANY, ("%s: invalid %u if_flags 0x%x\n",
700 __func__, sc->sc_invalid, ifp->if_flags));
701
702 s = splnet();
703 if (ifp->if_flags & IFF_RUNNING) {
704 /*
705 * Shutdown the hardware and driver:
706 * disable interrupts
707 * turn off timers
708 * clear transmit machinery
709 * clear receive machinery
710 * drain and release tx queues
711 * reclaim beacon resources
712 * reset 802.11 state machine
713 * power down hardware
714 *
715 * Note that some of this work is not possible if the
716 * hardware is gone (invalid).
717 */
718 ifp->if_flags &= ~IFF_RUNNING;
719 ifp->if_timer = 0;
720 if (!sc->sc_invalid)
721 ath_hal_set_intr(ah, 0);
722 ath_draintxq(sc);
723 if (!sc->sc_invalid) {
724 ath_stoprecv(sc);
725 } else {
726 sc->sc_rxlink = NULL;
727 }
728 ifq_purge(&ifp->if_snd);
729 #ifndef IEEE80211_STA_ONLY
730 ath_beacon_free(sc);
731 #endif
732 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
733 if (!sc->sc_invalid) {
734 ath_hal_set_power(ah, HAL_PM_FULL_SLEEP, 0);
735 }
736 ath_disable(sc);
737 }
738 splx(s);
739 }
740
741 /*
742 * Reset the hardware w/o losing operational state. This is
743 * basically a more efficient way of doing ath_stop, ath_init,
744 * followed by state transitions to the current 802.11
745 * operational state. Used to recover from errors such as rx overrun
746 * and to reset the hardware when rf gain settings must be reset.
747 */
748 void
749 ath_reset(struct ath_softc *sc, int full)
750 {
751 struct ieee80211com *ic = &sc->sc_ic;
752 struct ifnet *ifp = &ic->ic_if;
753 struct ath_hal *ah = sc->sc_ah;
754 struct ieee80211_channel *c;
755 HAL_STATUS status;
756 HAL_CHANNEL hchan;
757
758 /*
759 * Convert to a HAL channel description.
760 */
761 c = ic->ic_ibss_chan;
762 hchan.channel = c->ic_freq;
763 hchan.channelFlags = c->ic_flags;
764
765 ath_hal_set_intr(ah, 0); /* disable interrupts */
766 ath_draintxq(sc); /* stop xmit side */
767 ath_stoprecv(sc); /* stop recv side */
768 /* NB: indicate channel change so we do a full reset */
769 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan,
770 full ? AH_TRUE : AH_FALSE, &status)) {
771 printf("%s: %s: unable to reset hardware; hal status %u\n",
772 ifp->if_xname, __func__, status);
773 }
774 ath_set_slot_time(sc);
775 /* In case channel changed, save as a node channel */
776 ic->ic_bss->ni_chan = ic->ic_ibss_chan;
777 ath_hal_set_intr(ah, sc->sc_imask);
778 if (ath_startrecv(sc) != 0) /* restart recv */
779 printf("%s: %s: unable to start recv logic\n", ifp->if_xname,
780 __func__);
781 ath_start(ifp); /* restart xmit */
782 if (ic->ic_state == IEEE80211_S_RUN)
783 ath_beacon_config(sc); /* restart beacons */
784 }
785
786 void
787 ath_start(struct ifnet *ifp)
788 {
789 struct ath_softc *sc = ifp->if_softc;
790 struct ath_hal *ah = sc->sc_ah;
791 struct ieee80211com *ic = &sc->sc_ic;
792 struct ieee80211_node *ni;
793 struct ath_buf *bf;
794 struct mbuf *m;
795 struct ieee80211_frame *wh;
796 int s;
797
798 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd) ||
799 sc->sc_invalid)
800 return;
801 for (;;) {
802 /*
803 * Grab a TX buffer and associated resources.
804 */
805 s = splnet();
806 bf = TAILQ_FIRST(&sc->sc_txbuf);
807 if (bf != NULL)
808 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
809 splx(s);
810 if (bf == NULL) {
811 DPRINTF(ATH_DEBUG_ANY, ("%s: out of xmit buffers\n",
812 __func__));
813 sc->sc_stats.ast_tx_qstop++;
814 ifq_set_oactive(&ifp->if_snd);
815 break;
816 }
817 /*
818 * Poll the management queue for frames; they
819 * have priority over normal data frames.
820 */
821 m = mq_dequeue(&ic->ic_mgtq);
822 if (m == NULL) {
823 /*
824 * No data frames go out unless we're associated.
825 */
826 if (ic->ic_state != IEEE80211_S_RUN) {
827 DPRINTF(ATH_DEBUG_ANY,
828 ("%s: ignore data packet, state %u\n",
829 __func__, ic->ic_state));
830 sc->sc_stats.ast_tx_discard++;
831 s = splnet();
832 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
833 splx(s);
834 break;
835 }
836 m = ifq_dequeue(&ifp->if_snd);
837 if (m == NULL) {
838 s = splnet();
839 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
840 splx(s);
841 break;
842 }
843
844 #if NBPFILTER > 0
845 if (ifp->if_bpf)
846 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
847 #endif
848
849 /*
850 * Encapsulate the packet in prep for transmission.
851 */
852 m = ieee80211_encap(ifp, m, &ni);
853 if (m == NULL) {
854 DPRINTF(ATH_DEBUG_ANY,
855 ("%s: encapsulation failure\n",
856 __func__));
857 sc->sc_stats.ast_tx_encap++;
858 goto bad;
859 }
860 wh = mtod(m, struct ieee80211_frame *);
861 } else {
862 ni = m->m_pkthdr.ph_cookie;
863
864 wh = mtod(m, struct ieee80211_frame *);
865 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
866 IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
867 /* fill time stamp */
868 u_int64_t tsf;
869 u_int32_t *tstamp;
870
871 tsf = ath_hal_get_tsf64(ah);
872 /* XXX: adjust 100us delay to xmit */
873 tsf += 100;
874 tstamp = (u_int32_t *)&wh[1];
875 tstamp[0] = htole32(tsf & 0xffffffff);
876 tstamp[1] = htole32(tsf >> 32);
877 }
878 sc->sc_stats.ast_tx_mgmt++;
879 }
880
881 if (ath_tx_start(sc, ni, bf, m)) {
882 bad:
883 s = splnet();
884 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
885 splx(s);
886 ifp->if_oerrors++;
887 if (ni != NULL)
888 ieee80211_release_node(ic, ni);
889 continue;
890 }
891
892 sc->sc_tx_timer = 5;
893 ifp->if_timer = 1;
894 }
895 }
896
897 int
898 ath_media_change(struct ifnet *ifp)
899 {
900 int error;
901
902 error = ieee80211_media_change(ifp);
903 if (error == ENETRESET) {
904 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) ==
905 (IFF_RUNNING|IFF_UP))
906 ath_init(ifp); /* XXX lose error */
907 error = 0;
908 }
909 return error;
910 }
911
912 void
913 ath_watchdog(struct ifnet *ifp)
914 {
915 struct ath_softc *sc = ifp->if_softc;
916
917 ifp->if_timer = 0;
918 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid)
919 return;
920 if (sc->sc_tx_timer) {
921 if (--sc->sc_tx_timer == 0) {
922 printf("%s: device timeout\n", ifp->if_xname);
923 ath_reset(sc, 1);
924 ifp->if_oerrors++;
925 sc->sc_stats.ast_watchdog++;
926 return;
927 }
928 ifp->if_timer = 1;
929 }
930
931 ieee80211_watchdog(ifp);
932 }
933
934 int
935 ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
936 {
937 struct ath_softc *sc = ifp->if_softc;
938 struct ieee80211com *ic = &sc->sc_ic;
939 struct ifreq *ifr = (struct ifreq *)data;
940 int error = 0, s;
941
942 s = splnet();
943 switch (cmd) {
944 case SIOCSIFADDR:
945 ifp->if_flags |= IFF_UP;
946 /* FALLTHROUGH */
947 case SIOCSIFFLAGS:
948 if (ifp->if_flags & IFF_UP) {
949 if (ifp->if_flags & IFF_RUNNING) {
950 /*
951 * To avoid rescanning another access point,
952 * do not call ath_init() here. Instead,
953 * only reflect promisc mode settings.
954 */
955 ath_mode_init(sc);
956 } else {
957 /*
958 * Beware of being called during detach to
959 * reset promiscuous mode. In that case we
960 * will still be marked UP but not RUNNING.
961 * However trying to re-init the interface
962 * is the wrong thing to do as we've already
963 * torn down much of our state. There's
964 * probably a better way to deal with this.
965 */
966 if (!sc->sc_invalid)
967 ath_init(ifp); /* XXX lose error */
968 }
969 } else
970 ath_stop(ifp);
971 break;
972 case SIOCADDMULTI:
973 case SIOCDELMULTI:
974 error = (cmd == SIOCADDMULTI) ?
975 ether_addmulti(ifr, &sc->sc_ic.ic_ac) :
976 ether_delmulti(ifr, &sc->sc_ic.ic_ac);
977 if (error == ENETRESET) {
978 if (ifp->if_flags & IFF_RUNNING)
979 ath_mode_init(sc);
980 error = 0;
981 }
982 break;
983 case SIOCGATHSTATS:
984 error = copyout(&sc->sc_stats,
985 ifr->ifr_data, sizeof (sc->sc_stats));
986 break;
987 default:
988 error = ieee80211_ioctl(ifp, cmd, data);
989 if (error == ENETRESET) {
990 if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) ==
991 (IFF_RUNNING|IFF_UP)) {
992 if (ic->ic_opmode != IEEE80211_M_MONITOR)
993 ath_init(ifp); /* XXX lose error */
994 else
995 ath_reset(sc, 1);
996 }
997 error = 0;
998 }
999 break;
1000 }
1001 splx(s);
1002 return error;
1003 }
1004
1005 /*
1006 * Fill the hardware key cache with key entries.
1007 */
1008 int
1009 ath_initkeytable(struct ath_softc *sc)
1010 {
1011 struct ieee80211com *ic = &sc->sc_ic;
1012 struct ath_hal *ah = sc->sc_ah;
1013 int i;
1014
1015 if (ath_softcrypto) {
1016 /*
1017 * Disable the hardware crypto engine and reset the key cache
1018 * to allow software crypto operation for WEP/RSN/WPA2
1019 */
1020 if (ic->ic_flags & (IEEE80211_F_WEPON|IEEE80211_F_RSNON))
1021 (void)ath_hal_softcrypto(ah, AH_TRUE);
1022 else
1023 (void)ath_hal_softcrypto(ah, AH_FALSE);
1024 return (0);
1025 }
1026
1027	/* WEP is disabled; WEP is the only cipher we support in hardware so far */
1028 if ((ic->ic_flags & IEEE80211_F_WEPON) == 0)
1029 return (0);
1030
1031 /*
1032 * Setup the hardware after reset: the key cache is filled as
1033 * needed and the receive engine is set going. Frame transmit
1034 * is handled entirely in the frame output path; there's nothing
1035 * to do here except setup the interrupt mask.
1036 */
1037
1038 /* XXX maybe should reset all keys when !WEPON */
1039 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1040 struct ieee80211_key *k = &ic->ic_nw_keys[i];
1041 if (k->k_len == 0)
1042 ath_hal_reset_key(ah, i);
1043 else {
1044 HAL_KEYVAL hk;
1045
1046 bzero(&hk, sizeof(hk));
1047 /*
1048 * Pad the key to a supported key length. It
1049 * is always a good idea to use full-length
1050 * keys without padded zeros but this seems
1051 * to be the default behaviour used by many
1052 * implementations.
1053 */
1054 if (k->k_cipher == IEEE80211_CIPHER_WEP40)
1055 hk.wk_len = AR5K_KEYVAL_LENGTH_40;
1056 else if (k->k_cipher == IEEE80211_CIPHER_WEP104)
1057 hk.wk_len = AR5K_KEYVAL_LENGTH_104;
1058 else
1059 return (EINVAL);
1060 bcopy(k->k_key, hk.wk_key, hk.wk_len);
1061
1062 if (ath_hal_set_key(ah, i, &hk) != AH_TRUE)
1063 return (EINVAL);
1064 }
1065 }
1066
1067 return (0);
1068 }
1069
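/*
 * Fold one multicast address into the 64-bit hardware hash filter:
 * XOR-fold the address down to a 6-bit bucket index and set the
 * corresponding bit in mfilt.
 */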
1070 void
1071 ath_mcastfilter_accum(caddr_t dl, u_int32_t (*mfilt)[2])
1072 {
1073 u_int32_t val;
1074 u_int8_t pos;
1075
1076 val = LE_READ_4(dl + 0);
1077 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
1078 val = LE_READ_4(dl + 3);
1079 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
1080 pos &= 0x3f;
1081 (*mfilt)[pos / 32] |= (1 << (pos % 32));
1082 }
1083
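/*
 * Build the multicast hash filter from the interface's multicast list.
 * Address ranges cannot be represented in the hash, so accept all
 * multicast frames in that case.
 */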
1084 void
1085 ath_mcastfilter_compute(struct ath_softc *sc, u_int32_t (*mfilt)[2])
1086 {
1087 struct arpcom *ac = &sc->sc_ic.ic_ac;
1088 struct ifnet *ifp = &sc->sc_ic.ic_if;
1089 struct ether_multi *enm;
1090 struct ether_multistep estep;
1091
1092 if (ac->ac_multirangecnt > 0) {
1093 /* XXX Punt on ranges. */
1094 (*mfilt)[0] = (*mfilt)[1] = ~((u_int32_t)0);
1095 ifp->if_flags |= IFF_ALLMULTI;
1096 return;
1097 }
1098
1099 ETHER_FIRST_MULTI(estep, ac, enm);
1100 while (enm != NULL) {
1101 ath_mcastfilter_accum(enm->enm_addrlo, mfilt);
1102 ETHER_NEXT_MULTI(estep, enm);
1103 }
1104 ifp->if_flags &= ~IFF_ALLMULTI;
1105 }
1106
1107 /*
1108 * Calculate the receive filter according to the
1109 * operating mode and state:
1110 *
1111 * o always accept unicast, broadcast, and multicast traffic
1112 * o maintain current state of phy error reception
1113 * o probe request frames are accepted only when operating in
1114 * hostap, adhoc, or monitor modes
1115 * o enable promiscuous mode according to the interface state
1116 * o accept beacons:
1117 * - when operating in adhoc mode so the 802.11 layer creates
1118 * node table entries for peers,
1119 * - when operating in station mode for collecting rssi data when
1120 * the station is otherwise quiet, or
1121 * - when scanning
1122 */
1123 u_int32_t
1124 ath_calcrxfilter(struct ath_softc *sc)
1125 {
1126 struct ieee80211com *ic = &sc->sc_ic;
1127 struct ath_hal *ah = sc->sc_ah;
1128 struct ifnet *ifp = &ic->ic_if;
1129 u_int32_t rfilt;
1130
1131 rfilt = (ath_hal_get_rx_filter(ah) & HAL_RX_FILTER_PHYERR)
1132 | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
1133 if (ic->ic_opmode != IEEE80211_M_STA)
1134 rfilt |= HAL_RX_FILTER_PROBEREQ;
1135 #ifndef IEEE80211_STA_ONLY
1136 if (ic->ic_opmode != IEEE80211_M_AHDEMO)
1137 #endif
1138 rfilt |= HAL_RX_FILTER_BEACON;
1139 if (ifp->if_flags & IFF_PROMISC)
1140 rfilt |= HAL_RX_FILTER_PROM;
1141 return rfilt;
1142 }
1143
1144 void
1145 ath_mode_init(struct ath_softc *sc)
1146 {
1147 struct ath_hal *ah = sc->sc_ah;
1148 u_int32_t rfilt, mfilt[2];
1149
1150 /* configure rx filter */
1151 rfilt = ath_calcrxfilter(sc);
1152 ath_hal_set_rx_filter(ah, rfilt);
1153
1154 /* configure operational mode */
1155 ath_hal_set_opmode(ah);
1156
1157 /* calculate and install multicast filter */
1158 mfilt[0] = mfilt[1] = 0;
1159 ath_mcastfilter_compute(sc, &mfilt);
1160 ath_hal_set_mcast_filter(ah, mfilt[0], mfilt[1]);
1161 DPRINTF(ATH_DEBUG_MODE, ("%s: RX filter 0x%x, MC filter %08x:%08x\n",
1162 __func__, rfilt, mfilt[0], mfilt[1]));
1163 }
1164
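/*
 * Allocate a packet header mbuf, attaching a cluster when pktlen
 * does not fit in the mbuf itself.
 */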
1165 struct mbuf *
1166 ath_getmbuf(int flags, int type, u_int pktlen)
1167 {
1168 struct mbuf *m;
1169
1170 KASSERT(pktlen <= MCLBYTES, ("802.11 packet too large: %u", pktlen));
1171 MGETHDR(m, flags, type);
1172 if (m != NULL && pktlen > MHLEN) {
1173 MCLGET(m, flags);
1174 if ((m->m_flags & M_EXT) == 0) {
1175 m_free(m);
1176 m = NULL;
1177 }
1178 }
1179 return m;
1180 }
1181
1182 #ifndef IEEE80211_STA_ONLY
1183 int
1184 ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
1185 {
1186 struct ieee80211com *ic = &sc->sc_ic;
1187 struct ath_hal *ah = sc->sc_ah;
1188 struct ath_buf *bf;
1189 struct ath_desc *ds;
1190 struct mbuf *m;
1191 int error;
1192 u_int8_t rate;
1193 const HAL_RATE_TABLE *rt;
1194 u_int flags = 0;
1195
1196 bf = sc->sc_bcbuf;
1197 if (bf->bf_m != NULL) {
1198 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1199 m_freem(bf->bf_m);
1200 bf->bf_m = NULL;
1201 bf->bf_node = NULL;
1202 }
1203 /*
1204 * NB: the beacon data buffer must be 32-bit aligned;
1205 * we assume the mbuf routines will return us something
1206 * with this alignment (perhaps should assert).
1207 */
1208 m = ieee80211_beacon_alloc(ic, ni);
1209 if (m == NULL) {
1210 DPRINTF(ATH_DEBUG_BEACON, ("%s: cannot get mbuf/cluster\n",
1211 __func__));
1212 sc->sc_stats.ast_be_nombuf++;
1213 return ENOMEM;
1214 }
1215
1216 DPRINTF(ATH_DEBUG_BEACON, ("%s: m %p len %u\n", __func__, m, m->m_len));
1217 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m,
1218 BUS_DMA_NOWAIT);
1219 if (error != 0) {
1220 m_freem(m);
1221 return error;
1222 }
1223 KASSERT(bf->bf_nseg == 1,
1224 ("%s: multi-segment packet; nseg %u", __func__, bf->bf_nseg));
1225 bf->bf_m = m;
1226
1227 /* setup descriptors */
1228 ds = bf->bf_desc;
1229 bzero(ds, sizeof(struct ath_desc));
1230
1231 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol) {
1232 ds->ds_link = bf->bf_daddr; /* link to self */
1233 flags |= HAL_TXDESC_VEOL;
1234 } else {
1235 ds->ds_link = 0;
1236 }
1237 ds->ds_data = bf->bf_segs[0].ds_addr;
1238
1239 DPRINTF(ATH_DEBUG_ANY, ("%s: segaddr %p seglen %u\n", __func__,
1240 (caddr_t)bf->bf_segs[0].ds_addr, (u_int)bf->bf_segs[0].ds_len));
1241
1242 /*
1243 * Calculate rate code.
1244 * XXX everything at min xmit rate
1245 */
1246 rt = sc->sc_currates;
1247 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1248 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
1249 rate = rt->info[0].rateCode | rt->info[0].shortPreamble;
1250 } else {
1251 rate = rt->info[0].rateCode;
1252 }
1253
1254 flags = HAL_TXDESC_NOACK;
1255 if (ic->ic_opmode == IEEE80211_M_IBSS)
1256 flags |= HAL_TXDESC_VEOL;
1257
1258 if (!ath_hal_setup_tx_desc(ah, ds
1259 , m->m_pkthdr.len + IEEE80211_CRC_LEN /* packet length */
1260 , sizeof(struct ieee80211_frame) /* header length */
1261 , HAL_PKT_TYPE_BEACON /* Atheros packet type */
1262 , 60 /* txpower XXX */
1263 , rate, 1 /* series 0 rate/tries */
1264 , HAL_TXKEYIX_INVALID /* no encryption */
1265 , 0 /* antenna mode */
1266 , flags /* no ack for beacons */
1267 , 0 /* rts/cts rate */
1268 , 0 /* rts/cts duration */
1269 )) {
1270 printf("%s: ath_hal_setup_tx_desc failed\n", __func__);
1271 return -1;
1272 }
1273 /* NB: beacon's BufLen must be a multiple of 4 bytes */
1274 /* XXX verify mbuf data area covers this roundup */
1275 if (!ath_hal_fill_tx_desc(ah, ds
1276 , roundup(bf->bf_segs[0].ds_len, 4) /* buffer length */
1277 , AH_TRUE /* first segment */
1278 , AH_TRUE /* last segment */
1279 )) {
1280 printf("%s: ath_hal_fill_tx_desc failed\n", __func__);
1281 return -1;
1282 }
1283
1284	/* XXX should we bus_dmamap_sync here? -dcy */
1285
1286 return 0;
1287 }
1288
1289 void
1290 ath_beacon_proc(void *arg, int pending)
1291 {
1292 struct ath_softc *sc = arg;
1293 struct ieee80211com *ic = &sc->sc_ic;
1294 struct ath_buf *bf = sc->sc_bcbuf;
1295 struct ath_hal *ah = sc->sc_ah;
1296
1297 DPRINTF(ATH_DEBUG_BEACON_PROC, ("%s: pending %u\n", __func__, pending));
1298 if (ic->ic_opmode == IEEE80211_M_STA ||
1299 bf == NULL || bf->bf_m == NULL) {
1300 DPRINTF(ATH_DEBUG_ANY, ("%s: ic_flags=%x bf=%p bf_m=%p\n",
1301 __func__, ic->ic_flags, bf, bf ? bf->bf_m : NULL));
1302 return;
1303 }
1304 /* TODO: update beacon to reflect PS poll state */
1305 if (!ath_hal_stop_tx_dma(ah, sc->sc_bhalq)) {
1306 DPRINTF(ATH_DEBUG_ANY, ("%s: beacon queue %u did not stop?\n",
1307 __func__, sc->sc_bhalq));
1308 }
1309 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
1310 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1311
1312 ath_hal_put_tx_buf(ah, sc->sc_bhalq, bf->bf_daddr);
1313 ath_hal_tx_start(ah, sc->sc_bhalq);
1314 DPRINTF(ATH_DEBUG_BEACON_PROC,
1315 ("%s: TXDP%u = %p (%p)\n", __func__,
1316 sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc));
1317 }
1318
1319 void
1320 ath_beacon_free(struct ath_softc *sc)
1321 {
1322 struct ath_buf *bf = sc->sc_bcbuf;
1323
1324 if (bf->bf_m != NULL) {
1325 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1326 m_freem(bf->bf_m);
1327 bf->bf_m = NULL;
1328 bf->bf_node = NULL;
1329 }
1330 }
1331 #endif /* IEEE80211_STA_ONLY */
1332
1333 /*
1334 * Configure the beacon and sleep timers.
1335 *
1336 * When operating as an AP this resets the TSF and sets
1337 * up the hardware to notify us when we need to issue beacons.
1338 *
1339 * When operating in station mode this sets up the beacon
1340 * timers according to the timestamp of the last received
1341 * beacon and the current TSF, configures PCF and DTIM
1342 * handling, programs the sleep registers so the hardware
1343 * will wakeup in time to receive beacons, and configures
1344 * the beacon miss handling so we'll receive a BMISS
1345 * interrupt when we stop seeing beacons from the AP
1346 * we've associated with.
1347 */
1348 void
1349 ath_beacon_config(struct ath_softc *sc)
1350 {
1351 #define MS_TO_TU(x) (((x) * 1000) / 1024)
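/* 1 TU is 1024us, so e.g. MS_TO_TU(100) = (100 * 1000) / 1024 = 97 TU */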
1352 struct ath_hal *ah = sc->sc_ah;
1353 struct ieee80211com *ic = &sc->sc_ic;
1354 struct ieee80211_node *ni = ic->ic_bss;
1355 u_int32_t nexttbtt, intval;
1356
1357 nexttbtt = (LE_READ_4(ni->ni_tstamp + 4) << 22) |
1358 (LE_READ_4(ni->ni_tstamp) >> 10);
1359 intval = MAX(1, ni->ni_intval) & HAL_BEACON_PERIOD;
1360 if (nexttbtt == 0) { /* e.g. for ap mode */
1361 nexttbtt = intval;
1362 } else if (intval) {
1363 nexttbtt = roundup(nexttbtt, intval);
1364 }
1365 DPRINTF(ATH_DEBUG_BEACON, ("%s: intval %u nexttbtt %u\n",
1366 __func__, ni->ni_intval, nexttbtt));
1367 if (ic->ic_opmode == IEEE80211_M_STA) {
1368 HAL_BEACON_STATE bs;
1369
1370 /* NB: no PCF support right now */
1371 bzero(&bs, sizeof(bs));
1372 bs.bs_intval = intval;
1373 bs.bs_nexttbtt = nexttbtt;
1374 bs.bs_dtimperiod = bs.bs_intval;
1375 bs.bs_nextdtim = nexttbtt;
1376 /*
1377 * Calculate the number of consecutive beacons to miss
1378 * before taking a BMISS interrupt.
1379 * Note that we clamp the result to at most 7 beacons.
1380 */
1381 bs.bs_bmissthreshold = ic->ic_bmissthres;
1382 if (bs.bs_bmissthreshold > 7) {
1383 bs.bs_bmissthreshold = 7;
1384 } else if (bs.bs_bmissthreshold <= 0) {
1385 bs.bs_bmissthreshold = 1;
1386 }
1387
1388 /*
1389 * Calculate sleep duration. The configuration is
1390 * given in ms. We ensure a multiple of the beacon
1391 * period is used. Also, if the sleep duration is
1392 * greater than the DTIM period then it makes sense
1393 * to make it a multiple of that.
1394 *
1395 * XXX fixed at 100ms
1396 */
1397 bs.bs_sleepduration =
1398 roundup(MS_TO_TU(100), bs.bs_intval);
1399 if (bs.bs_sleepduration > bs.bs_dtimperiod) {
1400 bs.bs_sleepduration =
1401 roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
1402 }
1403
1404 DPRINTF(ATH_DEBUG_BEACON,
1405 ("%s: intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u"
1406 " sleep %u\n"
1407 , __func__
1408 , bs.bs_intval
1409 , bs.bs_nexttbtt
1410 , bs.bs_dtimperiod
1411 , bs.bs_nextdtim
1412 , bs.bs_bmissthreshold
1413 , bs.bs_sleepduration
1414 ));
1415 ath_hal_set_intr(ah, 0);
1416 ath_hal_set_beacon_timers(ah, &bs, 0/*XXX*/, 0, 0);
1417 sc->sc_imask |= HAL_INT_BMISS;
1418 ath_hal_set_intr(ah, sc->sc_imask);
1419 }
1420 #ifndef IEEE80211_STA_ONLY
1421 else {
1422 ath_hal_set_intr(ah, 0);
1423 if (nexttbtt == intval)
1424 intval |= HAL_BEACON_RESET_TSF;
1425 if (ic->ic_opmode == IEEE80211_M_IBSS) {
1426 /*
1427 * In IBSS mode enable the beacon timers but only
1428 * enable SWBA interrupts if we need to manually
1429 * prepare beacon frames. Otherwise we use a
1430 * self-linked tx descriptor and let the hardware
1431 * deal with things.
1432 */
1433 intval |= HAL_BEACON_ENA;
1434 if (!sc->sc_veol)
1435 sc->sc_imask |= HAL_INT_SWBA;
1436 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
1437 /*
1438 * In AP mode we enable the beacon timers and
1439 * SWBA interrupts to prepare beacon frames.
1440 */
1441 intval |= HAL_BEACON_ENA;
1442 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */
1443 }
1444 ath_hal_init_beacon(ah, nexttbtt, intval);
1445 ath_hal_set_intr(ah, sc->sc_imask);
1446 /*
1447 * When using a self-linked beacon descriptor in IBSS
1448 * mode load it once here.
1449 */
1450 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_veol)
1451 ath_beacon_proc(sc, 0);
1452 }
1453 #endif
1454 }
1455
1456 int
1457 ath_desc_alloc(struct ath_softc *sc)
1458 {
1459 int i, bsize, error = -1;
1460 struct ath_desc *ds;
1461 struct ath_buf *bf;
1462
1463 /* allocate descriptors */
1464 sc->sc_desc_len = sizeof(struct ath_desc) *
1465 (ATH_TXBUF * ATH_TXDESC + ATH_RXBUF + 1);
1466 if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_desc_len, PAGE_SIZE,
1467 0, &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) {
1468 printf("%s: unable to allocate control data, error = %d\n",
1469 sc->sc_dev.dv_xname, error);
1470 goto fail0;
1471 }
1472
1473 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg,
1474 sc->sc_desc_len, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT)) != 0) {
1475 printf("%s: unable to map control data, error = %d\n",
1476 sc->sc_dev.dv_xname, error);
1477 goto fail1;
1478 }
1479
1480 if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_desc_len, 1,
1481 sc->sc_desc_len, 0, 0, &sc->sc_ddmamap)) != 0) {
1482 printf("%s: unable to create control data DMA map, "
1483 "error = %d\n", sc->sc_dev.dv_xname, error);
1484 goto fail2;
1485 }
1486
1487 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc,
1488 sc->sc_desc_len, NULL, 0)) != 0) {
1489 printf("%s: unable to load control data DMA map, error = %d\n",
1490 sc->sc_dev.dv_xname, error);
1491 goto fail3;
1492 }
1493
1494 ds = sc->sc_desc;
1495 sc->sc_desc_paddr = sc->sc_ddmamap->dm_segs[0].ds_addr;
1496
1497 DPRINTF(ATH_DEBUG_XMIT_DESC|ATH_DEBUG_RECV_DESC,
1498 ("ath_desc_alloc: DMA map: %p (%lu) -> %p (%lu)\n",
1499 ds, (u_long)sc->sc_desc_len,
1500 (caddr_t) sc->sc_desc_paddr, /*XXX*/ (u_long) sc->sc_desc_len));
1501
1502 /* allocate buffers */
1503 bsize = sizeof(struct ath_buf) * (ATH_TXBUF + ATH_RXBUF + 1);
1504 bf = malloc(bsize, M_DEVBUF, M_NOWAIT | M_ZERO);
1505 if (bf == NULL) {
1506 printf("%s: unable to allocate Tx/Rx buffers\n",
1507 sc->sc_dev.dv_xname);
1508 error = ENOMEM;
1509 goto fail3;
1510 }
1511 sc->sc_bufptr = bf;
1512
1513 TAILQ_INIT(&sc->sc_rxbuf);
1514 for (i = 0; i < ATH_RXBUF; i++, bf++, ds++) {
1515 bf->bf_desc = ds;
1516 bf->bf_daddr = sc->sc_desc_paddr +
1517 ((caddr_t)ds - (caddr_t)sc->sc_desc);
1518 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1519 MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) {
1520 printf("%s: unable to create Rx dmamap, error = %d\n",
1521 sc->sc_dev.dv_xname, error);
1522 goto fail4;
1523 }
1524 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
1525 }
1526
1527 TAILQ_INIT(&sc->sc_txbuf);
1528 for (i = 0; i < ATH_TXBUF; i++, bf++, ds += ATH_TXDESC) {
1529 bf->bf_desc = ds;
1530 bf->bf_daddr = sc->sc_desc_paddr +
1531 ((caddr_t)ds - (caddr_t)sc->sc_desc);
1532 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1533 ATH_TXDESC, MCLBYTES, 0, 0, &bf->bf_dmamap)) != 0) {
1534 printf("%s: unable to create Tx dmamap, error = %d\n",
1535 sc->sc_dev.dv_xname, error);
1536 goto fail5;
1537 }
1538 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
1539 }
1540 TAILQ_INIT(&sc->sc_txq);
1541
1542 /* beacon buffer */
1543 bf->bf_desc = ds;
1544 bf->bf_daddr = sc->sc_desc_paddr + ((caddr_t)ds - (caddr_t)sc->sc_desc);
1545 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 0,
1546 &bf->bf_dmamap)) != 0) {
1547 printf("%s: unable to create beacon dmamap, error = %d\n",
1548 sc->sc_dev.dv_xname, error);
1549 goto fail5;
1550 }
1551 sc->sc_bcbuf = bf;
1552 return 0;
1553
1554 fail5:
1555 for (i = ATH_RXBUF; i < ATH_RXBUF + ATH_TXBUF; i++) {
1556 if (sc->sc_bufptr[i].bf_dmamap == NULL)
1557 continue;
1558 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap);
1559 }
1560 fail4:
1561 for (i = 0; i < ATH_RXBUF; i++) {
1562 if (sc->sc_bufptr[i].bf_dmamap == NULL)
1563 continue;
1564 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufptr[i].bf_dmamap);
1565 }
1566 fail3:
1567 bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap);
1568 fail2:
1569 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap);
1570 sc->sc_ddmamap = NULL;
1571 fail1:
1572 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, sc->sc_desc_len);
1573 fail0:
1574 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg);
1575 return error;
1576 }
1577
1578 void
1579 ath_desc_free(struct ath_softc *sc)
1580 {
1581 struct ath_buf *bf;
1582
1583 bus_dmamap_unload(sc->sc_dmat, sc->sc_ddmamap);
1584 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap);
1585 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg);
1586
1587 TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) {
1588 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1589 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
1590 m_freem(bf->bf_m);
1591 }
1592 TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list)
1593 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
1594 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
1595 if (bf->bf_m) {
1596 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1597 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
1598 m_freem(bf->bf_m);
1599 bf->bf_m = NULL;
1600 }
1601 }
1602 if (sc->sc_bcbuf != NULL) {
1603 bus_dmamap_unload(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap);
1604 bus_dmamap_destroy(sc->sc_dmat, sc->sc_bcbuf->bf_dmamap);
1605 sc->sc_bcbuf = NULL;
1606 }
1607
1608 TAILQ_INIT(&sc->sc_rxbuf);
1609 TAILQ_INIT(&sc->sc_txbuf);
1610 TAILQ_INIT(&sc->sc_txq);
1611 free(sc->sc_bufptr, M_DEVBUF, 0);
1612 sc->sc_bufptr = NULL;
1613 }
1614
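/*
 * Allocate a driver node; the rssi history ring starts out empty,
 * with every entry marked ATH_RHIST_NOTIME.
 */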
1615 struct ieee80211_node *
1616 ath_node_alloc(struct ieee80211com *ic)
1617 {
1618 struct ath_node *an;
1619
1620 an = malloc(sizeof(*an), M_DEVBUF, M_NOWAIT | M_ZERO);
1621 if (an) {
1622 int i;
1623 for (i = 0; i < ATH_RHIST_SIZE; i++)
1624 an->an_rx_hist[i].arh_ticks = ATH_RHIST_NOTIME;
1625 an->an_rx_hist_next = ATH_RHIST_SIZE-1;
1626 return &an->an_node;
1627 } else
1628 return NULL;
1629 }
1630
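/*
 * Clear any references to this node from buffers still queued for
 * transmission, then hand the node back to net80211 for freeing.
 */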
1631 void
1632 ath_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
1633 {
1634 struct ath_softc *sc = ic->ic_if.if_softc;
1635 struct ath_buf *bf;
1636
1637 TAILQ_FOREACH(bf, &sc->sc_txq, bf_list) {
1638 if (bf->bf_node == ni)
1639 bf->bf_node = NULL;
1640 }
1641 (*sc->sc_node_free)(ic, ni);
1642 }
1643
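/*
 * Copy the driver-private fields that follow the embedded
 * ieee80211_node (&src[1] points just past the net80211 portion),
 * then let net80211 copy its own per-node state.
 */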
1644 void
1645 ath_node_copy(struct ieee80211com *ic,
1646 struct ieee80211_node *dst, const struct ieee80211_node *src)
1647 {
1648 struct ath_softc *sc = ic->ic_if.if_softc;
1649
1650 bcopy(&src[1], &dst[1],
1651 sizeof(struct ath_node) - sizeof(struct ieee80211_node));
1652 (*sc->sc_node_copy)(ic, dst, src);
1653 }
1654
1655 u_int8_t
1656 ath_node_getrssi(struct ieee80211com *ic, const struct ieee80211_node *ni)
1657 {
1658 const struct ath_node *an = ATH_NODE(ni);
1659 int i, now, nsamples, rssi;
1660
1661 /*
1662 * Calculate the average over the last second of sampled data.
1663 */
1664 now = ATH_TICKS();
1665 nsamples = 0;
1666 rssi = 0;
1667 i = an->an_rx_hist_next;
1668 do {
1669 const struct ath_recv_hist *rh = &an->an_rx_hist[i];
1670 if (rh->arh_ticks == ATH_RHIST_NOTIME)
1671 goto done;
1672 if (now - rh->arh_ticks > hz)
1673 goto done;
1674 rssi += rh->arh_rssi;
1675 nsamples++;
1676 if (i == 0) {
1677 i = ATH_RHIST_SIZE-1;
1678 } else {
1679 i--;
1680 }
1681 } while (i != an->an_rx_hist_next);
1682 done:
1683 /*
1684 * Return either the average or the last known
1685 * value if there is no recent data.
1686 */
1687 return (nsamples ? rssi / nsamples : an->an_rx_hist[i].arh_rssi);
1688 }
1689
1690 int
1691 ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
1692 {
1693 struct ath_hal *ah = sc->sc_ah;
1694 int error;
1695 struct mbuf *m;
1696 struct ath_desc *ds;
1697
1698 m = bf->bf_m;
1699 if (m == NULL) {
1700 /*
1701 * NB: by assigning a page to the rx dma buffer we
1702 * implicitly satisfy the Atheros requirement that
1703 * this buffer be cache-line-aligned and sized to be
1704 * multiple of the cache line size. Not doing this
1705 * causes weird stuff to happen (for the 5210 at least).
1706 */
1707 m = ath_getmbuf(M_DONTWAIT, MT_DATA, MCLBYTES);
1708 if (m == NULL) {
1709 DPRINTF(ATH_DEBUG_ANY,
1710 ("%s: no mbuf/cluster\n", __func__));
1711 sc->sc_stats.ast_rx_nombuf++;
1712 return ENOMEM;
1713 }
1714 bf->bf_m = m;
1715 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
1716
1717 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m,
1718 BUS_DMA_NOWAIT);
1719 if (error != 0) {
1720 DPRINTF(ATH_DEBUG_ANY,
1721 ("%s: ath_bus_dmamap_load_mbuf failed;"
1722 " error %d\n", __func__, error));
1723 sc->sc_stats.ast_rx_busdma++;
1724 return error;
1725 }
1726 KASSERT(bf->bf_nseg == 1,
1727 ("ath_rxbuf_init: multi-segment packet; nseg %u",
1728 bf->bf_nseg));
1729 }
1730 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
1731 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1732
1733 /*
1734 * Setup descriptors. For receive we always terminate
1735 * the descriptor list with a self-linked entry so we'll
1736 * not get overrun under high load (as can happen with a
1737 * 5212 when ANI processing enables PHY errors).
1738 *
1739 * To ensure the last descriptor is self-linked we create
1740 * each descriptor as self-linked and add it to the end. As
1741 * each additional descriptor is added the previous self-linked
1742 * entry is ``fixed'' naturally. This should be safe even
1743 * if DMA is happening. When processing RX interrupts we
1744 * never remove/process the last, self-linked, entry on the
1745 * descriptor list. This ensures the hardware always has
1746 * someplace to write a new frame.
1747 */
1748 ds = bf->bf_desc;
1749 bzero(ds, sizeof(struct ath_desc));
1750 #ifndef IEEE80211_STA_ONLY
1751 if (sc->sc_ic.ic_opmode != IEEE80211_M_HOSTAP)
1752 ds->ds_link = bf->bf_daddr; /* link to self */
1753 #endif
1754 ds->ds_data = bf->bf_segs[0].ds_addr;
1755 ath_hal_setup_rx_desc(ah, ds
1756 , m->m_len /* buffer size */
1757 , 0
1758 );
1759
1760 if (sc->sc_rxlink != NULL)
1761 *sc->sc_rxlink = bf->bf_daddr;
1762 sc->sc_rxlink = &ds->ds_link;
1763 return 0;
1764 }
1765
1766 void
1767 ath_rx_proc(void *arg, int npending)
1768 {
1769 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1770 #define PA2DESC(_sc, _pa) \
1771 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \
1772 ((_pa) - (_sc)->sc_desc_paddr)))
1773 struct ath_softc *sc = arg;
1774 struct ath_buf *bf;
1775 struct ieee80211com *ic = &sc->sc_ic;
1776 struct ifnet *ifp = &ic->ic_if;
1777 struct ath_hal *ah = sc->sc_ah;
1778 struct ath_desc *ds;
1779 struct mbuf *m;
1780 struct ieee80211_frame *wh;
1781 struct ieee80211_frame whbuf;
1782 struct ieee80211_rxinfo rxi;
1783 struct ieee80211_node *ni;
1784 struct ath_node *an;
1785 struct ath_recv_hist *rh;
1786 int len;
1787 u_int phyerr;
1788 HAL_STATUS status;
1789
1790 DPRINTF(ATH_DEBUG_RX_PROC, ("%s: pending %u\n", __func__, npending));
1791 do {
1792 bf = TAILQ_FIRST(&sc->sc_rxbuf);
1793 if (bf == NULL) { /* NB: shouldn't happen */
1794 printf("%s: ath_rx_proc: no buffer!\n", ifp->if_xname);
1795 break;
1796 }
1797 ds = bf->bf_desc;
1798 if (ds->ds_link == bf->bf_daddr) {
1799 /* NB: never process the self-linked entry at the end */
1800 break;
1801 }
1802 m = bf->bf_m;
1803 if (m == NULL) { /* NB: shouldn't happen */
1804 printf("%s: ath_rx_proc: no mbuf!\n", ifp->if_xname);
1805 continue;
1806 }
1807 /* XXX sync descriptor memory */
1808 /*
1809 * Must provide the virtual address of the current
1810 * descriptor, the physical address, and the virtual
1811 * address of the next descriptor in the h/w chain.
1812 * This allows the HAL to look ahead to see if the
1813 * hardware is done with a descriptor by checking the
1814 * done bit in the following descriptor and the address
1815 * of the current descriptor the DMA engine is working
1816 * on. All this is necessary because of our use of
1817 * a self-linked list to avoid rx overruns.
1818 */
1819 status = ath_hal_proc_rx_desc(ah, ds,
1820 bf->bf_daddr, PA2DESC(sc, ds->ds_link));
1821 #ifdef AR_DEBUG
1822 if (ath_debug & ATH_DEBUG_RECV_DESC)
1823 ath_printrxbuf(bf, status == HAL_OK);
1824 #endif
1825 if (status == HAL_EINPROGRESS)
1826 break;
1827 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
1828
1829 if (ds->ds_rxstat.rs_more) {
1830 /*
1831 * Frame spans multiple descriptors; this
1832 * cannot happen yet as we don't support
1833 * jumbograms. If not in monitor mode,
1834 * discard the frame.
1835 */
1836
1837 /*
1838 * Enable this if you want to see error
1839 * frames in Monitor mode.
1840 */
1841 #ifdef ERROR_FRAMES
1842 if (ic->ic_opmode != IEEE80211_M_MONITOR) {
1843 /* XXX statistic */
1844 goto rx_next;
1845 }
1846 #endif
1847 /* fall thru for monitor mode handling... */
1848
1849 } else if (ds->ds_rxstat.rs_status != 0) {
1850 if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC)
1851 sc->sc_stats.ast_rx_crcerr++;
1852 if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO)
1853 sc->sc_stats.ast_rx_fifoerr++;
1854 if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT)
1855 sc->sc_stats.ast_rx_badcrypt++;
1856 if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) {
1857 sc->sc_stats.ast_rx_phyerr++;
1858 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
1859 sc->sc_stats.ast_rx_phy[phyerr]++;
1860 }
1861
1862 /*
1863 * Reject frames with decryption or PHY errors; we don't
1864 * want to see these even in monitor mode.
1865 */
1866 if ((ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT ) ||
1867 (ds->ds_rxstat.rs_status & HAL_RXERR_PHY))
1868 goto rx_next;
1869
1870 /*
1871 * In monitor mode, allow through packets that
1872 * cannot be decrypted
1873 */
1874 if ((ds->ds_rxstat.rs_status & ~HAL_RXERR_DECRYPT) ||
1875 sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
1876 goto rx_next;
1877 }
1878
1879 len = ds->ds_rxstat.rs_datalen;
1880 if (len < IEEE80211_MIN_LEN) {
1881 DPRINTF(ATH_DEBUG_RECV, ("%s: short packet %d\n",
1882 __func__, len));
1883 sc->sc_stats.ast_rx_tooshort++;
1884 goto rx_next;
1885 }
1886
1887 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
1888 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1889
1890 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
1891 bf->bf_m = NULL;
1892 m->m_pkthdr.len = m->m_len = len;
1893
1894 #if NBPFILTER > 0
1895 if (sc->sc_drvbpf) {
1896 sc->sc_rxtap.wr_flags = IEEE80211_RADIOTAP_F_FCS;
1897 sc->sc_rxtap.wr_rate =
1898 sc->sc_hwmap[ds->ds_rxstat.rs_rate] &
1899 IEEE80211_RATE_VAL;
1900 sc->sc_rxtap.wr_antenna = ds->ds_rxstat.rs_antenna;
1901 sc->sc_rxtap.wr_rssi = ds->ds_rxstat.rs_rssi;
1902 sc->sc_rxtap.wr_max_rssi = ic->ic_max_rssi;
1903
1904 bpf_mtap_hdr(sc->sc_drvbpf, &sc->sc_rxtap,
1905 sc->sc_rxtap_len, m, BPF_DIRECTION_IN);
1906 }
1907 #endif
1908 m_adj(m, -IEEE80211_CRC_LEN);
1909 wh = mtod(m, struct ieee80211_frame *);
1910 memset(&rxi, 0, sizeof(rxi));
1911 if (!ath_softcrypto && (wh->i_fc[1] & IEEE80211_FC1_WEP)) {
1912 /*
1913 * WEP is decrypted by hardware. Clear WEP bit
1914 * and trim WEP header for ieee80211_inputm().
1915 */
1916 wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
1917 bcopy(wh, &whbuf, sizeof(whbuf));
1918 m_adj(m, IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN);
1919 wh = mtod(m, struct ieee80211_frame *);
1920 bcopy(&whbuf, wh, sizeof(whbuf));
1921 /*
1922 * Also trim WEP ICV from the tail.
1923 */
1924 m_adj(m, -IEEE80211_WEP_CRCLEN);
1925 /*
1926 * The header has probably moved.
1927 */
1928 wh = mtod(m, struct ieee80211_frame *);
1929
1930 rxi.rxi_flags |= IEEE80211_RXI_HWDEC;
1931 }
1932
1933 /*
1934 * Locate the node for sender, track state, and
1935 * then pass this node (referenced) up to the 802.11
1936 * layer for its use.
1937 */
1938 ni = ieee80211_find_rxnode(ic, wh);
1939
1940 /*
1941 * Record driver-specific state.
1942 */
1943 an = ATH_NODE(ni);
1944 if (++(an->an_rx_hist_next) == ATH_RHIST_SIZE)
1945 an->an_rx_hist_next = 0;
1946 rh = &an->an_rx_hist[an->an_rx_hist_next];
1947 rh->arh_ticks = ATH_TICKS();
1948 rh->arh_rssi = ds->ds_rxstat.rs_rssi;
1949 rh->arh_antenna = ds->ds_rxstat.rs_antenna;
1950
1951 /*
1952 * Send frame up for processing.
1953 */
1954 rxi.rxi_rssi = ds->ds_rxstat.rs_rssi;
1955 rxi.rxi_tstamp = ds->ds_rxstat.rs_tstamp;
1956 ieee80211_inputm(ifp, m, ni, &rxi, &ml);
1957
1958 /* Handle the rate adaptation */
1959 ieee80211_rssadapt_input(ic, ni, &an->an_rssadapt,
1960 ds->ds_rxstat.rs_rssi);
1961
1962 /*
1963 * The frame may have caused the node to be marked for
1964 * reclamation (e.g. in response to a DEAUTH message)
1965 * so use release_node here instead of unref_node.
1966 */
1967 ieee80211_release_node(ic, ni);
1968
1969 rx_next:
1970 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
1971 } while (ath_rxbuf_init(sc, bf) == 0);
1972
1973 if_input(ifp, &ml);
1974
1975 ath_hal_set_rx_signal(ah); /* rx signal state monitoring */
1976 ath_hal_start_rx(ah); /* in case of RXEOL */
1977 #undef PA2DESC
1978 }
1979
1980 /*
1981 * XXX Size of an ACK control frame: FC (2) + dur (2) + RA + FCS (4) bytes.
1982 */
1983 #define IEEE80211_ACK_SIZE (2+2+IEEE80211_ADDR_LEN+4)
1984
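/*
 * Prepare and queue a frame for transmission: handle WEP (software
 * crypto or hardware key/IV setup), load the mbuf for DMA, select a
 * transmit rate, build the tx descriptors and hand the frame to the
 * hardware data queue.
 */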
1985 int
1986 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1987 struct ath_buf *bf, struct mbuf *m0)
1988 {
1989 struct ieee80211com *ic = &sc->sc_ic;
1990 struct ath_hal *ah = sc->sc_ah;
1991 struct ifnet *ifp = &sc->sc_ic.ic_if;
1992 int i, error, iswep, hdrlen, pktlen, len, s, tries;
1993 u_int8_t rix, cix, txrate, ctsrate;
1994 struct ath_desc *ds;
1995 struct ieee80211_frame *wh;
1996 struct ieee80211_key *k;
1997 u_int32_t iv;
1998 u_int8_t *ivp;
1999 u_int8_t hdrbuf[sizeof(struct ieee80211_frame) +
2000 IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN];
2001 u_int subtype, flags, ctsduration, antenna;
2002 HAL_PKT_TYPE atype;
2003 const HAL_RATE_TABLE *rt;
2004 HAL_BOOL shortPreamble;
2005 struct ath_node *an;
2006 u_int8_t hwqueue = HAL_TX_QUEUE_ID_DATA_MIN;
2007
2008 wh = mtod(m0, struct ieee80211_frame *);
2009 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
2010 hdrlen = sizeof(struct ieee80211_frame);
2011 pktlen = m0->m_pkthdr.len;
2012
2013 if (ath_softcrypto && iswep) {
2014 k = ieee80211_get_txkey(ic, wh, ni);
2015 if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL)
2016 return ENOMEM;
2017 wh = mtod(m0, struct ieee80211_frame *);
2018
2019 /* reset len in case we got a new mbuf */
2020 pktlen = m0->m_pkthdr.len;
2021 } else if (!ath_softcrypto && iswep) {
2022 bcopy(mtod(m0, caddr_t), hdrbuf, hdrlen);
2023 m_adj(m0, hdrlen);
2024 M_PREPEND(m0, sizeof(hdrbuf), M_DONTWAIT);
2025 if (m0 == NULL) {
2026 sc->sc_stats.ast_tx_nombuf++;
2027 return ENOMEM;
2028 }
2029 ivp = hdrbuf + hdrlen;
2030 wh = mtod(m0, struct ieee80211_frame *);
2031 /*
2032 * XXX
2033 * IV must not duplicate during the lifetime of the key.
2034 * But no mechanism to renew keys is defined in IEEE 802.11
2035 * for WEP. And the IV may be duplicated at other stations
2036 * because the session key itself is shared. So we use a
2037 * pseudo-random IV for now, even though that is not ideal.
2038 *
2039 * NB: Rather than use a strictly random IV we select a
2040 * random one to start and then increment the value for
2041 * each frame. This is an explicit tradeoff between
2042 * overhead and security. Given the basic insecurity of
2043 * WEP this seems worthwhile.
2044 */
2045
2046 /*
2047 * Skip 'bad' IVs from Fluhrer/Mantin/Shamir:
2048 * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255
2049 */
2050 iv = ic->ic_iv;
2051 if ((iv & 0xff00) == 0xff00) {
2052 int B = (iv & 0xff0000) >> 16;
2053 if (3 <= B && B < 16)
2054 iv = (B+1) << 16;
2055 }
2056 ic->ic_iv = iv + 1;
2057
2058 /*
2059 * NB: Preserve byte order of IV for packet
2060 * sniffers; it doesn't matter otherwise.
2061 */
2062 #if BYTE_ORDER == BIG_ENDIAN
2063 ivp[0] = iv >> 0;
2064 ivp[1] = iv >> 8;
2065 ivp[2] = iv >> 16;
2066 #else
2067 ivp[2] = iv >> 0;
2068 ivp[1] = iv >> 8;
2069 ivp[0] = iv >> 16;
2070 #endif
2071 ivp[3] = ic->ic_wep_txkey << 6; /* Key ID and pad */
2072 bcopy(hdrbuf, mtod(m0, caddr_t), sizeof(hdrbuf));
2073 /*
2074 * hdrlen and pktlen must be increased to account for the WEP fields.
2075 */
2076 len = IEEE80211_WEP_IVLEN +
2077 IEEE80211_WEP_KIDLEN +
2078 IEEE80211_WEP_CRCLEN;
2079 hdrlen += len;
2080 pktlen += len;
2081 }
2082 pktlen += IEEE80211_CRC_LEN;
2083
2084 /*
2085 * Load the DMA map so any coalescing is done. This
2086 * also calculates the number of descriptors we need.
2087 */
2088 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0,
2089 BUS_DMA_NOWAIT);
2090 /*
2091 * Discard null packets and check for packets that
2092 * require too many TX descriptors. We try to convert
2093 * the latter to a cluster.
2094 */
2095 if (error == EFBIG) { /* too many desc's, linearize */
2096 sc->sc_stats.ast_tx_linear++;
2097 if (m_defrag(m0, M_DONTWAIT)) {
2098 sc->sc_stats.ast_tx_nomcl++;
2099 m_freem(m0);
2100 return ENOMEM;
2101 }
2102 error = bus_dmamap_load_mbuf(sc->sc_dmat, bf->bf_dmamap, m0,
2103 BUS_DMA_NOWAIT);
2104 if (error != 0) {
2105 sc->sc_stats.ast_tx_busdma++;
2106 m_freem(m0);
2107 return error;
2108 }
2109 KASSERT(bf->bf_nseg == 1,
2110 ("ath_tx_start: packet not one segment; nseg %u",
2111 bf->bf_nseg));
2112 } else if (error != 0) {
2113 sc->sc_stats.ast_tx_busdma++;
2114 m_freem(m0);
2115 return error;
2116 } else if (bf->bf_nseg == 0) { /* null packet, discard */
2117 sc->sc_stats.ast_tx_nodata++;
2118 m_freem(m0);
2119 return EIO;
2120 }
2121 DPRINTF(ATH_DEBUG_XMIT, ("%s: m %p len %u\n", __func__, m0, pktlen));
2122 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
2123 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
2124 bf->bf_m = m0;
2125 bf->bf_node = ni; /* NB: held reference */
2126 an = ATH_NODE(ni);
2127
2128 /* setup descriptors */
2129 ds = bf->bf_desc;
2130 rt = sc->sc_currates;
2131 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2132
2133 /*
2134 * Calculate Atheros packet type from IEEE80211 packet header
2135 * and setup for rate calculations.
2136 */
2137 bf->bf_id.id_node = NULL;
2138 atype = HAL_PKT_TYPE_NORMAL; /* default */
2139 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
2140 case IEEE80211_FC0_TYPE_MGT:
2141 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2142 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
2143 atype = HAL_PKT_TYPE_BEACON;
2144 } else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
2145 atype = HAL_PKT_TYPE_PROBE_RESP;
2146 } else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) {
2147 atype = HAL_PKT_TYPE_ATIM;
2148 }
2149 rix = 0; /* XXX lowest rate */
2150 break;
2151 case IEEE80211_FC0_TYPE_CTL:
2152 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2153 if (subtype == IEEE80211_FC0_SUBTYPE_PS_POLL)
2154 atype = HAL_PKT_TYPE_PSPOLL;
2155 rix = 0; /* XXX lowest rate */
2156 break;
2157 default:
2158 /* remember link conditions for rate adaptation algorithm */
2159 if (ic->ic_fixed_rate == -1) {
2160 bf->bf_id.id_len = m0->m_pkthdr.len;
2161 bf->bf_id.id_rateidx = ni->ni_txrate;
2162 bf->bf_id.id_node = ni;
2163 bf->bf_id.id_rssi = ath_node_getrssi(ic, ni);
2164 }
2165 ni->ni_txrate = ieee80211_rssadapt_choose(&an->an_rssadapt,
2166 &ni->ni_rates, wh, m0->m_pkthdr.len, ic->ic_fixed_rate,
2167 ifp->if_xname, 0);
2168 rix = sc->sc_rixmap[ni->ni_rates.rs_rates[ni->ni_txrate] &
2169 IEEE80211_RATE_VAL];
2170 if (rix == 0xff) {
2171 printf("%s: bogus xmit rate 0x%x (idx 0x%x)\n",
2172 ifp->if_xname, ni->ni_rates.rs_rates[ni->ni_txrate],
2173 ni->ni_txrate);
2174 sc->sc_stats.ast_tx_badrate++;
2175 m_freem(m0);
2176 return EIO;
2177 }
2178 break;
2179 }
2180
2181 /*
2182 * NB: the 802.11 layer marks whether or not we should
2183 * use short preamble based on the current mode and
2184 * negotiated parameters.
2185 */
2186 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
2187 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
2188 txrate = rt->info[rix].rateCode | rt->info[rix].shortPreamble;
2189 shortPreamble = AH_TRUE;
2190 sc->sc_stats.ast_tx_shortpre++;
2191 } else {
2192 txrate = rt->info[rix].rateCode;
2193 shortPreamble = AH_FALSE;
2194 }
2195
2196 /*
2197 * Calculate miscellaneous flags.
2198 */
2199 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for wep errors */
2200 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2201 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */
2202 sc->sc_stats.ast_tx_noack++;
2203 } else if (pktlen > ic->ic_rtsthreshold) {
2204 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */
2205 sc->sc_stats.ast_tx_rts++;
2206 }
2207
2208 /*
2209 * Calculate duration. This logically belongs in the 802.11
2210 * layer but it lacks sufficient information to calculate it.
2211 */
2212 if ((flags & HAL_TXDESC_NOACK) == 0 &&
2213 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
2214 u_int16_t dur;
2215 /*
2216 * XXX not right with fragmentation.
2217 */
2218 dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE,
2219 rix, shortPreamble);
2220 *((u_int16_t*) wh->i_dur) = htole16(dur);
2221 }
2222
2223 /*
2224 * Calculate RTS/CTS rate and duration if needed.
2225 */
2226 ctsduration = 0;
2227 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
2228 /*
2229 * CTS transmit rate is derived from the transmit rate
2230 * by looking in the h/w rate table. We must also factor
2231 * in whether or not a short preamble is to be used.
2232 */
2233 cix = rt->info[rix].controlRate;
2234 ctsrate = rt->info[cix].rateCode;
2235 if (shortPreamble)
2236 ctsrate |= rt->info[cix].shortPreamble;
2237 /*
2238 * Compute the transmit duration based on the size
2239 * of an ACK frame. We call into the HAL to do the
2240 * computation since it depends on the characteristics
2241 * of the actual PHY being used.
2242 */
2243 if (flags & HAL_TXDESC_RTSENA) { /* SIFS + CTS */
2244 ctsduration += ath_hal_computetxtime(ah,
2245 rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
2246 }
2247 /* SIFS + data */
2248 ctsduration += ath_hal_computetxtime(ah,
2249 rt, pktlen, rix, shortPreamble);
2250 if ((flags & HAL_TXDESC_NOACK) == 0) { /* SIFS + ACK */
2251 ctsduration += ath_hal_computetxtime(ah,
2252 rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
2253 }
2254 } else
2255 ctsrate = 0;
2256
2257 /*
2258 * For now use the antenna on which the last good
2259 * frame was received. We assume this field is
2260 * initialized to 0, which gives us ``auto'' or the
2261 * ``default'' antenna.
2262 */
2263 if (an->an_tx_antenna) {
2264 antenna = an->an_tx_antenna;
2265 } else {
2266 antenna = an->an_rx_hist[an->an_rx_hist_next].arh_antenna;
2267 }
2268
2269 #if NBPFILTER > 0
2270 if (ic->ic_rawbpf)
2271 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
2272
2273 if (sc->sc_drvbpf) {
2274 sc->sc_txtap.wt_flags = 0;
2275 if (shortPreamble)
2276 sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2277 if (!ath_softcrypto && iswep)
2278 sc->sc_txtap.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2279 sc->sc_txtap.wt_rate = ni->ni_rates.rs_rates[ni->ni_txrate] &
2280 IEEE80211_RATE_VAL;
2281 sc->sc_txtap.wt_txpower = 30;
2282 sc->sc_txtap.wt_antenna = antenna;
2283
2284 bpf_mtap_hdr(sc->sc_drvbpf, &sc->sc_txtap, sc->sc_txtap_len,
2285 m0, BPF_DIRECTION_OUT);
2286 }
2287 #endif
2288
2289 /*
2290 * Formulate first tx descriptor with tx controls.
2291 */
2292 tries = IEEE80211_IS_MULTICAST(wh->i_addr1) ? 1 : 15;
2293 /* XXX check return value? */
2294 ath_hal_setup_tx_desc(ah, ds
2295 , pktlen /* packet length */
2296 , hdrlen /* header length */
2297 , atype /* Atheros packet type */
2298 , 60 /* txpower XXX */
2299 , txrate, tries /* series 0 rate/tries */
2300 , iswep ? sc->sc_ic.ic_wep_txkey : HAL_TXKEYIX_INVALID
2301 , antenna /* antenna mode */
2302 , flags /* flags */
2303 , ctsrate /* rts/cts rate */
2304 , ctsduration /* rts/cts duration */
2305 );
2306 #ifdef notyet
2307 ath_hal_setup_xtx_desc(ah, ds
2308 , AH_FALSE /* short preamble */
2309 , 0, 0 /* series 1 rate/tries */
2310 , 0, 0 /* series 2 rate/tries */
2311 , 0, 0 /* series 3 rate/tries */
2312 );
2313 #endif
2314 /*
2315 * Fill in the remainder of the descriptor info.
2316 */
2317 for (i = 0; i < bf->bf_nseg; i++, ds++) {
2318 ds->ds_data = bf->bf_segs[i].ds_addr;
2319 if (i == bf->bf_nseg - 1) {
2320 ds->ds_link = 0;
2321 } else {
2322 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
2323 }
2324 ath_hal_fill_tx_desc(ah, ds
2325 , bf->bf_segs[i].ds_len /* segment length */
2326 , i == 0 /* first segment */
2327 , i == bf->bf_nseg - 1 /* last segment */
2328 );
2329 DPRINTF(ATH_DEBUG_XMIT,
2330 ("%s: %d: %08x %08x %08x %08x %08x %08x\n",
2331 __func__, i, ds->ds_link, ds->ds_data,
2332 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]));
2333 }
2334
2335 /*
2336 * Insert the frame on the outbound list and
2337 * pass it on to the hardware.
2338 */
2339 s = splnet();
2340 TAILQ_INSERT_TAIL(&sc->sc_txq, bf, bf_list);
2341 if (sc->sc_txlink == NULL) {
2342 ath_hal_put_tx_buf(ah, sc->sc_txhalq[hwqueue], bf->bf_daddr);
2343 DPRINTF(ATH_DEBUG_XMIT, ("%s: TXDP0 = %p (%p)\n", __func__,
2344 (caddr_t)bf->bf_daddr, bf->bf_desc));
2345 } else {
2346 *sc->sc_txlink = bf->bf_daddr;
2347 DPRINTF(ATH_DEBUG_XMIT, ("%s: link(%p)=%p (%p)\n", __func__,
2348 sc->sc_txlink, (caddr_t)bf->bf_daddr, bf->bf_desc));
2349 }
2350 sc->sc_txlink = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
2351 splx(s);
2352
2353 ath_hal_tx_start(ah, sc->sc_txhalq[hwqueue]);
2354 return 0;
2355 }
2356
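/*
 * Transmit completion processing: reap completed tx descriptors,
 * feed the results to the rssadapt rate control, release the node
 * reference and return the buffer to the free list, then restart
 * the transmit path.
 */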
2357 void
2358 ath_tx_proc(void *arg, int npending)
2359 {
2360 struct ath_softc *sc = arg;
2361 struct ath_hal *ah = sc->sc_ah;
2362 struct ath_buf *bf;
2363 struct ieee80211com *ic = &sc->sc_ic;
2364 struct ifnet *ifp = &ic->ic_if;
2365 struct ath_desc *ds;
2366 struct ieee80211_node *ni;
2367 struct ath_node *an;
2368 int sr, lr, s;
2369 HAL_STATUS status;
2370
2371 for (;;) {
2372 s = splnet();
2373 bf = TAILQ_FIRST(&sc->sc_txq);
2374 if (bf == NULL) {
2375 sc->sc_txlink = NULL;
2376 splx(s);
2377 break;
2378 }
2379 /* only the last descriptor is needed */
2380 ds = &bf->bf_desc[bf->bf_nseg - 1];
2381 status = ath_hal_proc_tx_desc(ah, ds);
2382 #ifdef AR_DEBUG
2383 if (ath_debug & ATH_DEBUG_XMIT_DESC)
2384 ath_printtxbuf(bf, status == HAL_OK);
2385 #endif
2386 if (status == HAL_EINPROGRESS) {
2387 splx(s);
2388 break;
2389 }
2390 TAILQ_REMOVE(&sc->sc_txq, bf, bf_list);
2391 splx(s);
2392
2393 ni = bf->bf_node;
2394 if (ni != NULL) {
2395 an = (struct ath_node *) ni;
2396 if (ds->ds_txstat.ts_status == 0) {
2397 if (bf->bf_id.id_node != NULL)
2398 ieee80211_rssadapt_raise_rate(ic,
2399 &an->an_rssadapt, &bf->bf_id);
2400 an->an_tx_antenna = ds->ds_txstat.ts_antenna;
2401 } else {
2402 if (bf->bf_id.id_node != NULL)
2403 ieee80211_rssadapt_lower_rate(ic, ni,
2404 &an->an_rssadapt, &bf->bf_id);
2405 if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY)
2406 sc->sc_stats.ast_tx_xretries++;
2407 if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO)
2408 sc->sc_stats.ast_tx_fifoerr++;
2409 if (ds->ds_txstat.ts_status & HAL_TXERR_FILT)
2410 sc->sc_stats.ast_tx_filtered++;
2411 an->an_tx_antenna = 0; /* invalidate */
2412 }
2413 sr = ds->ds_txstat.ts_shortretry;
2414 lr = ds->ds_txstat.ts_longretry;
2415 sc->sc_stats.ast_tx_shortretry += sr;
2416 sc->sc_stats.ast_tx_longretry += lr;
2417 /*
2418 * Reclaim reference to node.
2419 *
2420 * NB: the node may be reclaimed here if, for example
2421 * this is a DEAUTH message that was sent and the
2422 * node was timed out due to inactivity.
2423 */
2424 ieee80211_release_node(ic, ni);
2425 }
2426 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 0,
2427 bf->bf_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2428 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2429 m_freem(bf->bf_m);
2430 bf->bf_m = NULL;
2431 bf->bf_node = NULL;
2432
2433 s = splnet();
2434 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
2435 splx(s);
2436 }
2437 ifq_clr_oactive(&ifp->if_snd);
2438 sc->sc_tx_timer = 0;
2439
2440 ath_start(ifp);
2441 }
2442
2443 /*
2444 * Drain the transmit queue and reclaim resources.
2445 */
2446 void
2447 ath_draintxq(struct ath_softc *sc)
2448 {
2449 struct ath_hal *ah = sc->sc_ah;
2450 struct ieee80211com *ic = &sc->sc_ic;
2451 struct ifnet *ifp = &ic->ic_if;
2452 struct ieee80211_node *ni;
2453 struct ath_buf *bf;
2454 int s, i;
2455
2456 /* XXX return value */
2457 if (!sc->sc_invalid) {
2458 for (i = 0; i <= HAL_TX_QUEUE_ID_DATA_MAX; i++) {
2459 /* don't touch the hardware if marked invalid */
2460 (void) ath_hal_stop_tx_dma(ah, sc->sc_txhalq[i]);
2461 DPRINTF(ATH_DEBUG_RESET,
2462 ("%s: tx queue %d (%p), link %p\n", __func__, i,
2463 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah,
2464 sc->sc_txhalq[i]), sc->sc_txlink));
2465 }
2466 (void) ath_hal_stop_tx_dma(ah, sc->sc_bhalq);
2467 DPRINTF(ATH_DEBUG_RESET,
2468 ("%s: beacon queue (%p)\n", __func__,
2469 (caddr_t)(u_intptr_t)ath_hal_get_tx_buf(ah, sc->sc_bhalq)));
2470 }
2471 for (;;) {
2472 s = splnet();
2473 bf = TAILQ_FIRST(&sc->sc_txq);
2474 if (bf == NULL) {
2475 sc->sc_txlink = NULL;
2476 splx(s);
2477 break;
2478 }
2479 TAILQ_REMOVE(&sc->sc_txq, bf, bf_list);
2480 splx(s);
2481 #ifdef AR_DEBUG
2482 if (ath_debug & ATH_DEBUG_RESET) {
2483 ath_printtxbuf(bf,
2484 ath_hal_proc_tx_desc(ah, bf->bf_desc) == HAL_OK);
2485 }
2486 #endif /* AR_DEBUG */
2487 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2488 m_freem(bf->bf_m);
2489 bf->bf_m = NULL;
2490 ni = bf->bf_node;
2491 bf->bf_node = NULL;
2492 s = splnet();
2493 if (ni != NULL) {
2494 /*
2495 * Reclaim node reference.
2496 */
2497 ieee80211_release_node(ic, ni);
2498 }
2499 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
2500 splx(s);
2501 }
2502 ifq_clr_oactive(&ifp->if_snd);
2503 sc->sc_tx_timer = 0;
2504 }
2505
2506 /*
2507 * Disable the receive h/w in preparation for a reset.
2508 */
2509 void
2510 ath_stoprecv(struct ath_softc *sc)
2511 {
2512 #define PA2DESC(_sc, _pa) \
2513 ((struct ath_desc *)((caddr_t)(_sc)->sc_desc + \
2514 ((_pa) - (_sc)->sc_desc_paddr)))
2515 struct ath_hal *ah = sc->sc_ah;
2516
2517 ath_hal_stop_pcu_recv(ah); /* disable PCU */
2518 ath_hal_set_rx_filter(ah, 0); /* clear recv filter */
2519 ath_hal_stop_rx_dma(ah); /* disable DMA engine */
2520 #ifdef AR_DEBUG
2521 if (ath_debug & ATH_DEBUG_RESET) {
2522 struct ath_buf *bf;
2523
2524 printf("%s: rx queue %p, link %p\n", __func__,
2525 (caddr_t)(u_intptr_t)ath_hal_get_rx_buf(ah), sc->sc_rxlink);
2526 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
2527 struct ath_desc *ds = bf->bf_desc;
2528 if (ath_hal_proc_rx_desc(ah, ds, bf->bf_daddr,
2529 PA2DESC(sc, ds->ds_link)) == HAL_OK)
2530 ath_printrxbuf(bf, 1);
2531 }
2532 }
2533 #endif
2534 sc->sc_rxlink = NULL; /* just in case */
2535 #undef PA2DESC
2536 }
2537
2538 /*
2539 * Enable the receive h/w following a reset.
2540 */
2541 int
2542 ath_startrecv(struct ath_softc *sc)
2543 {
2544 struct ath_hal *ah = sc->sc_ah;
2545 struct ath_buf *bf;
2546
2547 sc->sc_rxlink = NULL;
2548 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
2549 int error = ath_rxbuf_init(sc, bf);
2550 if (error != 0) {
2551 DPRINTF(ATH_DEBUG_RECV,
2552 ("%s: ath_rxbuf_init failed %d\n",
2553 __func__, error));
2554 return error;
2555 }
2556 }
2557
2558 bf = TAILQ_FIRST(&sc->sc_rxbuf);
2559 ath_hal_put_rx_buf(ah, bf->bf_daddr);
2560 ath_hal_start_rx(ah); /* enable recv descriptors */
2561 ath_mode_init(sc); /* set filters, etc. */
2562 ath_hal_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
2563 return 0;
2564 }
2565
2566 /*
2567 * Set/change channels. If the channel is really being changed,
2568 * it's done by resetting the chip. To accomplish this we must
2569 * first clean up any pending DMA, then restart things much as
2570 * ath_init does.
2571 */
2572 int
2573 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
2574 {
2575 struct ath_hal *ah = sc->sc_ah;
2576 struct ieee80211com *ic = &sc->sc_ic;
2577 struct ifnet *ifp = &ic->ic_if;
2578
2579 DPRINTF(ATH_DEBUG_ANY, ("%s: %u (%u MHz) -> %u (%u MHz)\n", __func__,
2580 ieee80211_chan2ieee(ic, ic->ic_ibss_chan),
2581 ic->ic_ibss_chan->ic_freq,
2582 ieee80211_chan2ieee(ic, chan), chan->ic_freq));
2583 if (chan != ic->ic_ibss_chan) {
2584 HAL_STATUS status;
2585 HAL_CHANNEL hchan;
2586 enum ieee80211_phymode mode;
2587
2588 /*
2589 * To switch channels clear any pending DMA operations;
2590 * wait long enough for the RX fifo to drain, reset the
2591 * hardware at the new frequency, and then re-enable
2592 * the relevant bits of the h/w.
2593 */
2594 ath_hal_set_intr(ah, 0); /* disable interrupts */
2595 ath_draintxq(sc); /* clear pending tx frames */
2596 ath_stoprecv(sc); /* turn off frame recv */
2597 /*
2598 * Convert to a HAL channel description.
2599 */
2600 hchan.channel = chan->ic_freq;
2601 hchan.channelFlags = chan->ic_flags;
2602 if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE,
2603 &status)) {
2604 printf("%s: ath_chan_set: unable to reset "
2605 "channel %u (%u MHz)\n", ifp->if_xname,
2606 ieee80211_chan2ieee(ic, chan), chan->ic_freq);
2607 return EIO;
2608 }
2609 ath_set_slot_time(sc);
2610 /*
2611 * Re-enable rx framework.
2612 */
2613 if (ath_startrecv(sc) != 0) {
2614 printf("%s: ath_chan_set: unable to restart recv "
2615 "logic\n", ifp->if_xname);
2616 return EIO;
2617 }
2618
2619 #if NBPFILTER > 0
2620 /*
2621 * Update BPF state.
2622 */
2623 sc->sc_txtap.wt_chan_freq = sc->sc_rxtap.wr_chan_freq =
2624 htole16(chan->ic_freq);
2625 sc->sc_txtap.wt_chan_flags = sc->sc_rxtap.wr_chan_flags =
2626 htole16(chan->ic_flags);
2627 #endif
2628
2629 /*
2630 * Change channels and update the h/w rate map
2631 * if we're switching; e.g. 11a to 11b/g.
2632 */
2633 ic->ic_ibss_chan = chan;
2634 mode = ieee80211_chan2mode(ic, chan);
2635 if (mode != sc->sc_curmode)
2636 ath_setcurmode(sc, mode);
2637
2638 /*
2639 * Re-enable interrupts.
2640 */
2641 ath_hal_set_intr(ah, sc->sc_imask);
2642 }
2643 return 0;
2644 }
2645
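/*
 * Scan timer callback: advance to the next channel while scanning.
 */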
2646 void
2647 ath_next_scan(void *arg)
2648 {
2649 struct ath_softc *sc = arg;
2650 struct ieee80211com *ic = &sc->sc_ic;
2651 struct ifnet *ifp = &ic->ic_if;
2652 int s;
2653
2654 /* don't call ath_start w/o network interrupts blocked */
2655 s = splnet();
2656
2657 if (ic->ic_state == IEEE80211_S_SCAN)
2658 ieee80211_next_scan(ifp);
2659 splx(s);
2660 }
2661
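/*
 * Select the short (9us) slot time when the 802.11 layer has
 * negotiated short slot operation.
 */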
2662 int
2663 ath_set_slot_time(struct ath_softc *sc)
2664 {
2665 struct ath_hal *ah = sc->sc_ah;
2666 struct ieee80211com *ic = &sc->sc_ic;
2667
2668 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2669 return (ath_hal_set_slot_time(ah, HAL_SLOT_TIME_9));
2670
2671 return (0);
2672 }
2673
2674 /*
2675 * Periodically recalibrate the PHY to account
2676 * for temperature/environment changes.
2677 */
2678 void
2679 ath_calibrate(void *arg)
2680 {
2681 struct ath_softc *sc = arg;
2682 struct ath_hal *ah = sc->sc_ah;
2683 struct ieee80211com *ic = &sc->sc_ic;
2684 struct ieee80211_channel *c;
2685 HAL_CHANNEL hchan;
2686 int s;
2687
2688 sc->sc_stats.ast_per_cal++;
2689
2690 /*
2691 * Convert to a HAL channel description.
2692 */
2693 c = ic->ic_ibss_chan;
2694 hchan.channel = c->ic_freq;
2695 hchan.channelFlags = c->ic_flags;
2696
2697 s = splnet();
2698 DPRINTF(ATH_DEBUG_CALIBRATE,
2699 ("%s: channel %u/%x\n", __func__, c->ic_freq, c->ic_flags));
2700
2701 if (ath_hal_get_rf_gain(ah) == HAL_RFGAIN_NEED_CHANGE) {
2702 /*
2703 * Rfgain is out of bounds; reset the chip
2704 * to load new gain values.
2705 */
2706 sc->sc_stats.ast_per_rfgain++;
2707 ath_reset(sc, 1);
2708 }
2709 if (!ath_hal_calibrate(ah, &hchan)) {
2710 DPRINTF(ATH_DEBUG_ANY,
2711 ("%s: calibration of channel %u failed\n",
2712 __func__, c->ic_freq));
2713 sc->sc_stats.ast_per_calfail++;
2714 }
2715 timeout_add_sec(&sc->sc_cal_to, ath_calinterval);
2716 splx(s);
2717 }
2718
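/*
 * Update the device LED (and the GPIO-driven soft LED, if enabled)
 * to reflect the new 802.11 state.
 */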
2719 void
2720 ath_ledstate(struct ath_softc *sc, enum ieee80211_state state)
2721 {
2722 HAL_LED_STATE led = HAL_LED_INIT;
2723 u_int32_t softled = AR5K_SOFTLED_OFF;
2724
2725 switch (state) {
2726 case IEEE80211_S_INIT:
2727 break;
2728 case IEEE80211_S_SCAN:
2729 led = HAL_LED_SCAN;
2730 break;
2731 case IEEE80211_S_AUTH:
2732 led = HAL_LED_AUTH;
2733 break;
2734 case IEEE80211_S_ASSOC:
2735 led = HAL_LED_ASSOC;
2736 softled = AR5K_SOFTLED_ON;
2737 break;
2738 case IEEE80211_S_RUN:
2739 led = HAL_LED_RUN;
2740 softled = AR5K_SOFTLED_ON;
2741 break;
2742 }
2743
2744 ath_hal_set_ledstate(sc->sc_ah, led);
2745 if (sc->sc_softled) {
2746 ath_hal_set_gpio_output(sc->sc_ah, AR5K_SOFTLED_PIN);
2747 ath_hal_set_gpio(sc->sc_ah, AR5K_SOFTLED_PIN, softled);
2748 }
2749 }
2750
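/*
 * 802.11 state transition: program the channel, rx filter and
 * association id, set up beaconing for AP/IBSS modes, then let the
 * parent net80211 handler finish and re-arm the periodic timers.
 */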
2751 int
2752 ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
2753 {
2754 struct ifnet *ifp = &ic->ic_if;
2755 struct ath_softc *sc = ifp->if_softc;
2756 struct ath_hal *ah = sc->sc_ah;
2757 struct ieee80211_node *ni;
2758 const u_int8_t *bssid;
2759 int error, i;
2760
2761 u_int32_t rfilt;
2762
2763 DPRINTF(ATH_DEBUG_ANY, ("%s: %s -> %s\n", __func__,
2764 ieee80211_state_name[ic->ic_state],
2765 ieee80211_state_name[nstate]));
2766
2767 timeout_del(&sc->sc_scan_to);
2768 timeout_del(&sc->sc_cal_to);
2769 ath_ledstate(sc, nstate);
2770
2771 if (nstate == IEEE80211_S_INIT) {
2772 timeout_del(&sc->sc_rssadapt_to);
2773 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
2774 ath_hal_set_intr(ah, sc->sc_imask);
2775 return (*sc->sc_newstate)(ic, nstate, arg);
2776 }
2777 ni = ic->ic_bss;
2778 error = ath_chan_set(sc, ni->ni_chan);
2779 if (error != 0)
2780 goto bad;
2781 rfilt = ath_calcrxfilter(sc);
2782 if (nstate == IEEE80211_S_SCAN ||
2783 ic->ic_opmode == IEEE80211_M_MONITOR) {
2784 bssid = sc->sc_broadcast_addr;
2785 } else {
2786 bssid = ni->ni_bssid;
2787 }
2788 ath_hal_set_rx_filter(ah, rfilt);
2789 DPRINTF(ATH_DEBUG_ANY, ("%s: RX filter 0x%x bssid %s\n",
2790 __func__, rfilt, ether_sprintf((u_char*)bssid)));
2791
2792 if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA) {
2793 ath_hal_set_associd(ah, bssid, ni->ni_associd);
2794 } else {
2795 ath_hal_set_associd(ah, bssid, 0);
2796 }
2797
2798 if (!ath_softcrypto && (ic->ic_flags & IEEE80211_F_WEPON)) {
2799 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2800 if (ath_hal_is_key_valid(ah, i))
2801 ath_hal_set_key_lladdr(ah, i, bssid);
2802 }
2803 }
2804
2805 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
2806 /* nothing to do */
2807 } else if (nstate == IEEE80211_S_RUN) {
2808 DPRINTF(ATH_DEBUG_ANY, ("%s(RUN): "
2809 "ic_flags=0x%08x iv=%d bssid=%s "
2810 "capinfo=0x%04x chan=%d\n",
2811 __func__,
2812 ic->ic_flags,
2813 ni->ni_intval,
2814 ether_sprintf(ni->ni_bssid),
2815 ni->ni_capinfo,
2816 ieee80211_chan2ieee(ic, ni->ni_chan)));
2817
2818 /*
2819 * Allocate and setup the beacon frame for AP or adhoc mode.
2820 */
2821 #ifndef IEEE80211_STA_ONLY
2822 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2823 ic->ic_opmode == IEEE80211_M_IBSS) {
2824 error = ath_beacon_alloc(sc, ni);
2825 if (error != 0)
2826 goto bad;
2827 }
2828 #endif
2829 /*
2830 * Configure the beacon and sleep timers.
2831 */
2832 ath_beacon_config(sc);
2833 } else {
2834 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
2835 ath_hal_set_intr(ah, sc->sc_imask);
2836 }
2837
2838 /*
2839 * Invoke the parent method to complete the work.
2840 */
2841 error = (*sc->sc_newstate)(ic, nstate, arg);
2842
2843 if (nstate == IEEE80211_S_RUN) {
2844 /* start periodic recalibration timer */
2845 timeout_add_sec(&sc->sc_cal_to, ath_calinterval);
2846
2847 if (ic->ic_opmode != IEEE80211_M_MONITOR)
2848 timeout_add_msec(&sc->sc_rssadapt_to, 100);
2849 } else if (nstate == IEEE80211_S_SCAN) {
2850 /* start ap/neighbor scan timer */
2851 timeout_add_msec(&sc->sc_scan_to, ath_dwelltime);
2852 }
2853 bad:
2854 return error;
2855 }
2856
2857 #ifndef IEEE80211_STA_ONLY
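/*
 * Management frame receive hook: after net80211 has processed the
 * frame, use beacon/probe response TSF timestamps to merge with a
 * neighboring IBSS when required.
 */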
2858 void
2859 ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m,
2860 struct ieee80211_node *ni, struct ieee80211_rxinfo *rxi, int subtype)
2861 {
2862 struct ath_softc *sc = (struct ath_softc*)ic->ic_softc;
2863 struct ath_hal *ah = sc->sc_ah;
2864
2865 (*sc->sc_recv_mgmt)(ic, m, ni, rxi, subtype);
2866
2867 switch (subtype) {
2868 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
2869 case IEEE80211_FC0_SUBTYPE_BEACON:
2870 if (ic->ic_opmode != IEEE80211_M_IBSS ||
2871 ic->ic_state != IEEE80211_S_RUN)
2872 break;
2873 if (ieee80211_ibss_merge(ic, ni, ath_hal_get_tsf64(ah)) ==
2874 ENETRESET)
2875 ath_hal_set_associd(ah, ic->ic_bss->ni_bssid, 0);
2876 break;
2877 default:
2878 break;
2879 }
2880 return;
2881 }
2882 #endif
2883
2884 /*
2885 * Setup driver-specific state for a newly associated node.
2886 * Note that we're also called on a re-associate; the isnew
2887 * param tells us whether this is the first association or not.
2888 */
2889 void
2890 ath_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
2891 {
2892 if (ic->ic_opmode == IEEE80211_M_MONITOR)
2893 return;
2894 }
2895
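/*
 * Query the HAL for the channels permitted by the regulatory domain
 * and fill in the net80211 channel table.
 */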
2896 int
2897 ath_getchannels(struct ath_softc *sc, HAL_BOOL outdoor, HAL_BOOL xchanmode)
2898 {
2899 struct ieee80211com *ic = &sc->sc_ic;
2900 struct ifnet *ifp = &ic->ic_if;
2901 struct ath_hal *ah = sc->sc_ah;
2902 HAL_CHANNEL *chans;
2903 int i, ix, nchan;
2904
2905 sc->sc_nchan = 0;
2906 chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL),
2907 M_TEMP, M_NOWAIT);
2908 if (chans == NULL) {
2909 printf("%s: unable to allocate channel table\n", ifp->if_xname);
2910 return ENOMEM;
2911 }
2912 if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan,
2913 HAL_MODE_ALL, outdoor, xchanmode)) {
2914 printf("%s: unable to collect channel list from hal\n",
2915 ifp->if_xname);
2916 free(chans, M_TEMP, 0);
2917 return EINVAL;
2918 }
2919
2920 /*
2921 * Convert HAL channels to ieee80211 ones and insert
2922 * them in the table according to their channel number.
2923 */
2924 for (i = 0; i < nchan; i++) {
2925 HAL_CHANNEL *c = &chans[i];
2926 ix = ieee80211_mhz2ieee(c->channel, c->channelFlags);
2927 if (ix > IEEE80211_CHAN_MAX) {
2928 printf("%s: bad hal channel %u (%u/%x) ignored\n",
2929 ifp->if_xname, ix, c->channel, c->channelFlags);
2930 continue;
2931 }
2932 DPRINTF(ATH_DEBUG_ANY,
2933 ("%s: HAL channel %d/%d freq %d flags %#04x idx %d\n",
2934 sc->sc_dev.dv_xname, i, nchan, c->channel, c->channelFlags,
2935 ix));
2936 /* NB: flags are known to be compatible */
2937 if (ic->ic_channels[ix].ic_freq == 0) {
2938 ic->ic_channels[ix].ic_freq = c->channel;
2939 ic->ic_channels[ix].ic_flags = c->channelFlags;
2940 } else {
2941 /* channels overlap; e.g. 11g and 11b */
2942 ic->ic_channels[ix].ic_flags |= c->channelFlags;
2943 }
2944 /* count valid channels */
2945 sc->sc_nchan++;
2946 }
2947 free(chans, M_TEMP, 0);
2948
2949 if (sc->sc_nchan < 1) {
2950 printf("%s: no valid channels for regdomain %s(%u)\n",
2951 ifp->if_xname, ieee80211_regdomain2name(ah->ah_regdomain),
2952 ah->ah_regdomain);
2953 return ENOENT;
2954 }
2955
2956 /* set an initial channel */
2957 ic->ic_ibss_chan = &ic->ic_channels[0];
2958
2959 return 0;
2960 }
2961
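/*
 * Fetch the HAL rate table for a phy mode and publish it as the
 * supported rate set for that mode.
 */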
2962 int
2963 ath_rate_setup(struct ath_softc *sc, u_int mode)
2964 {
2965 struct ath_hal *ah = sc->sc_ah;
2966 struct ieee80211com *ic = &sc->sc_ic;
2967 const HAL_RATE_TABLE *rt;
2968 struct ieee80211_rateset *rs;
2969 int i, maxrates;
2970
2971 switch (mode) {
2972 case IEEE80211_MODE_11A:
2973 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11A);
2974 break;
2975 case IEEE80211_MODE_11B:
2976 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11B);
2977 break;
2978 case IEEE80211_MODE_11G:
2979 sc->sc_rates[mode] = ath_hal_get_rate_table(ah, HAL_MODE_11G);
2980 break;
2981 default:
2982 DPRINTF(ATH_DEBUG_ANY,
2983 ("%s: invalid mode %u\n", __func__, mode));
2984 return 0;
2985 }
2986 rt = sc->sc_rates[mode];
2987 if (rt == NULL)
2988 return 0;
2989 if (rt->rateCount > IEEE80211_RATE_MAXSIZE) {
2990 DPRINTF(ATH_DEBUG_ANY,
2991 ("%s: rate table too small (%u > %u)\n",
2992 __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE));
2993 maxrates = IEEE80211_RATE_MAXSIZE;
2994 } else {
2995 maxrates = rt->rateCount;
2996 }
2997 rs = &ic->ic_sup_rates[mode];
2998 for (i = 0; i < maxrates; i++)
2999 rs->rs_rates[i] = rt->info[i].dot11Rate;
3000 rs->rs_nrates = maxrates;
3001 return 1;
3002 }
3003
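/*
 * Switch the current phy mode: rebuild the rate index and hardware
 * rate code maps and clamp the bss node's tx rate to the new table.
 */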
3004 void
3005 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
3006 {
3007 const HAL_RATE_TABLE *rt;
3008 struct ieee80211com *ic = &sc->sc_ic;
3009 struct ieee80211_node *ni;
3010 int i;
3011
3012 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
3013 rt = sc->sc_rates[mode];
3014 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
3015 for (i = 0; i < rt->rateCount; i++)
3016 sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i;
3017 bzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
3018 for (i = 0; i < 32; i++)
3019 sc->sc_hwmap[i] = rt->info[rt->rateCodeToIndex[i]].dot11Rate;
3020 sc->sc_currates = rt;
3021 sc->sc_curmode = mode;
3022 ni = ic->ic_bss;
3023 ni->ni_rates.rs_nrates = sc->sc_currates->rateCount;
3024 if (ni->ni_txrate >= ni->ni_rates.rs_nrates)
3025 ni->ni_txrate = 0;
3026 }
3027
3028 void
3029 ath_rssadapt_updatenode(void *arg, struct ieee80211_node *ni)
3030 {
3031 struct ath_node *an = ATH_NODE(ni);
3032
3033 ieee80211_rssadapt_updatestats(&an->an_rssadapt);
3034 }
3035
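/*
 * Periodic rssadapt timeout: update rate adaptation statistics for
 * the bss node (STA mode) or for all nodes, then re-arm the timer.
 */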
3036 void
3037 ath_rssadapt_updatestats(void *arg)
3038 {
3039 struct ath_softc *sc = (struct ath_softc *)arg;
3040 struct ieee80211com *ic = &sc->sc_ic;
3041
3042 if (ic->ic_opmode == IEEE80211_M_STA) {
3043 ath_rssadapt_updatenode(arg, ic->ic_bss);
3044 } else {
3045 ieee80211_iterate_nodes(ic, ath_rssadapt_updatenode, arg);
3046 }
3047
3048 timeout_add_msec(&sc->sc_rssadapt_to, 100);
3049 }
3050
3051 #ifdef AR_DEBUG
3052 void
3053 ath_printrxbuf(struct ath_buf *bf, int done)
3054 {
3055 struct ath_desc *ds;
3056 int i;
3057
3058 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
3059 printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n",
3060 i, ds, (struct ath_desc *)bf->bf_daddr + i,
3061 ds->ds_link, ds->ds_data,
3062 ds->ds_ctl0, ds->ds_ctl1,
3063 ds->ds_hw[0], ds->ds_hw[1],
3064 !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!');
3065 }
3066 }
3067
3068 void
3069 ath_printtxbuf(struct ath_buf *bf, int done)
3070 {
3071 struct ath_desc *ds;
3072 int i;
3073
3074 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
3075 printf("T%d (%p %p) "
3076 "%08x %08x %08x %08x %08x %08x %08x %08x %c\n",
3077 i, ds, (struct ath_desc *)bf->bf_daddr + i,
3078 ds->ds_link, ds->ds_data,
3079 ds->ds_ctl0, ds->ds_ctl1,
3080 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
3081 !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!');
3082 }
3083 }
3084 #endif /* AR_DEBUG */
3085
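/*
 * Set up the chip's GPIO pins for gpio(4) and enable the software
 * LED on hardware known to drive its LED from a GPIO pin.
 */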
3086 int
3087 ath_gpio_attach(struct ath_softc *sc, u_int16_t devid)
3088 {
3089 struct ath_hal *ah = sc->sc_ah;
3090 struct gpiobus_attach_args gba;
3091 int i;
3092
3093 if (ah->ah_gpio_npins < 1)
3094 return 0;
3095
3096 /* Initialize gpio pins array */
3097 for (i = 0; i < ah->ah_gpio_npins && i < AR5K_MAX_GPIO; i++) {
3098 sc->sc_gpio_pins[i].pin_num = i;
3099 sc->sc_gpio_pins[i].pin_caps = GPIO_PIN_INPUT |
3100 GPIO_PIN_OUTPUT;
3101
3102 /* Set pin mode to input */
3103 ath_hal_set_gpio_input(ah, i);
3104 sc->sc_gpio_pins[i].pin_flags = GPIO_PIN_INPUT;
3105
3106 /* Get pin input */
3107 sc->sc_gpio_pins[i].pin_state = ath_hal_get_gpio(ah, i) ?
3108 GPIO_PIN_HIGH : GPIO_PIN_LOW;
3109 }
3110
3111 /* Enable GPIO-controlled software LED if available */
3112 if ((ah->ah_version == AR5K_AR5211) ||
3113 (devid == PCI_PRODUCT_ATHEROS_AR5212_IBM)) {
3114 sc->sc_softled = 1;
3115 ath_hal_set_gpio_output(ah, AR5K_SOFTLED_PIN);
3116 ath_hal_set_gpio(ah, AR5K_SOFTLED_PIN, AR5K_SOFTLED_OFF);
3117 }
3118
3119 /* Create gpio controller tag */
3120 sc->sc_gpio_gc.gp_cookie = sc;
3121 sc->sc_gpio_gc.gp_pin_read = ath_gpio_pin_read;
3122 sc->sc_gpio_gc.gp_pin_write = ath_gpio_pin_write;
3123 sc->sc_gpio_gc.gp_pin_ctl = ath_gpio_pin_ctl;
3124
3125 gba.gba_name = "gpio";
3126 gba.gba_gc = &sc->sc_gpio_gc;
3127 gba.gba_pins = sc->sc_gpio_pins;
3128 gba.gba_npins = ah->ah_gpio_npins;
3129
3130 #ifdef notyet
3131 #if NGPIO > 0
3132 if (config_found(&sc->sc_dev, &gba, gpiobus_print) == NULL)
3133 return (ENODEV);
3134 #endif
3135 #endif
3136
3137 return (0);
3138 }
3139
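/*
 * gpio(4) pin access methods: read, write and reconfigure a pin as
 * input or output through the HAL.
 */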
3140 int
3141 ath_gpio_pin_read(void *arg, int pin)
3142 {
3143 struct ath_softc *sc = arg;
3144 struct ath_hal *ah = sc->sc_ah;
3145 return (ath_hal_get_gpio(ah, pin) ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
3146 }
3147
3148 void
3149 ath_gpio_pin_write(void *arg, int pin, int value)
3150 {
3151 struct ath_softc *sc = arg;
3152 struct ath_hal *ah = sc->sc_ah;
3153 ath_hal_set_gpio(ah, pin, value ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
3154 }
3155
3156 void
3157 ath_gpio_pin_ctl(void *arg, int pin, int flags)
3158 {
3159 struct ath_softc *sc = arg;
3160 struct ath_hal *ah = sc->sc_ah;
3161
3162 if (flags & GPIO_PIN_INPUT) {
3163 ath_hal_set_gpio_input(ah, pin);
3164 } else if (flags & GPIO_PIN_OUTPUT) {
3165 ath_hal_set_gpio_output(ah, pin);
3166 }
3167 }
3168