1 /* $OpenBSD: if_ral.c,v 1.150 2024/05/23 03:21:08 jsg Exp $ */
2
3 /*-
4 * Copyright (c) 2005, 2006
5 * Damien Bergamini <damien.bergamini@free.fr>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*-
21 * Ralink Technology RT2500USB chipset driver
22 * http://www.ralinktech.com.tw/
23 */
24
25 #include "bpfilter.h"
26
27 #include <sys/param.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/systm.h>
31 #include <sys/timeout.h>
32 #include <sys/device.h>
33 #include <sys/endian.h>
34
35 #include <machine/intr.h>
36
37 #if NBPFILTER > 0
38 #include <net/bpf.h>
39 #endif
40 #include <net/if.h>
41 #include <net/if_dl.h>
42 #include <net/if_media.h>
43
44 #include <netinet/in.h>
45 #include <netinet/if_ether.h>
46
47 #include <net80211/ieee80211_var.h>
48 #include <net80211/ieee80211_amrr.h>
49 #include <net80211/ieee80211_radiotap.h>
50
51 #include <dev/usb/usb.h>
52 #include <dev/usb/usbdi.h>
53 #include <dev/usb/usbdevs.h>
54
55 #include <dev/usb/if_ralreg.h>
56 #include <dev/usb/if_ralvar.h>
57
58 #ifdef URAL_DEBUG
59 #define DPRINTF(x) do { if (ural_debug) printf x; } while (0)
60 #define DPRINTFN(n, x) do { if (ural_debug >= (n)) printf x; } while (0)
61 int ural_debug = 0;
62 #else
63 #define DPRINTF(x)
64 #define DPRINTFN(n, x)
65 #endif
66
/*
 * Supported device vendor/product IDs.  ural_match() probes this table
 * with usb_lookup(); all entries are RT2570-based USB 2.0 adapters.
 */
static const struct usb_devno ural_devs[] = {
	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570 },
	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570_2 },
	{ USB_VENDOR_BELKIN,		USB_PRODUCT_BELKIN_F5D7050 },
	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54G },
	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54GP },
	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_HU200TS },
	{ USB_VENDOR_CONCEPTRONIC2,	USB_PRODUCT_CONCEPTRONIC2_C54RU },
	{ USB_VENDOR_DLINK,		USB_PRODUCT_DLINK_RT2570 },
	{ USB_VENDOR_GIGABYTE,		USB_PRODUCT_GIGABYTE_GNWBKG },
	{ USB_VENDOR_GUILLEMOT,		USB_PRODUCT_GUILLEMOT_HWGUSB254 },
	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54 },
	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54AI },
	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54YB },
	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_NINWIFI },
	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570 },
	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_2 },
	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_3 },
	{ USB_VENDOR_NOVATECH,		USB_PRODUCT_NOVATECH_NV902W },
	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570 },
	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_2 },
	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_3 },
	{ USB_VENDOR_SPHAIRON,		USB_PRODUCT_SPHAIRON_UB801R },
	{ USB_VENDOR_SURECOM,		USB_PRODUCT_SURECOM_RT2570 },
	{ USB_VENDOR_VTECH,		USB_PRODUCT_VTECH_RT2570 },
	{ USB_VENDOR_ZINWELL,		USB_PRODUCT_ZINWELL_RT2570 }
};
95
96 int ural_alloc_tx_list(struct ural_softc *);
97 void ural_free_tx_list(struct ural_softc *);
98 int ural_alloc_rx_list(struct ural_softc *);
99 void ural_free_rx_list(struct ural_softc *);
100 int ural_media_change(struct ifnet *);
101 void ural_next_scan(void *);
102 void ural_task(void *);
103 int ural_newstate(struct ieee80211com *, enum ieee80211_state,
104 int);
105 void ural_txeof(struct usbd_xfer *, void *, usbd_status);
106 void ural_rxeof(struct usbd_xfer *, void *, usbd_status);
107 #if NBPFILTER > 0
108 uint8_t ural_rxrate(const struct ural_rx_desc *);
109 #endif
110 int ural_ack_rate(struct ieee80211com *, int);
111 uint16_t ural_txtime(int, int, uint32_t);
112 uint8_t ural_plcp_signal(int);
113 void ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *,
114 uint32_t, int, int);
115 #ifndef IEEE80211_STA_ONLY
116 int ural_tx_bcn(struct ural_softc *, struct mbuf *,
117 struct ieee80211_node *);
118 #endif
119 int ural_tx_data(struct ural_softc *, struct mbuf *,
120 struct ieee80211_node *);
121 void ural_start(struct ifnet *);
122 void ural_watchdog(struct ifnet *);
123 int ural_ioctl(struct ifnet *, u_long, caddr_t);
124 void ural_eeprom_read(struct ural_softc *, uint16_t, void *, int);
125 uint16_t ural_read(struct ural_softc *, uint16_t);
126 void ural_read_multi(struct ural_softc *, uint16_t, void *, int);
127 void ural_write(struct ural_softc *, uint16_t, uint16_t);
128 void ural_write_multi(struct ural_softc *, uint16_t, void *, int);
129 void ural_bbp_write(struct ural_softc *, uint8_t, uint8_t);
130 uint8_t ural_bbp_read(struct ural_softc *, uint8_t);
131 void ural_rf_write(struct ural_softc *, uint8_t, uint32_t);
132 void ural_set_chan(struct ural_softc *, struct ieee80211_channel *);
133 void ural_disable_rf_tune(struct ural_softc *);
134 void ural_enable_tsf_sync(struct ural_softc *);
135 void ural_update_slot(struct ural_softc *);
136 void ural_set_txpreamble(struct ural_softc *);
137 void ural_set_basicrates(struct ural_softc *);
138 void ural_set_bssid(struct ural_softc *, const uint8_t *);
139 void ural_set_macaddr(struct ural_softc *, const uint8_t *);
140 void ural_update_promisc(struct ural_softc *);
141 const char *ural_get_rf(int);
142 void ural_read_eeprom(struct ural_softc *);
143 int ural_bbp_init(struct ural_softc *);
144 void ural_set_txantenna(struct ural_softc *, int);
145 void ural_set_rxantenna(struct ural_softc *, int);
146 int ural_init(struct ifnet *);
147 void ural_stop(struct ifnet *, int);
148 void ural_newassoc(struct ieee80211com *, struct ieee80211_node *,
149 int);
150 void ural_amrr_start(struct ural_softc *, struct ieee80211_node *);
151 void ural_amrr_timeout(void *);
152 void ural_amrr_update(struct usbd_xfer *, void *,
153 usbd_status status);
154
/*
 * Default MAC register settings: register/value pairs expanded from
 * RAL_DEF_MAC (if_ralreg.h), written out during initialization.
 */
static const struct {
	uint16_t	reg;
	uint16_t	val;
} ural_def_mac[] = {
	RAL_DEF_MAC
};

/*
 * Default BBP (baseband processor) register settings, expanded from
 * RAL_DEF_BBP (if_ralreg.h).
 */
static const struct {
	uint8_t	reg;
	uint8_t	val;
} ural_def_bbp[] = {
	RAL_DEF_BBP
};

/* Per-RF-chip register 2 (channel tuning) values, one entry per channel. */
static const uint32_t ural_rf2522_r2[]    = RAL_RF2522_R2;
static const uint32_t ural_rf2523_r2[]    = RAL_RF2523_R2;
static const uint32_t ural_rf2524_r2[]    = RAL_RF2524_R2;
static const uint32_t ural_rf2525_r2[]    = RAL_RF2525_R2;
static const uint32_t ural_rf2525_hi_r2[] = RAL_RF2525_HI_R2;
static const uint32_t ural_rf2525e_r2[]   = RAL_RF2525E_R2;
static const uint32_t ural_rf2526_hi_r2[] = RAL_RF2526_HI_R2;
static const uint32_t ural_rf2526_r2[]    = RAL_RF2526_R2;
177
178 int ural_match(struct device *, void *, void *);
179 void ural_attach(struct device *, struct device *, void *);
180 int ural_detach(struct device *, int);
181
/* autoconf(9) driver glue: device class is network interface */
struct cfdriver ural_cd = {
	NULL, "ural", DV_IFNET
};

/* autoconf(9) attachment: softc size and match/attach/detach entry points */
const struct cfattach ural_ca = {
	sizeof(struct ural_softc), ural_match, ural_attach, ural_detach
};
189
190 int
ural_match(struct device * parent,void * match,void * aux)191 ural_match(struct device *parent, void *match, void *aux)
192 {
193 struct usb_attach_arg *uaa = aux;
194
195 if (uaa->configno != RAL_CONFIG_NO || uaa->ifaceno != RAL_IFACE_NO)
196 return UMATCH_NONE;
197
198 return (usb_lookup(ural_devs, uaa->vendor, uaa->product) != NULL) ?
199 UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
200 }
201
/*
 * Attach routine: locate the bulk endpoints, read chip identification and
 * the EEPROM, then register the device with the network stack and net80211.
 */
void
ural_attach(struct device *parent, struct device *self, void *aux)
{
	struct ural_softc *sc = (struct ural_softc *)self;
	struct usb_attach_arg *uaa = aux;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	usb_interface_descriptor_t *id;
	usb_endpoint_descriptor_t *ed;
	int i;

	sc->sc_udev = uaa->device;
	sc->sc_iface = uaa->iface;

	/*
	 * Find endpoints.
	 */
	id = usbd_get_interface_descriptor(sc->sc_iface);

	/* we need exactly one bulk-in (rx) and one bulk-out (tx) endpoint */
	sc->sc_rx_no = sc->sc_tx_no = -1;
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i);
		if (ed == NULL) {
			printf("%s: no endpoint descriptor for iface %d\n",
			    sc->sc_dev.dv_xname, i);
			return;
		}

		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
			sc->sc_rx_no = ed->bEndpointAddress;
		else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
			sc->sc_tx_no = ed->bEndpointAddress;
	}
	if (sc->sc_rx_no == -1 || sc->sc_tx_no == -1) {
		printf("%s: missing endpoint\n", sc->sc_dev.dv_xname);
		return;
	}

	/* deferred work (state transitions) and periodic scan timeout */
	usb_init_task(&sc->sc_task, ural_task, sc, USB_TASK_TYPE_GENERIC);
	timeout_set(&sc->scan_to, ural_next_scan, sc);

	/* AMRR rate-control thresholds and its periodic timeout */
	sc->amrr.amrr_min_success_threshold =  1;
	sc->amrr.amrr_max_success_threshold = 10;
	timeout_set(&sc->amrr_to, ural_amrr_timeout, sc);

	/* retrieve RT2570 rev. no */
	sc->asic_rev = ural_read(sc, RAL_MAC_CSR0);

	/* retrieve MAC address and various other things from EEPROM */
	ural_read_eeprom(sc);

	printf("%s: MAC/BBP RT%04x (rev 0x%02x), RF %s, address %s\n",
	    sc->sc_dev.dv_xname, sc->macbbp_rev, sc->asic_rev,
	    ural_get_rf(sc->rf_rev), ether_sprintf(ic->ic_myaddr));

	ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* set device capabilities */
	ic->ic_caps =
	    IEEE80211_C_MONITOR |	/* monitor mode supported */
#ifndef IEEE80211_STA_ONLY
	    IEEE80211_C_IBSS |		/* IBSS mode supported */
	    IEEE80211_C_HOSTAP |	/* HostAp mode supported */
#endif
	    IEEE80211_C_TXPMGT |	/* tx power management */
	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_WEP |		/* s/w WEP */
	    IEEE80211_C_RSN;		/* WPA/RSN */

	/* set supported .11b and .11g rates */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	/* set supported .11b and .11g channels (1 through 14) */
	for (i = 1; i <= 14; i++) {
		ic->ic_channels[i].ic_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
		ic->ic_channels[i].ic_flags =
		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	}

	/* hook up ifnet entry points */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ural_ioctl;
	ifp->if_start = ural_start;
	ifp->if_watchdog = ural_watchdog;
	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	if_attach(ifp);
	ieee80211_ifattach(ifp);
	ic->ic_newassoc = ural_newassoc;

	/* override state transition machine */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = ural_newstate;
	ieee80211_media_init(ifp, ural_media_change, ieee80211_media_status);

#if NBPFILTER > 0
	/* radiotap BPF attachment for monitor-mode captures */
	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + 64);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(RAL_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(RAL_TX_RADIOTAP_PRESENT);
#endif
}
318
/*
 * Detach routine: cancel all pending timeouts and tasks, wait for USB
 * references to drain, then tear down the network interface and free
 * transfer resources.  Runs at splusb() to keep USB completions out.
 */
int
ural_detach(struct device *self, int flags)
{
	struct ural_softc *sc = (struct ural_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	s = splusb();

	/* the timeouts are only initialized if attach fully succeeded */
	if (timeout_initialized(&sc->scan_to))
		timeout_del(&sc->scan_to);
	if (timeout_initialized(&sc->amrr_to))
		timeout_del(&sc->amrr_to);

	/* wait for any queued state-transition task to finish */
	usb_rem_wait_task(sc->sc_udev, &sc->sc_task);

	/* wait until nobody else holds a reference to the device */
	usbd_ref_wait(sc->sc_udev);

	/* if_softc is only set once attach reached if_attach() */
	if (ifp->if_softc != NULL) {
		ieee80211_ifdetach(ifp);	/* free all nodes */
		if_detach(ifp);
	}

	if (sc->amrr_xfer != NULL) {
		usbd_free_xfer(sc->amrr_xfer);
		sc->amrr_xfer = NULL;
	}

	/* closing the pipes aborts any transfer still in progress */
	if (sc->sc_rx_pipeh != NULL)
		usbd_close_pipe(sc->sc_rx_pipeh);

	if (sc->sc_tx_pipeh != NULL)
		usbd_close_pipe(sc->sc_tx_pipeh);

	ural_free_rx_list(sc);
	ural_free_tx_list(sc);

	splx(s);

	return 0;
}
360
361 int
ural_alloc_tx_list(struct ural_softc * sc)362 ural_alloc_tx_list(struct ural_softc *sc)
363 {
364 int i, error;
365
366 sc->tx_cur = sc->tx_queued = 0;
367
368 for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
369 struct ural_tx_data *data = &sc->tx_data[i];
370
371 data->sc = sc;
372
373 data->xfer = usbd_alloc_xfer(sc->sc_udev);
374 if (data->xfer == NULL) {
375 printf("%s: could not allocate tx xfer\n",
376 sc->sc_dev.dv_xname);
377 error = ENOMEM;
378 goto fail;
379 }
380 data->buf = usbd_alloc_buffer(data->xfer,
381 RAL_TX_DESC_SIZE + IEEE80211_MAX_LEN);
382 if (data->buf == NULL) {
383 printf("%s: could not allocate tx buffer\n",
384 sc->sc_dev.dv_xname);
385 error = ENOMEM;
386 goto fail;
387 }
388 }
389
390 return 0;
391
392 fail: ural_free_tx_list(sc);
393 return error;
394 }
395
396 void
ural_free_tx_list(struct ural_softc * sc)397 ural_free_tx_list(struct ural_softc *sc)
398 {
399 int i;
400
401 for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
402 struct ural_tx_data *data = &sc->tx_data[i];
403
404 if (data->xfer != NULL) {
405 usbd_free_xfer(data->xfer);
406 data->xfer = NULL;
407 }
408 /*
409 * The node has already been freed at that point so don't call
410 * ieee80211_release_node() here.
411 */
412 data->ni = NULL;
413 }
414 }
415
416 int
ural_alloc_rx_list(struct ural_softc * sc)417 ural_alloc_rx_list(struct ural_softc *sc)
418 {
419 int i, error;
420
421 for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
422 struct ural_rx_data *data = &sc->rx_data[i];
423
424 data->sc = sc;
425
426 data->xfer = usbd_alloc_xfer(sc->sc_udev);
427 if (data->xfer == NULL) {
428 printf("%s: could not allocate rx xfer\n",
429 sc->sc_dev.dv_xname);
430 error = ENOMEM;
431 goto fail;
432 }
433 if (usbd_alloc_buffer(data->xfer, MCLBYTES) == NULL) {
434 printf("%s: could not allocate rx buffer\n",
435 sc->sc_dev.dv_xname);
436 error = ENOMEM;
437 goto fail;
438 }
439
440 MGETHDR(data->m, M_DONTWAIT, MT_DATA);
441 if (data->m == NULL) {
442 printf("%s: could not allocate rx mbuf\n",
443 sc->sc_dev.dv_xname);
444 error = ENOMEM;
445 goto fail;
446 }
447 MCLGET(data->m, M_DONTWAIT);
448 if (!(data->m->m_flags & M_EXT)) {
449 printf("%s: could not allocate rx mbuf cluster\n",
450 sc->sc_dev.dv_xname);
451 error = ENOMEM;
452 goto fail;
453 }
454 data->buf = mtod(data->m, uint8_t *);
455 }
456
457 return 0;
458
459 fail: ural_free_rx_list(sc);
460 return error;
461 }
462
463 void
ural_free_rx_list(struct ural_softc * sc)464 ural_free_rx_list(struct ural_softc *sc)
465 {
466 int i;
467
468 for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
469 struct ural_rx_data *data = &sc->rx_data[i];
470
471 if (data->xfer != NULL) {
472 usbd_free_xfer(data->xfer);
473 data->xfer = NULL;
474 }
475 if (data->m != NULL) {
476 m_freem(data->m);
477 data->m = NULL;
478 }
479 }
480 }
481
482 int
ural_media_change(struct ifnet * ifp)483 ural_media_change(struct ifnet *ifp)
484 {
485 int error;
486
487 error = ieee80211_media_change(ifp);
488 if (error != ENETRESET)
489 return error;
490
491 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
492 error = ural_init(ifp);
493
494 return error;
495 }
496
497 /*
498 * This function is called periodically (every 200ms) during scanning to
499 * switch from one channel to another.
500 */
501 void
ural_next_scan(void * arg)502 ural_next_scan(void *arg)
503 {
504 struct ural_softc *sc = arg;
505 struct ieee80211com *ic = &sc->sc_ic;
506 struct ifnet *ifp = &ic->ic_if;
507
508 if (usbd_is_dying(sc->sc_udev))
509 return;
510
511 usbd_ref_incr(sc->sc_udev);
512
513 if (ic->ic_state == IEEE80211_S_SCAN)
514 ieee80211_next_scan(ifp);
515
516 usbd_ref_decr(sc->sc_udev);
517 }
518
/*
 * Deferred state-transition handler, run in process context by the USB
 * task thread.  Performs the register programming required for the state
 * recorded in sc_state by ural_newstate(), then chains to net80211's
 * original newstate handler.
 */
void
ural_task(void *arg)
{
	struct ural_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	enum ieee80211_state ostate;
	struct ieee80211_node *ni;

	if (usbd_is_dying(sc->sc_udev))
		return;

	ostate = ic->ic_state;

	switch (sc->sc_state) {
	case IEEE80211_S_INIT:
		if (ostate == IEEE80211_S_RUN) {
			/* abort TSF synchronization */
			ural_write(sc, RAL_TXRX_CSR19, 0);

			/* force tx led to stop blinking */
			ural_write(sc, RAL_MAC_CSR20, 0);
		}
		break;

	case IEEE80211_S_SCAN:
		/* tune to the current scan channel, rearm the 200ms hop */
		ural_set_chan(sc, ic->ic_bss->ni_chan);
		if (!usbd_is_dying(sc->sc_udev))
			timeout_add_msec(&sc->scan_to, 200);
		break;

	case IEEE80211_S_AUTH:
		ural_set_chan(sc, ic->ic_bss->ni_chan);
		break;

	case IEEE80211_S_ASSOC:
		ural_set_chan(sc, ic->ic_bss->ni_chan);
		break;

	case IEEE80211_S_RUN:
		ural_set_chan(sc, ic->ic_bss->ni_chan);

		ni = ic->ic_bss;

		if (ic->ic_opmode != IEEE80211_M_MONITOR) {
			ural_update_slot(sc);
			ural_set_txpreamble(sc);
			ural_set_basicrates(sc);
			ural_set_bssid(sc, ni->ni_bssid);
		}

#ifndef IEEE80211_STA_ONLY
		/* HostAP/IBSS: hand the initial beacon to the hardware */
		if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
		    ic->ic_opmode == IEEE80211_M_IBSS) {
			struct mbuf *m = ieee80211_beacon_alloc(ic, ni);
			if (m == NULL) {
				printf("%s: could not allocate beacon\n",
				    sc->sc_dev.dv_xname);
				return;
			}

			if (ural_tx_bcn(sc, m, ni) != 0) {
				m_freem(m);
				printf("%s: could not transmit beacon\n",
				    sc->sc_dev.dv_xname);
				return;
			}

			/* beacon is no longer needed */
			m_freem(m);
		}
#endif

		/* make tx led blink on tx (controlled by ASIC) */
		ural_write(sc, RAL_MAC_CSR20, 1);

		if (ic->ic_opmode != IEEE80211_M_MONITOR)
			ural_enable_tsf_sync(sc);

		if (ic->ic_opmode == IEEE80211_M_STA) {
			/* fake a join to init the tx rate */
			ural_newassoc(ic, ic->ic_bss, 1);

			/* enable automatic rate control in STA mode */
			if (ic->ic_fixed_rate == -1)
				ural_amrr_start(sc, ic->ic_bss);
		}

		break;
	}

	/* hand off to net80211's saved newstate handler */
	sc->sc_newstate(ic, sc->sc_state, sc->sc_arg);
}
611
612 int
ural_newstate(struct ieee80211com * ic,enum ieee80211_state nstate,int arg)613 ural_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
614 {
615 struct ural_softc *sc = ic->ic_if.if_softc;
616
617 usb_rem_task(sc->sc_udev, &sc->sc_task);
618 timeout_del(&sc->scan_to);
619 timeout_del(&sc->amrr_to);
620
621 /* do it in a process context */
622 sc->sc_state = nstate;
623 sc->sc_arg = arg;
624 usb_add_task(sc->sc_udev, &sc->sc_task);
625 return 0;
626 }
627
628 /* quickly determine if a given rate is CCK or OFDM */
629 #define RAL_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22)
630
631 #define RAL_ACK_SIZE 14 /* 10 + 4(FCS) */
632 #define RAL_CTS_SIZE 14 /* 10 + 4(FCS) */
633
634 #define RAL_SIFS 10 /* us */
635
636 #define RAL_RXTX_TURNAROUND 5 /* us */
637
/*
 * Transmit completion callback.  Releases the node reference taken for
 * the frame, updates ring accounting, and restarts output.  Runs at
 * splnet() for the accounting portion.
 */
void
ural_txeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	struct ural_tx_data *data = priv;
	struct ural_softc *sc = data->sc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	int s;

	if (status != USBD_NORMAL_COMPLETION) {
		/* cancelled transfers happen on stop/detach; stay silent */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
			return;

		printf("%s: could not transmit buffer: %s\n",
		    sc->sc_dev.dv_xname, usbd_errstr(status));

		/* unwedge a stalled tx endpoint asynchronously */
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sc->sc_tx_pipeh);

		ifp->if_oerrors++;
		return;
	}

	s = splnet();

	ieee80211_release_node(ic, data->ni);
	data->ni = NULL;

	sc->tx_queued--;

	DPRINTFN(10, ("tx done\n"));

	/* a slot freed up: clear the watchdog and kick the tx queue */
	sc->sc_tx_timer = 0;
	ifq_clr_oactive(&ifp->if_snd);
	ural_start(ifp);

	splx(s);
}
676
/*
 * Receive completion callback.  Validates the frame, swaps in a fresh
 * mbuf cluster so the ring slot can be resubmitted immediately, taps the
 * frame to radiotap BPF if active, and passes it to net80211.  The
 * transfer is always resubmitted, even on error (via the skip label).
 */
void
ural_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	struct ural_rx_data *data = priv;
	struct ural_softc *sc = data->sc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &ic->ic_if;
	const struct ural_rx_desc *desc;
	struct ieee80211_frame *wh;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_node *ni;
	struct mbuf *mnew, *m;
	int s, len;

	if (status != USBD_NORMAL_COMPLETION) {
		/* cancelled transfers happen on stop/detach; don't resubmit */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
			return;

		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sc->sc_rx_pipeh);
		goto skip;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL);

	/* need at least a minimal frame plus the trailing rx descriptor */
	if (len < RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN) {
		DPRINTF(("%s: xfer too short %d\n", sc->sc_dev.dv_xname,
		    len));
		ifp->if_ierrors++;
		goto skip;
	}

	/* rx descriptor is located at the end */
	desc = (struct ural_rx_desc *)(data->buf + len - RAL_RX_DESC_SIZE);

	if (letoh32(desc->flags) & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) {
		/*
		 * This should not happen since we did not request to receive
		 * those frames when we filled RAL_TXRX_CSR2.
		 */
		DPRINTFN(5, ("PHY or CRC error\n"));
		ifp->if_ierrors++;
		goto skip;
	}

	/* get a replacement cluster before giving up ownership of data->m */
	MGETHDR(mnew, M_DONTWAIT, MT_DATA);
	if (mnew == NULL) {
		printf("%s: could not allocate rx mbuf\n",
		    sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		goto skip;
	}
	MCLGET(mnew, M_DONTWAIT);
	if (!(mnew->m_flags & M_EXT)) {
		printf("%s: could not allocate rx mbuf cluster\n",
		    sc->sc_dev.dv_xname);
		m_freem(mnew);
		ifp->if_ierrors++;
		goto skip;
	}
	/* swap: hand the received mbuf up, keep the fresh one in the ring */
	m = data->m;
	data->m = mnew;
	data->buf = mtod(data->m, uint8_t *);

	/* finalize mbuf; frame length lives in bits 16-27 of desc->flags */
	m->m_pkthdr.len = m->m_len = (letoh32(desc->flags) >> 16) & 0xfff;

	s = splnet();

#if NBPFILTER > 0
	if (sc->sc_drvbpf != NULL) {
		struct mbuf mb;
		struct ural_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = IEEE80211_RADIOTAP_F_FCS;
		tap->wr_rate = ural_rxrate(desc);
		tap->wr_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
		tap->wr_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
		tap->wr_antenna = sc->rx_ant;
		tap->wr_antsignal = desc->rssi;

		/* prepend the radiotap header via a stack mbuf */
		mb.m_data = (caddr_t)tap;
		mb.m_len = sc->sc_rxtap_len;
		mb.m_next = m;
		mb.m_nextpkt = NULL;
		mb.m_type = 0;
		mb.m_flags = 0;
		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
	}
#endif
	m_adj(m, -IEEE80211_CRC_LEN);	/* trim FCS */

	wh = mtod(m, struct ieee80211_frame *);
	ni = ieee80211_find_rxnode(ic, wh);

	/* send the frame to the 802.11 layer */
	memset(&rxi, 0, sizeof(rxi));
	rxi.rxi_rssi = desc->rssi;
	ieee80211_input(ifp, m, ni, &rxi);

	/* node is no longer needed */
	ieee80211_release_node(ic, ni);

	splx(s);

	DPRINTFN(15, ("rx done\n"));

skip:	/* setup a new transfer */
	usbd_setup_xfer(xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES,
	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
	(void)usbd_transfer(xfer);
}
789
790 /*
791 * This function is only used by the Rx radiotap code. It returns the rate at
792 * which a given frame was received.
793 */
#if NBPFILTER > 0
/*
 * Map the hardware rate field of an rx descriptor back to a net80211
 * rate (in units of 500kbps).  OFDM values are the inverse of
 * ural_plcp_signal(); CCK values are the rate in units of 100kbps.
 * Only used to fill the radiotap header.
 */
uint8_t
ural_rxrate(const struct ural_rx_desc *desc)
{
	if (letoh32(desc->flags) & RAL_RX_OFDM) {
		/* reverse function of ural_plcp_signal */
		switch (desc->rate) {
		case 0x8:	return 96;
		case 0x9:	return 48;
		case 0xa:	return 24;
		case 0xb:	return 12;
		case 0xc:	return 108;
		case 0xd:	return 72;
		case 0xe:	return 36;
		case 0xf:	return 18;
		}
	} else {
		switch (desc->rate) {
		case 10:	return 2;
		case 20:	return 4;
		case 55:	return 11;
		case 110:	return 22;
		}
	}
	return 2;	/* should not get there */
}
#endif
823
824 /*
825 * Return the expected ack rate for a frame transmitted at rate `rate'.
826 */
827 int
ural_ack_rate(struct ieee80211com * ic,int rate)828 ural_ack_rate(struct ieee80211com *ic, int rate)
829 {
830 switch (rate) {
831 /* CCK rates */
832 case 2:
833 return 2;
834 case 4:
835 case 11:
836 case 22:
837 return (ic->ic_curmode == IEEE80211_MODE_11B) ? 4 : rate;
838
839 /* OFDM rates */
840 case 12:
841 case 18:
842 return 12;
843 case 24:
844 case 36:
845 return 24;
846 case 48:
847 case 72:
848 case 96:
849 case 108:
850 return 48;
851 }
852
853 /* default to 1Mbps */
854 return 2;
855 }
856
857 /*
858 * Compute the duration (in us) needed to transmit `len' bytes at rate `rate'.
859 * The function automatically determines the operating mode depending on the
860 * given rate. `flags' indicates whether short preamble is in use or not.
861 */
862 uint16_t
ural_txtime(int len,int rate,uint32_t flags)863 ural_txtime(int len, int rate, uint32_t flags)
864 {
865 uint16_t txtime;
866
867 if (RAL_RATE_IS_OFDM(rate)) {
868 /* IEEE Std 802.11g-2003, pp. 44 */
869 txtime = (8 + 4 * len + 3 + rate - 1) / rate;
870 txtime = 16 + 4 + 4 * txtime + 6;
871 } else {
872 /* IEEE Std 802.11b-1999, pp. 28 */
873 txtime = (16 * len + rate - 1) / rate;
874 if (rate != 2 && (flags & IEEE80211_F_SHPREAMBLE))
875 txtime += 72 + 24;
876 else
877 txtime += 144 + 48;
878 }
879 return txtime;
880 }
881
/*
 * Map a net80211 rate (in units of 500kbps) to the PLCP SIGNAL field
 * value programmed into the tx descriptor.  Returns 0xff for rates the
 * hardware does not support.
 */
uint8_t
ural_plcp_signal(int rate)
{
	/* CCK rates (returned values are device-dependent) */
	if (rate == 2)
		return 0x0;
	if (rate == 4)
		return 0x1;
	if (rate == 11)
		return 0x2;
	if (rate == 22)
		return 0x3;

	/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
	switch (rate) {
	case 12:	return 0xb;
	case 18:	return 0xf;
	case 24:	return 0xa;
	case 36:	return 0xe;
	case 48:	return 0x9;
	case 72:	return 0xd;
	case 96:	return 0x8;
	case 108:	return 0xc;
	}

	/* unsupported rates (should not get there) */
	return 0xff;
}
906
/*
 * Fill in a tx descriptor for a frame of `len' bytes (excluding FCS) to
 * be sent at `rate' (in units of 500kbps).  `flags' carries RAL_TX_*
 * bits; the frame length is packed into bits 16-27 of the flags word.
 */
void
ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc,
    uint32_t flags, int len, int rate)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t plcp_length;
	int remainder;

	desc->flags = htole32(flags);
	desc->flags |= htole32(len << 16);

	/* fixed WME access parameters */
	desc->wme = htole16(
	    RAL_AIFSN(2) |
	    RAL_LOGCWMIN(3) |
	    RAL_LOGCWMAX(5));

	/* setup PLCP fields */
	desc->plcp_signal  = ural_plcp_signal(rate);
	desc->plcp_service = 4;

	/* PLCP length covers the FCS as well */
	len += IEEE80211_CRC_LEN;
	if (RAL_RATE_IS_OFDM(rate)) {
		desc->flags |= htole32(RAL_TX_OFDM);

		/* OFDM: 12-bit length split 6/6 across the two fields */
		plcp_length = len & 0xfff;
		desc->plcp_length_hi = plcp_length >> 6;
		desc->plcp_length_lo = plcp_length & 0x3f;
	} else {
		/* CCK: PLCP length is the tx time in microseconds */
		plcp_length = (16 * len + rate - 1) / rate;
		if (rate == 22) {
			/* 11Mbps: flag when the rounding crosses a symbol */
			remainder = (16 * len) % 22;
			if (remainder != 0 && remainder < 7)
				desc->plcp_service |= RAL_PLCP_LENGEXT;
		}
		desc->plcp_length_hi = plcp_length >> 8;
		desc->plcp_length_lo = plcp_length & 0xff;

		/* short preamble is signalled in the PLCP signal field */
		if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
			desc->plcp_signal |= 0x08;
	}

	/* crypto IV/EIV are handled in software; leave them clear */
	desc->iv = 0;
	desc->eiv = 0;
}
951
952 #define RAL_TX_TIMEOUT 5000
953
#ifndef IEEE80211_STA_ONLY
/*
 * Upload a beacon frame to the hardware (HostAP/IBSS modes).  The chip
 * then transmits it autonomously.  Uses a private, synchronous transfer
 * in two stages: a 1-byte command, then the descriptor plus the frame.
 */
int
ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
{
	struct ural_tx_desc *desc;
	struct usbd_xfer *xfer;
	usbd_status error;
	uint8_t cmd = 0;
	uint8_t *buf;
	int xferlen, rate = 2;	/* beacons go out at 1Mbps */

	xfer = usbd_alloc_xfer(sc->sc_udev);
	if (xfer == NULL)
		return ENOMEM;

	/* xfer length needs to be a multiple of two! */
	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;

	buf = usbd_alloc_buffer(xfer, xferlen);
	if (buf == NULL) {
		usbd_free_xfer(xfer);
		return ENOMEM;
	}

	/* stage 1: single zero byte announcing the beacon upload */
	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, &cmd, sizeof cmd,
	    USBD_FORCE_SHORT_XFER | USBD_SYNCHRONOUS, RAL_TX_TIMEOUT, NULL);

	error = usbd_transfer(xfer);
	if (error != 0) {
		usbd_free_xfer(xfer);
		return error;
	}

	/* stage 2: tx descriptor followed by the beacon frame itself */
	desc = (struct ural_tx_desc *)buf;

	m_copydata(m0, 0, m0->m_pkthdr.len, buf + RAL_TX_DESC_SIZE);
	ural_setup_tx_desc(sc, desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP,
	    m0->m_pkthdr.len, rate);

	DPRINTFN(10, ("sending beacon frame len=%u rate=%u xfer len=%u\n",
	    m0->m_pkthdr.len, rate, xferlen));

	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, buf, xferlen,
	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY | USBD_SYNCHRONOUS,
	    RAL_TX_TIMEOUT, NULL);

	error = usbd_transfer(xfer);
	usbd_free_xfer(xfer);

	return error;
}
#endif
1006
1007 int
ural_tx_data(struct ural_softc * sc,struct mbuf * m0,struct ieee80211_node * ni)1008 ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
1009 {
1010 struct ieee80211com *ic = &sc->sc_ic;
1011 struct ural_tx_desc *desc;
1012 struct ural_tx_data *data;
1013 struct ieee80211_frame *wh;
1014 struct ieee80211_key *k;
1015 uint32_t flags = RAL_TX_NEWSEQ;
1016 uint16_t dur;
1017 usbd_status error;
1018 int rate, xferlen, pktlen, needrts = 0, needcts = 0;
1019
1020 wh = mtod(m0, struct ieee80211_frame *);
1021
1022 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
1023 k = ieee80211_get_txkey(ic, wh, ni);
1024
1025 if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL)
1026 return ENOBUFS;
1027
1028 /* packet header may have moved, reset our local pointer */
1029 wh = mtod(m0, struct ieee80211_frame *);
1030 }
1031
1032 /* compute actual packet length (including CRC and crypto overhead) */
1033 pktlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN;
1034
1035 /* pickup a rate */
1036 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
1037 ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1038 IEEE80211_FC0_TYPE_MGT)) {
1039 /* mgmt/multicast frames are sent at the lowest avail. rate */
1040 rate = ni->ni_rates.rs_rates[0];
1041 } else if (ic->ic_fixed_rate != -1) {
1042 rate = ic->ic_sup_rates[ic->ic_curmode].
1043 rs_rates[ic->ic_fixed_rate];
1044 } else
1045 rate = ni->ni_rates.rs_rates[ni->ni_txrate];
1046 if (rate == 0)
1047 rate = 2; /* XXX should not happen */
1048 rate &= IEEE80211_RATE_VAL;
1049
1050 /* check if RTS/CTS or CTS-to-self protection must be used */
1051 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1052 /* multicast frames are not sent at OFDM rates in 802.11b/g */
1053 if (pktlen > ic->ic_rtsthreshold) {
1054 needrts = 1; /* RTS/CTS based on frame length */
1055 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1056 RAL_RATE_IS_OFDM(rate)) {
1057 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
1058 needcts = 1; /* CTS-to-self */
1059 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
1060 needrts = 1; /* RTS/CTS */
1061 }
1062 }
1063 if (needrts || needcts) {
1064 struct mbuf *mprot;
1065 int protrate, ackrate;
1066 uint16_t dur;
1067
1068 protrate = 2;
1069 ackrate = ural_ack_rate(ic, rate);
1070
1071 dur = ural_txtime(pktlen, rate, ic->ic_flags) +
1072 ural_txtime(RAL_ACK_SIZE, ackrate, ic->ic_flags) +
1073 2 * RAL_SIFS;
1074 if (needrts) {
1075 dur += ural_txtime(RAL_CTS_SIZE, ural_ack_rate(ic,
1076 protrate), ic->ic_flags) + RAL_SIFS;
1077 mprot = ieee80211_get_rts(ic, wh, dur);
1078 } else {
1079 mprot = ieee80211_get_cts_to_self(ic, dur);
1080 }
1081 if (mprot == NULL) {
1082 printf("%s: could not allocate protection frame\n",
1083 sc->sc_dev.dv_xname);
1084 m_freem(m0);
1085 return ENOBUFS;
1086 }
1087
1088 data = &sc->tx_data[sc->tx_cur];
1089 desc = (struct ural_tx_desc *)data->buf;
1090
1091 /* avoid multiple free() of the same node for each fragment */
1092 data->ni = ieee80211_ref_node(ni);
1093
1094 m_copydata(mprot, 0, mprot->m_pkthdr.len,
1095 data->buf + RAL_TX_DESC_SIZE);
1096 ural_setup_tx_desc(sc, desc,
1097 (needrts ? RAL_TX_NEED_ACK : 0) | RAL_TX_RETRY(7),
1098 mprot->m_pkthdr.len, protrate);
1099
1100 /* no roundup necessary here */
1101 xferlen = RAL_TX_DESC_SIZE + mprot->m_pkthdr.len;
1102
1103 /* XXX may want to pass the protection frame to BPF */
1104
1105 /* mbuf is no longer needed */
1106 m_freem(mprot);
1107
1108 usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf,
1109 xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY,
1110 RAL_TX_TIMEOUT, ural_txeof);
1111 error = usbd_transfer(data->xfer);
1112 if (error != 0 && error != USBD_IN_PROGRESS) {
1113 m_freem(m0);
1114 return error;
1115 }
1116
1117 sc->tx_queued++;
1118 sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;
1119
1120 flags |= RAL_TX_IFS_SIFS;
1121 }
1122
1123 data = &sc->tx_data[sc->tx_cur];
1124 desc = (struct ural_tx_desc *)data->buf;
1125
1126 data->ni = ni;
1127
1128 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1129 flags |= RAL_TX_NEED_ACK;
1130 flags |= RAL_TX_RETRY(7);
1131
1132 dur = ural_txtime(RAL_ACK_SIZE, ural_ack_rate(ic, rate),
1133 ic->ic_flags) + RAL_SIFS;
1134 *(uint16_t *)wh->i_dur = htole16(dur);
1135
1136 #ifndef IEEE80211_STA_ONLY
1137 /* tell hardware to set timestamp in probe responses */
1138 if ((wh->i_fc[0] &
1139 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
1140 (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
1141 flags |= RAL_TX_TIMESTAMP;
1142 #endif
1143 }
1144
1145 #if NBPFILTER > 0
1146 if (sc->sc_drvbpf != NULL) {
1147 struct mbuf mb;
1148 struct ural_tx_radiotap_header *tap = &sc->sc_txtap;
1149
1150 tap->wt_flags = 0;
1151 tap->wt_rate = rate;
1152 tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
1153 tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
1154 tap->wt_antenna = sc->tx_ant;
1155
1156 mb.m_data = (caddr_t)tap;
1157 mb.m_len = sc->sc_txtap_len;
1158 mb.m_next = m0;
1159 mb.m_nextpkt = NULL;
1160 mb.m_type = 0;
1161 mb.m_flags = 0;
1162 bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
1163 }
1164 #endif
1165
1166 m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE);
1167 ural_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate);
1168
1169 /* align end on a 2-bytes boundary */
1170 xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;
1171
1172 /*
1173 * No space left in the last URB to store the extra 2 bytes, force
1174 * sending of another URB.
1175 */
1176 if ((xferlen % 64) == 0)
1177 xferlen += 2;
1178
1179 DPRINTFN(10, ("sending frame len=%u rate=%u xfer len=%u\n",
1180 m0->m_pkthdr.len, rate, xferlen));
1181
1182 /* mbuf is no longer needed */
1183 m_freem(m0);
1184
1185 usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen,
1186 USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof);
1187 error = usbd_transfer(data->xfer);
1188 if (error != 0 && error != USBD_IN_PROGRESS)
1189 return error;
1190
1191 sc->tx_queued++;
1192 sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;
1193
1194 return 0;
1195 }
1196
1197 void
ural_start(struct ifnet * ifp)1198 ural_start(struct ifnet *ifp)
1199 {
1200 struct ural_softc *sc = ifp->if_softc;
1201 struct ieee80211com *ic = &sc->sc_ic;
1202 struct ieee80211_node *ni;
1203 struct mbuf *m0;
1204
1205 /*
1206 * net80211 may still try to send management frames even if the
1207 * IFF_RUNNING flag is not set...
1208 */
1209 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1210 return;
1211
1212 for (;;) {
1213 if (sc->tx_queued >= RAL_TX_LIST_COUNT - 1) {
1214 ifq_set_oactive(&ifp->if_snd);
1215 break;
1216 }
1217
1218 m0 = mq_dequeue(&ic->ic_mgtq);
1219 if (m0 != NULL) {
1220 ni = m0->m_pkthdr.ph_cookie;
1221 #if NBPFILTER > 0
1222 if (ic->ic_rawbpf != NULL)
1223 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
1224 #endif
1225 if (ural_tx_data(sc, m0, ni) != 0)
1226 break;
1227
1228 } else {
1229 if (ic->ic_state != IEEE80211_S_RUN)
1230 break;
1231
1232 m0 = ifq_dequeue(&ifp->if_snd);
1233 if (m0 == NULL)
1234 break;
1235 #if NBPFILTER > 0
1236 if (ifp->if_bpf != NULL)
1237 bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
1238 #endif
1239 m0 = ieee80211_encap(ifp, m0, &ni);
1240 if (m0 == NULL)
1241 continue;
1242 #if NBPFILTER > 0
1243 if (ic->ic_rawbpf != NULL)
1244 bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
1245 #endif
1246 if (ural_tx_data(sc, m0, ni) != 0) {
1247 if (ni != NULL)
1248 ieee80211_release_node(ic, ni);
1249 ifp->if_oerrors++;
1250 break;
1251 }
1252 }
1253
1254 sc->sc_tx_timer = 5;
1255 ifp->if_timer = 1;
1256 }
1257 }
1258
1259 void
ural_watchdog(struct ifnet * ifp)1260 ural_watchdog(struct ifnet *ifp)
1261 {
1262 struct ural_softc *sc = ifp->if_softc;
1263
1264 ifp->if_timer = 0;
1265
1266 if (sc->sc_tx_timer > 0) {
1267 if (--sc->sc_tx_timer == 0) {
1268 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1269 /*ural_init(ifp); XXX needs a process context! */
1270 ifp->if_oerrors++;
1271 return;
1272 }
1273 ifp->if_timer = 1;
1274 }
1275
1276 ieee80211_watchdog(ifp);
1277 }
1278
/*
 * Handle interface ioctls.  The body runs at splnet() and holds a USB
 * device reference so a concurrent detach cannot free the softc while
 * an operation sleeps.
 */
int
ural_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ural_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	int s, error = 0;

	if (usbd_is_dying(sc->sc_udev))
		return ENXIO;

	usbd_ref_incr(sc->sc_udev);

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* already running: only refresh the Rx filter */
			if (ifp->if_flags & IFF_RUNNING)
				ural_update_promisc(sc);
			else
				ural_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ural_stop(ifp, 1);
		}
		break;

	case SIOCS80211CHANNEL:
		/*
		 * This allows for fast channel switching in monitor mode
		 * (used by kismet). In IBSS mode, we must explicitly reset
		 * the interface to generate a new beacon frame.
		 */
		error = ieee80211_ioctl(ifp, cmd, data);
		if (error == ENETRESET &&
		    ic->ic_opmode == IEEE80211_M_MONITOR) {
			if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
			    (IFF_UP | IFF_RUNNING))
				ural_set_chan(sc, ic->ic_ibss_chan);
			error = 0;
		}
		break;

	default:
		error = ieee80211_ioctl(ifp, cmd, data);
	}

	/* net80211 asks for a reset: reinitialize if up and running */
	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			ural_init(ifp);
		error = 0;
	}

	splx(s);

	usbd_ref_decr(sc->sc_udev);

	return error;
}
1342
1343 void
ural_eeprom_read(struct ural_softc * sc,uint16_t addr,void * buf,int len)1344 ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len)
1345 {
1346 usb_device_request_t req;
1347 usbd_status error;
1348
1349 req.bmRequestType = UT_READ_VENDOR_DEVICE;
1350 req.bRequest = RAL_READ_EEPROM;
1351 USETW(req.wValue, 0);
1352 USETW(req.wIndex, addr);
1353 USETW(req.wLength, len);
1354
1355 error = usbd_do_request(sc->sc_udev, &req, buf);
1356 if (error != 0) {
1357 printf("%s: could not read EEPROM: %s\n",
1358 sc->sc_dev.dv_xname, usbd_errstr(error));
1359 }
1360 }
1361
1362 uint16_t
ural_read(struct ural_softc * sc,uint16_t reg)1363 ural_read(struct ural_softc *sc, uint16_t reg)
1364 {
1365 usb_device_request_t req;
1366 usbd_status error;
1367 uint16_t val;
1368
1369 req.bmRequestType = UT_READ_VENDOR_DEVICE;
1370 req.bRequest = RAL_READ_MAC;
1371 USETW(req.wValue, 0);
1372 USETW(req.wIndex, reg);
1373 USETW(req.wLength, sizeof (uint16_t));
1374
1375 error = usbd_do_request(sc->sc_udev, &req, &val);
1376 if (error != 0) {
1377 printf("%s: could not read MAC register: %s\n",
1378 sc->sc_dev.dv_xname, usbd_errstr(error));
1379 return 0;
1380 }
1381 return letoh16(val);
1382 }
1383
1384 void
ural_read_multi(struct ural_softc * sc,uint16_t reg,void * buf,int len)1385 ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
1386 {
1387 usb_device_request_t req;
1388 usbd_status error;
1389
1390 req.bmRequestType = UT_READ_VENDOR_DEVICE;
1391 req.bRequest = RAL_READ_MULTI_MAC;
1392 USETW(req.wValue, 0);
1393 USETW(req.wIndex, reg);
1394 USETW(req.wLength, len);
1395
1396 error = usbd_do_request(sc->sc_udev, &req, buf);
1397 if (error != 0) {
1398 printf("%s: could not read MAC register: %s\n",
1399 sc->sc_dev.dv_xname, usbd_errstr(error));
1400 }
1401 }
1402
1403 void
ural_write(struct ural_softc * sc,uint16_t reg,uint16_t val)1404 ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val)
1405 {
1406 usb_device_request_t req;
1407 usbd_status error;
1408
1409 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1410 req.bRequest = RAL_WRITE_MAC;
1411 USETW(req.wValue, val);
1412 USETW(req.wIndex, reg);
1413 USETW(req.wLength, 0);
1414
1415 error = usbd_do_request(sc->sc_udev, &req, NULL);
1416 if (error != 0) {
1417 printf("%s: could not write MAC register: %s\n",
1418 sc->sc_dev.dv_xname, usbd_errstr(error));
1419 }
1420 }
1421
1422 void
ural_write_multi(struct ural_softc * sc,uint16_t reg,void * buf,int len)1423 ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
1424 {
1425 usb_device_request_t req;
1426 usbd_status error;
1427
1428 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1429 req.bRequest = RAL_WRITE_MULTI_MAC;
1430 USETW(req.wValue, 0);
1431 USETW(req.wIndex, reg);
1432 USETW(req.wLength, len);
1433
1434 error = usbd_do_request(sc->sc_udev, &req, buf);
1435 if (error != 0) {
1436 printf("%s: could not write MAC register: %s\n",
1437 sc->sc_dev.dv_xname, usbd_errstr(error));
1438 }
1439 }
1440
1441 void
ural_bbp_write(struct ural_softc * sc,uint8_t reg,uint8_t val)1442 ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val)
1443 {
1444 uint16_t tmp;
1445 int ntries;
1446
1447 for (ntries = 0; ntries < 5; ntries++) {
1448 if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
1449 break;
1450 }
1451 if (ntries == 5) {
1452 printf("%s: could not write to BBP\n", sc->sc_dev.dv_xname);
1453 return;
1454 }
1455
1456 tmp = reg << 8 | val;
1457 ural_write(sc, RAL_PHY_CSR7, tmp);
1458 }
1459
1460 uint8_t
ural_bbp_read(struct ural_softc * sc,uint8_t reg)1461 ural_bbp_read(struct ural_softc *sc, uint8_t reg)
1462 {
1463 uint16_t val;
1464 int ntries;
1465
1466 val = RAL_BBP_WRITE | reg << 8;
1467 ural_write(sc, RAL_PHY_CSR7, val);
1468
1469 for (ntries = 0; ntries < 5; ntries++) {
1470 if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
1471 break;
1472 }
1473 if (ntries == 5) {
1474 printf("%s: could not read BBP\n", sc->sc_dev.dv_xname);
1475 return 0;
1476 }
1477 return ural_read(sc, RAL_PHY_CSR7) & 0xff;
1478 }
1479
1480 void
ural_rf_write(struct ural_softc * sc,uint8_t reg,uint32_t val)1481 ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val)
1482 {
1483 uint32_t tmp;
1484 int ntries;
1485
1486 for (ntries = 0; ntries < 5; ntries++) {
1487 if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY))
1488 break;
1489 }
1490 if (ntries == 5) {
1491 printf("%s: could not write to RF\n", sc->sc_dev.dv_xname);
1492 return;
1493 }
1494
1495 tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3);
1496 ural_write(sc, RAL_PHY_CSR9, tmp & 0xffff);
1497 ural_write(sc, RAL_PHY_CSR10, tmp >> 16);
1498
1499 /* remember last written value in sc */
1500 sc->rf_regs[reg] = val;
1501
1502 DPRINTFN(15, ("RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff));
1503 }
1504
/*
 * Tune the RF synthesizer to channel `c' and program the per-channel
 * Tx power.  The per-chip constants below are opaque programming values
 * taken from the vendor reference driver.
 */
void
ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t power, tmp;
	u_int chan;

	chan = ieee80211_chan2ieee(ic, c);
	if (chan == 0 || chan == IEEE80211_CHAN_ANY)
		return;

	/* clamp the EEPROM Tx power to the 5-bit maximum */
	power = min(sc->txpow[chan - 1], 31);

	DPRINTFN(2, ("setting channel to %u, txpower to %u\n", chan, power));

	switch (sc->rf_rev) {
	case RAL_RF_2522:
		ural_rf_write(sc, RAL_RF1, 0x00814);
		ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
		break;

	case RAL_RF_2523:
		ural_rf_write(sc, RAL_RF1, 0x08804);
		ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044);
		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
		break;

	case RAL_RF_2524:
		ural_rf_write(sc, RAL_RF1, 0x0c808);
		ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
		break;

	case RAL_RF_2525:
		/*
		 * Programmed twice, hi-band R2 values first, as the
		 * reference driver does; rationale is not documented.
		 */
		ural_rf_write(sc, RAL_RF1, 0x08808);
		ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);

		ural_rf_write(sc, RAL_RF1, 0x08808);
		ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
		break;

	case RAL_RF_2525E:
		ural_rf_write(sc, RAL_RF1, 0x08808);
		ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282);
		break;

	case RAL_RF_2526:
		/* double programming sequence, hi-band values first */
		ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
		ural_rf_write(sc, RAL_RF1, 0x08804);

		ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]);
		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
		break;
	}

	if (ic->ic_opmode != IEEE80211_M_MONITOR &&
	    ic->ic_state != IEEE80211_S_SCAN) {
		/* set Japan filter bit for channel 14 */
		tmp = ural_bbp_read(sc, 70);

		tmp &= ~RAL_JAPAN_FILTER;
		if (chan == 14)
			tmp |= RAL_JAPAN_FILTER;

		ural_bbp_write(sc, 70, tmp);

		/* clear CRC errors */
		ural_read(sc, RAL_STA_CSR0);

		DELAY(1000); /* RF needs a 1ms delay here */
		ural_disable_rf_tune(sc);
	}
}
1589
1590 /*
1591 * Disable RF auto-tuning.
1592 */
1593 void
ural_disable_rf_tune(struct ural_softc * sc)1594 ural_disable_rf_tune(struct ural_softc *sc)
1595 {
1596 uint32_t tmp;
1597
1598 if (sc->rf_rev != RAL_RF_2523) {
1599 tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE;
1600 ural_rf_write(sc, RAL_RF1, tmp);
1601 }
1602
1603 tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE;
1604 ural_rf_write(sc, RAL_RF3, tmp);
1605
1606 DPRINTFN(2, ("disabling RF autotune\n"));
1607 }
1608
1609 /*
1610 * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF
1611 * synchronization.
1612 */
1613 void
ural_enable_tsf_sync(struct ural_softc * sc)1614 ural_enable_tsf_sync(struct ural_softc *sc)
1615 {
1616 struct ieee80211com *ic = &sc->sc_ic;
1617 uint16_t logcwmin, preload, tmp;
1618
1619 /* first, disable TSF synchronization */
1620 ural_write(sc, RAL_TXRX_CSR19, 0);
1621
1622 tmp = (16 * ic->ic_bss->ni_intval) << 4;
1623 ural_write(sc, RAL_TXRX_CSR18, tmp);
1624
1625 #ifndef IEEE80211_STA_ONLY
1626 if (ic->ic_opmode == IEEE80211_M_IBSS) {
1627 logcwmin = 2;
1628 preload = 320;
1629 } else
1630 #endif
1631 {
1632 logcwmin = 0;
1633 preload = 6;
1634 }
1635 tmp = logcwmin << 12 | preload;
1636 ural_write(sc, RAL_TXRX_CSR20, tmp);
1637
1638 /* finally, enable TSF synchronization */
1639 tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN;
1640 if (ic->ic_opmode == IEEE80211_M_STA)
1641 tmp |= RAL_ENABLE_TSF_SYNC(1);
1642 #ifndef IEEE80211_STA_ONLY
1643 else
1644 tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR;
1645 #endif
1646 ural_write(sc, RAL_TXRX_CSR19, tmp);
1647
1648 DPRINTF(("enabling TSF synchronization\n"));
1649 }
1650
1651 void
ural_update_slot(struct ural_softc * sc)1652 ural_update_slot(struct ural_softc *sc)
1653 {
1654 struct ieee80211com *ic = &sc->sc_ic;
1655 uint16_t slottime, sifs, eifs;
1656
1657 slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ?
1658 IEEE80211_DUR_DS_SHSLOT : IEEE80211_DUR_DS_SLOT;
1659
1660 /*
1661 * These settings may sound a bit inconsistent but this is what the
1662 * reference driver does.
1663 */
1664 if (ic->ic_curmode == IEEE80211_MODE_11B) {
1665 sifs = 16 - RAL_RXTX_TURNAROUND;
1666 eifs = 364;
1667 } else {
1668 sifs = 10 - RAL_RXTX_TURNAROUND;
1669 eifs = 64;
1670 }
1671
1672 ural_write(sc, RAL_MAC_CSR10, slottime);
1673 ural_write(sc, RAL_MAC_CSR11, sifs);
1674 ural_write(sc, RAL_MAC_CSR12, eifs);
1675 }
1676
1677 void
ural_set_txpreamble(struct ural_softc * sc)1678 ural_set_txpreamble(struct ural_softc *sc)
1679 {
1680 uint16_t tmp;
1681
1682 tmp = ural_read(sc, RAL_TXRX_CSR10);
1683
1684 tmp &= ~RAL_SHORT_PREAMBLE;
1685 if (sc->sc_ic.ic_flags & IEEE80211_F_SHPREAMBLE)
1686 tmp |= RAL_SHORT_PREAMBLE;
1687
1688 ural_write(sc, RAL_TXRX_CSR10, tmp);
1689 }
1690
1691 void
ural_set_basicrates(struct ural_softc * sc)1692 ural_set_basicrates(struct ural_softc *sc)
1693 {
1694 struct ieee80211com *ic = &sc->sc_ic;
1695
1696 /* update basic rate set */
1697 if (ic->ic_curmode == IEEE80211_MODE_11B) {
1698 /* 11b basic rates: 1, 2Mbps */
1699 ural_write(sc, RAL_TXRX_CSR11, 0x3);
1700 } else {
1701 /* 11b/g basic rates: 1, 2, 5.5, 11Mbps */
1702 ural_write(sc, RAL_TXRX_CSR11, 0xf);
1703 }
1704 }
1705
1706 void
ural_set_bssid(struct ural_softc * sc,const uint8_t * bssid)1707 ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid)
1708 {
1709 uint16_t tmp;
1710
1711 tmp = bssid[0] | bssid[1] << 8;
1712 ural_write(sc, RAL_MAC_CSR5, tmp);
1713
1714 tmp = bssid[2] | bssid[3] << 8;
1715 ural_write(sc, RAL_MAC_CSR6, tmp);
1716
1717 tmp = bssid[4] | bssid[5] << 8;
1718 ural_write(sc, RAL_MAC_CSR7, tmp);
1719
1720 DPRINTF(("setting BSSID to %s\n", ether_sprintf((uint8_t *)bssid)));
1721 }
1722
1723 void
ural_set_macaddr(struct ural_softc * sc,const uint8_t * addr)1724 ural_set_macaddr(struct ural_softc *sc, const uint8_t *addr)
1725 {
1726 uint16_t tmp;
1727
1728 tmp = addr[0] | addr[1] << 8;
1729 ural_write(sc, RAL_MAC_CSR2, tmp);
1730
1731 tmp = addr[2] | addr[3] << 8;
1732 ural_write(sc, RAL_MAC_CSR3, tmp);
1733
1734 tmp = addr[4] | addr[5] << 8;
1735 ural_write(sc, RAL_MAC_CSR4, tmp);
1736
1737 DPRINTF(("setting MAC address to %s\n",
1738 ether_sprintf((uint8_t *)addr)));
1739 }
1740
1741 void
ural_update_promisc(struct ural_softc * sc)1742 ural_update_promisc(struct ural_softc *sc)
1743 {
1744 struct ifnet *ifp = &sc->sc_ic.ic_if;
1745 uint16_t tmp;
1746
1747 tmp = ural_read(sc, RAL_TXRX_CSR2);
1748
1749 tmp &= ~RAL_DROP_NOT_TO_ME;
1750 if (!(ifp->if_flags & IFF_PROMISC))
1751 tmp |= RAL_DROP_NOT_TO_ME;
1752
1753 ural_write(sc, RAL_TXRX_CSR2, tmp);
1754
1755 DPRINTF(("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ?
1756 "entering" : "leaving"));
1757 }
1758
1759 const char *
ural_get_rf(int rev)1760 ural_get_rf(int rev)
1761 {
1762 switch (rev) {
1763 case RAL_RF_2522: return "RT2522";
1764 case RAL_RF_2523: return "RT2523";
1765 case RAL_RF_2524: return "RT2524";
1766 case RAL_RF_2525: return "RT2525";
1767 case RAL_RF_2525E: return "RT2525e";
1768 case RAL_RF_2526: return "RT2526";
1769 case RAL_RF_5222: return "RT5222";
1770 default: return "unknown";
1771 }
1772 }
1773
/*
 * Pull the static configuration out of the device EEPROM: chip
 * revisions, antenna setup, MAC address, BBP defaults and per-channel
 * Tx power.
 */
void
ural_read_eeprom(struct ural_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t val;

	/* retrieve MAC/BBP type */
	ural_eeprom_read(sc, RAL_EEPROM_MACBBP, &val, 2);
	sc->macbbp_rev = letoh16(val);

	/* CONFIG0 packs several capability fields into one 16-bit word */
	ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2);
	val = letoh16(val);
	sc->rf_rev = (val >> 11) & 0x7;		/* RF chip revision */
	sc->hw_radio = (val >> 10) & 0x1;
	sc->led_mode = (val >> 6) & 0x7;
	sc->rx_ant = (val >> 4) & 0x3;		/* default Rx antenna */
	sc->tx_ant = (val >> 2) & 0x3;		/* default Tx antenna */
	sc->nb_ant = val & 0x3;			/* number of antennas */

	/* read MAC address */
	ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_myaddr, 6);

	/* read default values for BBP registers (16 reg/val pairs) */
	ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16);

	/* read Tx power for all b/g channels */
	ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14);
}
1802
1803 int
ural_bbp_init(struct ural_softc * sc)1804 ural_bbp_init(struct ural_softc *sc)
1805 {
1806 int i, ntries;
1807
1808 /* wait for BBP to be ready */
1809 for (ntries = 0; ntries < 100; ntries++) {
1810 if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0)
1811 break;
1812 DELAY(1000);
1813 }
1814 if (ntries == 100) {
1815 printf("%s: timeout waiting for BBP\n", sc->sc_dev.dv_xname);
1816 return EIO;
1817 }
1818
1819 /* initialize BBP registers to default values */
1820 for (i = 0; i < nitems(ural_def_bbp); i++)
1821 ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val);
1822
1823 #if 0
1824 /* initialize BBP registers to values stored in EEPROM */
1825 for (i = 0; i < 16; i++) {
1826 if (sc->bbp_prom[i].reg == 0xff)
1827 continue;
1828 ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
1829 }
1830 #endif
1831
1832 return 0;
1833 }
1834
1835 void
ural_set_txantenna(struct ural_softc * sc,int antenna)1836 ural_set_txantenna(struct ural_softc *sc, int antenna)
1837 {
1838 uint16_t tmp;
1839 uint8_t tx;
1840
1841 tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK;
1842 if (antenna == 1)
1843 tx |= RAL_BBP_ANTA;
1844 else if (antenna == 2)
1845 tx |= RAL_BBP_ANTB;
1846 else
1847 tx |= RAL_BBP_DIVERSITY;
1848
1849 /* need to force I/Q flip for RF 2525e, 2526 and 5222 */
1850 if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 ||
1851 sc->rf_rev == RAL_RF_5222)
1852 tx |= RAL_BBP_FLIPIQ;
1853
1854 ural_bbp_write(sc, RAL_BBP_TX, tx);
1855
1856 /* update flags in PHY_CSR5 and PHY_CSR6 too */
1857 tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7;
1858 ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7));
1859
1860 tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7;
1861 ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7));
1862 }
1863
1864 void
ural_set_rxantenna(struct ural_softc * sc,int antenna)1865 ural_set_rxantenna(struct ural_softc *sc, int antenna)
1866 {
1867 uint8_t rx;
1868
1869 rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK;
1870 if (antenna == 1)
1871 rx |= RAL_BBP_ANTA;
1872 else if (antenna == 2)
1873 rx |= RAL_BBP_ANTB;
1874 else
1875 rx |= RAL_BBP_DIVERSITY;
1876
1877 /* need to force no I/Q flip for RF 2525e and 2526 */
1878 if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526)
1879 rx &= ~RAL_BBP_FLIPIQ;
1880
1881 ural_bbp_write(sc, RAL_BBP_RX, rx);
1882 }
1883
1884 int
ural_init(struct ifnet * ifp)1885 ural_init(struct ifnet *ifp)
1886 {
1887 struct ural_softc *sc = ifp->if_softc;
1888 struct ieee80211com *ic = &sc->sc_ic;
1889 uint16_t tmp;
1890 usbd_status error;
1891 int i, ntries;
1892
1893 ural_stop(ifp, 0);
1894
1895 /* initialize MAC registers to default values */
1896 for (i = 0; i < nitems(ural_def_mac); i++)
1897 ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val);
1898
1899 /* wait for BBP and RF to wake up (this can take a long time!) */
1900 for (ntries = 0; ntries < 100; ntries++) {
1901 tmp = ural_read(sc, RAL_MAC_CSR17);
1902 if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) ==
1903 (RAL_BBP_AWAKE | RAL_RF_AWAKE))
1904 break;
1905 DELAY(1000);
1906 }
1907 if (ntries == 100) {
1908 printf("%s: timeout waiting for BBP/RF to wakeup\n",
1909 sc->sc_dev.dv_xname);
1910 error = EIO;
1911 goto fail;
1912 }
1913
1914 /* we're ready! */
1915 ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY);
1916
1917 /* set basic rate set (will be updated later) */
1918 ural_write(sc, RAL_TXRX_CSR11, 0x153);
1919
1920 error = ural_bbp_init(sc);
1921 if (error != 0)
1922 goto fail;
1923
1924 /* set default BSS channel */
1925 ic->ic_bss->ni_chan = ic->ic_ibss_chan;
1926 ural_set_chan(sc, ic->ic_bss->ni_chan);
1927
1928 /* clear statistic registers (STA_CSR0 to STA_CSR10) */
1929 ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
1930
1931 /* set default sensitivity */
1932 ural_bbp_write(sc, 17, 0x48);
1933
1934 ural_set_txantenna(sc, 1);
1935 ural_set_rxantenna(sc, 1);
1936
1937 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
1938 ural_set_macaddr(sc, ic->ic_myaddr);
1939
1940 /*
1941 * Copy WEP keys into adapter's memory (SEC_CSR0 to SEC_CSR31).
1942 */
1943 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1944 struct ieee80211_key *k = &ic->ic_nw_keys[i];
1945 ural_write_multi(sc, RAL_SEC_CSR0 + i * IEEE80211_KEYBUF_SIZE,
1946 k->k_key, IEEE80211_KEYBUF_SIZE);
1947 }
1948
1949 /*
1950 * Allocate xfer for AMRR statistics requests.
1951 */
1952 sc->amrr_xfer = usbd_alloc_xfer(sc->sc_udev);
1953 if (sc->amrr_xfer == NULL) {
1954 printf("%s: could not allocate AMRR xfer\n",
1955 sc->sc_dev.dv_xname);
1956 goto fail;
1957 }
1958
1959 /*
1960 * Open Tx and Rx USB bulk pipes.
1961 */
1962 error = usbd_open_pipe(sc->sc_iface, sc->sc_tx_no, USBD_EXCLUSIVE_USE,
1963 &sc->sc_tx_pipeh);
1964 if (error != 0) {
1965 printf("%s: could not open Tx pipe: %s\n",
1966 sc->sc_dev.dv_xname, usbd_errstr(error));
1967 goto fail;
1968 }
1969 error = usbd_open_pipe(sc->sc_iface, sc->sc_rx_no, USBD_EXCLUSIVE_USE,
1970 &sc->sc_rx_pipeh);
1971 if (error != 0) {
1972 printf("%s: could not open Rx pipe: %s\n",
1973 sc->sc_dev.dv_xname, usbd_errstr(error));
1974 goto fail;
1975 }
1976
1977 /*
1978 * Allocate Tx and Rx xfer queues.
1979 */
1980 error = ural_alloc_tx_list(sc);
1981 if (error != 0) {
1982 printf("%s: could not allocate Tx list\n",
1983 sc->sc_dev.dv_xname);
1984 goto fail;
1985 }
1986 error = ural_alloc_rx_list(sc);
1987 if (error != 0) {
1988 printf("%s: could not allocate Rx list\n",
1989 sc->sc_dev.dv_xname);
1990 goto fail;
1991 }
1992
1993 /*
1994 * Start up the receive pipe.
1995 */
1996 for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
1997 struct ural_rx_data *data = &sc->rx_data[i];
1998
1999 usbd_setup_xfer(data->xfer, sc->sc_rx_pipeh, data, data->buf,
2000 MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
2001 error = usbd_transfer(data->xfer);
2002 if (error != 0 && error != USBD_IN_PROGRESS) {
2003 printf("%s: could not queue Rx transfer\n",
2004 sc->sc_dev.dv_xname);
2005 goto fail;
2006 }
2007 }
2008
2009 /* kick Rx */
2010 tmp = RAL_DROP_PHY_ERROR | RAL_DROP_CRC_ERROR;
2011 if (ic->ic_opmode != IEEE80211_M_MONITOR) {
2012 tmp |= RAL_DROP_CTL | RAL_DROP_VERSION_ERROR;
2013 #ifndef IEEE80211_STA_ONLY
2014 if (ic->ic_opmode != IEEE80211_M_HOSTAP)
2015 #endif
2016 tmp |= RAL_DROP_TODS;
2017 if (!(ifp->if_flags & IFF_PROMISC))
2018 tmp |= RAL_DROP_NOT_TO_ME;
2019 }
2020 ural_write(sc, RAL_TXRX_CSR2, tmp);
2021
2022 ifq_clr_oactive(&ifp->if_snd);
2023 ifp->if_flags |= IFF_RUNNING;
2024
2025 if (ic->ic_opmode == IEEE80211_M_MONITOR)
2026 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2027 else
2028 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2029
2030 return 0;
2031
2032 fail: ural_stop(ifp, 1);
2033 return error;
2034 }
2035
/*
 * Stop the interface: quiesce net80211, disable and reset the chip,
 * then release all USB resources.  The `disable' argument is accepted
 * for the stop-callback convention but is unused in this body.
 * Teardown order matters: nodes are freed before the rings they may
 * reference.
 */
void
ural_stop(struct ifnet *ifp, int disable)
{
	struct ural_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	/* cancel the Tx watchdog and mark the interface down */
	sc->sc_tx_timer = 0;
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);	/* free all nodes */

	/* disable Rx */
	ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX);

	/* reset ASIC and BBP (but won't reset MAC registers!) */
	ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP);
	ural_write(sc, RAL_MAC_CSR1, 0);

	if (sc->amrr_xfer != NULL) {
		usbd_free_xfer(sc->amrr_xfer);
		sc->amrr_xfer = NULL;
	}
	if (sc->sc_rx_pipeh != NULL) {
		usbd_close_pipe(sc->sc_rx_pipeh);
		sc->sc_rx_pipeh = NULL;
	}
	if (sc->sc_tx_pipeh != NULL) {
		usbd_close_pipe(sc->sc_tx_pipeh);
		sc->sc_tx_pipeh = NULL;
	}

	ural_free_rx_list(sc);
	ural_free_tx_list(sc);
}
2072
/*
 * New-association hook (presumably installed as the net80211 newassoc
 * callback at attach time -- confirm there).  AMRR will raise the rate
 * later; begin each node at the most robust rate.
 */
void
ural_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
{
	/* start with lowest Tx rate */
	ni->ni_txrate = 0;
}
2079
2080 void
ural_amrr_start(struct ural_softc * sc,struct ieee80211_node * ni)2081 ural_amrr_start(struct ural_softc *sc, struct ieee80211_node *ni)
2082 {
2083 int i;
2084
2085 /* clear statistic registers (STA_CSR0 to STA_CSR10) */
2086 ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
2087
2088 ieee80211_amrr_node_init(&sc->amrr, &sc->amn);
2089
2090 /* set rate to some reasonable initial value */
2091 for (i = ni->ni_rates.rs_nrates - 1;
2092 i > 0 && (ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL) > 72;
2093 i--);
2094 ni->ni_txrate = i;
2095
2096 if (!usbd_is_dying(sc->sc_udev))
2097 timeout_add_sec(&sc->amrr_to, 1);
2098 }
2099
/*
 * Periodic AMRR callback: launch an asynchronous read of the Tx
 * statistics registers on the default pipe; ural_amrr_update() runs
 * on completion and performs the actual rate adaptation.
 */
void
ural_amrr_timeout(void *arg)
{
	struct ural_softc *sc = arg;
	usb_device_request_t req;
	int s;

	if (usbd_is_dying(sc->sc_udev))
		return;

	/* keep the device referenced while the request is set up */
	usbd_ref_incr(sc->sc_udev);

	s = splusb();

	/*
	 * Asynchronously read statistic registers (cleared by read).
	 */
	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = RAL_READ_MULTI_MAC;
	USETW(req.wValue, 0);
	USETW(req.wIndex, RAL_STA_CSR0);
	USETW(req.wLength, sizeof sc->sta);

	usbd_setup_default_xfer(sc->amrr_xfer, sc->sc_udev, sc,
	    USBD_DEFAULT_TIMEOUT, &req, sc->sta, sizeof sc->sta, 0,
	    ural_amrr_update);
	/* best-effort: a failed submit simply skips this round */
	(void)usbd_transfer(sc->amrr_xfer);

	splx(s);

	usbd_ref_decr(sc->sc_udev);
}
2132
2133 void
ural_amrr_update(struct usbd_xfer * xfer,void * priv,usbd_status status)2134 ural_amrr_update(struct usbd_xfer *xfer, void *priv,
2135 usbd_status status)
2136 {
2137 struct ural_softc *sc = (struct ural_softc *)priv;
2138 struct ifnet *ifp = &sc->sc_ic.ic_if;
2139
2140 if (status != USBD_NORMAL_COMPLETION) {
2141 printf("%s: could not retrieve Tx statistics - cancelling "
2142 "automatic rate control\n", sc->sc_dev.dv_xname);
2143 return;
2144 }
2145
2146 /* count TX retry-fail as Tx errors */
2147 ifp->if_oerrors += letoh16(sc->sta[9]);
2148
2149 sc->amn.amn_retrycnt =
2150 letoh16(sc->sta[7]) + /* TX one-retry ok count */
2151 letoh16(sc->sta[8]) + /* TX more-retry ok count */
2152 letoh16(sc->sta[9]); /* TX retry-fail count */
2153
2154 sc->amn.amn_txcnt =
2155 sc->amn.amn_retrycnt +
2156 letoh16(sc->sta[6]); /* TX no-retry ok count */
2157
2158 ieee80211_amrr_choose(&sc->amrr, sc->sc_ic.ic_bss, &sc->amn);
2159
2160 if (!usbd_is_dying(sc->sc_udev))
2161 timeout_add_sec(&sc->amrr_to, 1);
2162 }
2163