1 /*	$OpenBSD: if_ral.c,v 1.149 2022/04/21 21:03:03 stsp Exp $	*/
2 
3 /*-
4  * Copyright (c) 2005, 2006
5  *	Damien Bergamini <damien.bergamini@free.fr>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Ralink Technology RT2500USB chipset driver
22  * http://www.ralinktech.com.tw/
23  */
24 
25 #include "bpfilter.h"
26 
27 #include <sys/param.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/kernel.h>
31 #include <sys/socket.h>
32 #include <sys/systm.h>
33 #include <sys/timeout.h>
34 #include <sys/conf.h>
35 #include <sys/device.h>
36 #include <sys/endian.h>
37 
38 #include <machine/intr.h>
39 
40 #if NBPFILTER > 0
41 #include <net/bpf.h>
42 #endif
43 #include <net/if.h>
44 #include <net/if_dl.h>
45 #include <net/if_media.h>
46 
47 #include <netinet/in.h>
48 #include <netinet/if_ether.h>
49 
50 #include <net80211/ieee80211_var.h>
51 #include <net80211/ieee80211_amrr.h>
52 #include <net80211/ieee80211_radiotap.h>
53 
54 #include <dev/usb/usb.h>
55 #include <dev/usb/usbdi.h>
56 #include <dev/usb/usbdi_util.h>
57 #include <dev/usb/usbdevs.h>
58 
59 #include <dev/usb/if_ralreg.h>
60 #include <dev/usb/if_ralvar.h>
61 
62 #ifdef URAL_DEBUG
63 #define DPRINTF(x)	do { if (ural_debug) printf x; } while (0)
64 #define DPRINTFN(n, x)	do { if (ural_debug >= (n)) printf x; } while (0)
65 int ural_debug = 0;
66 #else
67 #define DPRINTF(x)
68 #define DPRINTFN(n, x)
69 #endif
70 
71 /* various supported device vendors/products */
72 static const struct usb_devno ural_devs[] = {
73 	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570 },
74 	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570_2 },
75 	{ USB_VENDOR_BELKIN,		USB_PRODUCT_BELKIN_F5D7050 },
76 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54G },
77 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54GP },
78 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_HU200TS },
79 	{ USB_VENDOR_CONCEPTRONIC2,	USB_PRODUCT_CONCEPTRONIC2_C54RU },
80 	{ USB_VENDOR_DLINK,		USB_PRODUCT_DLINK_RT2570 },
81 	{ USB_VENDOR_GIGABYTE,		USB_PRODUCT_GIGABYTE_GNWBKG },
82 	{ USB_VENDOR_GUILLEMOT,		USB_PRODUCT_GUILLEMOT_HWGUSB254 },
83 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54 },
84 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54AI },
85 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54YB },
86 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_NINWIFI },
87 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570 },
88 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_2 },
89 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_3 },
90 	{ USB_VENDOR_NOVATECH,		USB_PRODUCT_NOVATECH_NV902W },
91 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570 },
92 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_2 },
93 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_3 },
94 	{ USB_VENDOR_SPHAIRON,		USB_PRODUCT_SPHAIRON_UB801R },
95 	{ USB_VENDOR_SURECOM,		USB_PRODUCT_SURECOM_RT2570 },
96 	{ USB_VENDOR_VTECH,		USB_PRODUCT_VTECH_RT2570 },
97 	{ USB_VENDOR_ZINWELL,		USB_PRODUCT_ZINWELL_RT2570 }
98 };
99 
100 int		ural_alloc_tx_list(struct ural_softc *);
101 void		ural_free_tx_list(struct ural_softc *);
102 int		ural_alloc_rx_list(struct ural_softc *);
103 void		ural_free_rx_list(struct ural_softc *);
104 int		ural_media_change(struct ifnet *);
105 void		ural_next_scan(void *);
106 void		ural_task(void *);
107 int		ural_newstate(struct ieee80211com *, enum ieee80211_state,
108 		    int);
109 void		ural_txeof(struct usbd_xfer *, void *, usbd_status);
110 void		ural_rxeof(struct usbd_xfer *, void *, usbd_status);
111 #if NBPFILTER > 0
112 uint8_t		ural_rxrate(const struct ural_rx_desc *);
113 #endif
114 int		ural_ack_rate(struct ieee80211com *, int);
115 uint16_t	ural_txtime(int, int, uint32_t);
116 uint8_t		ural_plcp_signal(int);
117 void		ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *,
118 		    uint32_t, int, int);
119 #ifndef IEEE80211_STA_ONLY
120 int		ural_tx_bcn(struct ural_softc *, struct mbuf *,
121 		    struct ieee80211_node *);
122 #endif
123 int		ural_tx_data(struct ural_softc *, struct mbuf *,
124 		    struct ieee80211_node *);
125 void		ural_start(struct ifnet *);
126 void		ural_watchdog(struct ifnet *);
127 int		ural_ioctl(struct ifnet *, u_long, caddr_t);
128 void		ural_eeprom_read(struct ural_softc *, uint16_t, void *, int);
129 uint16_t	ural_read(struct ural_softc *, uint16_t);
130 void		ural_read_multi(struct ural_softc *, uint16_t, void *, int);
131 void		ural_write(struct ural_softc *, uint16_t, uint16_t);
132 void		ural_write_multi(struct ural_softc *, uint16_t, void *, int);
133 void		ural_bbp_write(struct ural_softc *, uint8_t, uint8_t);
134 uint8_t		ural_bbp_read(struct ural_softc *, uint8_t);
135 void		ural_rf_write(struct ural_softc *, uint8_t, uint32_t);
136 void		ural_set_chan(struct ural_softc *, struct ieee80211_channel *);
137 void		ural_disable_rf_tune(struct ural_softc *);
138 void		ural_enable_tsf_sync(struct ural_softc *);
139 void		ural_update_slot(struct ural_softc *);
140 void		ural_set_txpreamble(struct ural_softc *);
141 void		ural_set_basicrates(struct ural_softc *);
142 void		ural_set_bssid(struct ural_softc *, const uint8_t *);
143 void		ural_set_macaddr(struct ural_softc *, const uint8_t *);
144 void		ural_update_promisc(struct ural_softc *);
145 const char	*ural_get_rf(int);
146 void		ural_read_eeprom(struct ural_softc *);
147 int		ural_bbp_init(struct ural_softc *);
148 void		ural_set_txantenna(struct ural_softc *, int);
149 void		ural_set_rxantenna(struct ural_softc *, int);
150 int		ural_init(struct ifnet *);
151 void		ural_stop(struct ifnet *, int);
152 void		ural_newassoc(struct ieee80211com *, struct ieee80211_node *,
153 		    int);
154 void		ural_amrr_start(struct ural_softc *, struct ieee80211_node *);
155 void		ural_amrr_timeout(void *);
156 void		ural_amrr_update(struct usbd_xfer *, void *,
157 		    usbd_status status);
158 
159 static const struct {
160 	uint16_t	reg;
161 	uint16_t	val;
162 } ural_def_mac[] = {
163 	RAL_DEF_MAC
164 };
165 
166 static const struct {
167 	uint8_t	reg;
168 	uint8_t	val;
169 } ural_def_bbp[] = {
170 	RAL_DEF_BBP
171 };
172 
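/* per-channel values for RF register R2, indexed by IEEE channel number - 1 */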
173 static const uint32_t ural_rf2522_r2[] =    RAL_RF2522_R2;
174 static const uint32_t ural_rf2523_r2[] =    RAL_RF2523_R2;
175 static const uint32_t ural_rf2524_r2[] =    RAL_RF2524_R2;
176 static const uint32_t ural_rf2525_r2[] =    RAL_RF2525_R2;
177 static const uint32_t ural_rf2525_hi_r2[] = RAL_RF2525_HI_R2;
178 static const uint32_t ural_rf2525e_r2[] =   RAL_RF2525E_R2;
179 static const uint32_t ural_rf2526_hi_r2[] = RAL_RF2526_HI_R2;
180 static const uint32_t ural_rf2526_r2[] =    RAL_RF2526_R2;
181 
182 int ural_match(struct device *, void *, void *);
183 void ural_attach(struct device *, struct device *, void *);
184 int ural_detach(struct device *, int);
185 
186 struct cfdriver ural_cd = {
187 	NULL, "ural", DV_IFNET
188 };
189 
190 const struct cfattach ural_ca = {
191 	sizeof(struct ural_softc), ural_match, ural_attach, ural_detach
192 };
193 
194 int
195 ural_match(struct device *parent, void *match, void *aux)
196 {
197 	struct usb_attach_arg *uaa = aux;
198 
199 	if (uaa->configno != RAL_CONFIG_NO || uaa->ifaceno != RAL_IFACE_NO)
200 		return UMATCH_NONE;
201 
202 	return (usb_lookup(ural_devs, uaa->vendor, uaa->product) != NULL) ?
203 	    UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
204 }
205 
206 void
207 ural_attach(struct device *parent, struct device *self, void *aux)
208 {
209 	struct ural_softc *sc = (struct ural_softc *)self;
210 	struct usb_attach_arg *uaa = aux;
211 	struct ieee80211com *ic = &sc->sc_ic;
212 	struct ifnet *ifp = &ic->ic_if;
213 	usb_interface_descriptor_t *id;
214 	usb_endpoint_descriptor_t *ed;
215 	int i;
216 
217 	sc->sc_udev = uaa->device;
218 	sc->sc_iface = uaa->iface;
219 
220 	/*
221 	 * Find endpoints.
222 	 */
223 	id = usbd_get_interface_descriptor(sc->sc_iface);
224 
225 	sc->sc_rx_no = sc->sc_tx_no = -1;
226 	for (i = 0; i < id->bNumEndpoints; i++) {
227 		ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i);
228 		if (ed == NULL) {
229 			printf("%s: no endpoint descriptor for iface %d\n",
230 			    sc->sc_dev.dv_xname, i);
231 			return;
232 		}
233 
234 		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
235 		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
236 			sc->sc_rx_no = ed->bEndpointAddress;
237 		else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
238 		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
239 			sc->sc_tx_no = ed->bEndpointAddress;
240 	}
241 	if (sc->sc_rx_no == -1 || sc->sc_tx_no == -1) {
242 		printf("%s: missing endpoint\n", sc->sc_dev.dv_xname);
243 		return;
244 	}
245 
246 	usb_init_task(&sc->sc_task, ural_task, sc, USB_TASK_TYPE_GENERIC);
247 	timeout_set(&sc->scan_to, ural_next_scan, sc);
248 
249 	sc->amrr.amrr_min_success_threshold =  1;
250 	sc->amrr.amrr_max_success_threshold = 10;
251 	timeout_set(&sc->amrr_to, ural_amrr_timeout, sc);
252 
253 	/* retrieve RT2570 rev. no */
254 	sc->asic_rev = ural_read(sc, RAL_MAC_CSR0);
255 
256 	/* retrieve MAC address and various other things from EEPROM */
257 	ural_read_eeprom(sc);
258 
259 	printf("%s: MAC/BBP RT%04x (rev 0x%02x), RF %s, address %s\n",
260 	    sc->sc_dev.dv_xname, sc->macbbp_rev, sc->asic_rev,
261 	    ural_get_rf(sc->rf_rev), ether_sprintf(ic->ic_myaddr));
262 
263 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
264 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
265 	ic->ic_state = IEEE80211_S_INIT;
266 
267 	/* set device capabilities */
268 	ic->ic_caps =
269 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
270 #ifndef IEEE80211_STA_ONLY
271 	    IEEE80211_C_IBSS |		/* IBSS mode supported */
272 	    IEEE80211_C_HOSTAP |	/* HostAp mode supported */
273 #endif
274 	    IEEE80211_C_TXPMGT |	/* tx power management */
275 	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
276 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
277 	    IEEE80211_C_WEP |		/* s/w WEP */
278 	    IEEE80211_C_RSN;		/* WPA/RSN */
279 
280 	/* set supported .11b and .11g rates */
281 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
282 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
283 
284 	/* set supported .11b and .11g channels (1 through 14) */
285 	for (i = 1; i <= 14; i++) {
286 		ic->ic_channels[i].ic_freq =
287 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
288 		ic->ic_channels[i].ic_flags =
289 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
290 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
291 	}
292 
293 	ifp->if_softc = sc;
294 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
295 	ifp->if_ioctl = ural_ioctl;
296 	ifp->if_start = ural_start;
297 	ifp->if_watchdog = ural_watchdog;
298 	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
299 
300 	if_attach(ifp);
301 	ieee80211_ifattach(ifp);
302 	ic->ic_newassoc = ural_newassoc;
303 
304 	/* override state transition machine */
305 	sc->sc_newstate = ic->ic_newstate;
306 	ic->ic_newstate = ural_newstate;
307 	ieee80211_media_init(ifp, ural_media_change, ieee80211_media_status);
308 
309 #if NBPFILTER > 0
310 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
311 	    sizeof (struct ieee80211_frame) + 64);
312 
313 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
314 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
315 	sc->sc_rxtap.wr_ihdr.it_present = htole32(RAL_RX_RADIOTAP_PRESENT);
316 
317 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
318 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
319 	sc->sc_txtap.wt_ihdr.it_present = htole32(RAL_TX_RADIOTAP_PRESENT);
320 #endif
321 }
322 
323 int
324 ural_detach(struct device *self, int flags)
325 {
326 	struct ural_softc *sc = (struct ural_softc *)self;
327 	struct ifnet *ifp = &sc->sc_ic.ic_if;
328 	int s;
329 
330 	s = splusb();
331 
332 	if (timeout_initialized(&sc->scan_to))
333 		timeout_del(&sc->scan_to);
334 	if (timeout_initialized(&sc->amrr_to))
335 		timeout_del(&sc->amrr_to);
336 
337 	usb_rem_wait_task(sc->sc_udev, &sc->sc_task);
338 
339 	usbd_ref_wait(sc->sc_udev);
340 
341 	if (ifp->if_softc != NULL) {
342 		ieee80211_ifdetach(ifp);	/* free all nodes */
343 		if_detach(ifp);
344 	}
345 
346 	if (sc->amrr_xfer != NULL) {
347 		usbd_free_xfer(sc->amrr_xfer);
348 		sc->amrr_xfer = NULL;
349 	}
350 
351 	if (sc->sc_rx_pipeh != NULL)
352 		usbd_close_pipe(sc->sc_rx_pipeh);
353 
354 	if (sc->sc_tx_pipeh != NULL)
355 		usbd_close_pipe(sc->sc_tx_pipeh);
356 
357 	ural_free_rx_list(sc);
358 	ural_free_tx_list(sc);
359 
360 	splx(s);
361 
362 	return 0;
363 }
364 
365 int
366 ural_alloc_tx_list(struct ural_softc *sc)
367 {
368 	int i, error;
369 
370 	sc->tx_cur = sc->tx_queued = 0;
371 
372 	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
373 		struct ural_tx_data *data = &sc->tx_data[i];
374 
375 		data->sc = sc;
376 
377 		data->xfer = usbd_alloc_xfer(sc->sc_udev);
378 		if (data->xfer == NULL) {
379 			printf("%s: could not allocate tx xfer\n",
380 			    sc->sc_dev.dv_xname);
381 			error = ENOMEM;
382 			goto fail;
383 		}
384 		data->buf = usbd_alloc_buffer(data->xfer,
385 		    RAL_TX_DESC_SIZE + IEEE80211_MAX_LEN);
386 		if (data->buf == NULL) {
387 			printf("%s: could not allocate tx buffer\n",
388 			    sc->sc_dev.dv_xname);
389 			error = ENOMEM;
390 			goto fail;
391 		}
392 	}
393 
394 	return 0;
395 
396 fail:	ural_free_tx_list(sc);
397 	return error;
398 }
399 
400 void
401 ural_free_tx_list(struct ural_softc *sc)
402 {
403 	int i;
404 
405 	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
406 		struct ural_tx_data *data = &sc->tx_data[i];
407 
408 		if (data->xfer != NULL) {
409 			usbd_free_xfer(data->xfer);
410 			data->xfer = NULL;
411 		}
412 		/*
413 		 * The node has already been freed at that point so don't call
414 		 * ieee80211_release_node() here.
415 		 */
416 		data->ni = NULL;
417 	}
418 }
419 
420 int
421 ural_alloc_rx_list(struct ural_softc *sc)
422 {
423 	int i, error;
424 
425 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
426 		struct ural_rx_data *data = &sc->rx_data[i];
427 
428 		data->sc = sc;
429 
430 		data->xfer = usbd_alloc_xfer(sc->sc_udev);
431 		if (data->xfer == NULL) {
432 			printf("%s: could not allocate rx xfer\n",
433 			    sc->sc_dev.dv_xname);
434 			error = ENOMEM;
435 			goto fail;
436 		}
437 		if (usbd_alloc_buffer(data->xfer, MCLBYTES) == NULL) {
438 			printf("%s: could not allocate rx buffer\n",
439 			    sc->sc_dev.dv_xname);
440 			error = ENOMEM;
441 			goto fail;
442 		}
443 
444 		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
445 		if (data->m == NULL) {
446 			printf("%s: could not allocate rx mbuf\n",
447 			    sc->sc_dev.dv_xname);
448 			error = ENOMEM;
449 			goto fail;
450 		}
451 		MCLGET(data->m, M_DONTWAIT);
452 		if (!(data->m->m_flags & M_EXT)) {
453 			printf("%s: could not allocate rx mbuf cluster\n",
454 			    sc->sc_dev.dv_xname);
455 			error = ENOMEM;
456 			goto fail;
457 		}
458 		data->buf = mtod(data->m, uint8_t *);
459 	}
460 
461 	return 0;
462 
463 fail:	ural_free_rx_list(sc);
464 	return error;
465 }
466 
467 void
468 ural_free_rx_list(struct ural_softc *sc)
469 {
470 	int i;
471 
472 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
473 		struct ural_rx_data *data = &sc->rx_data[i];
474 
475 		if (data->xfer != NULL) {
476 			usbd_free_xfer(data->xfer);
477 			data->xfer = NULL;
478 		}
479 		if (data->m != NULL) {
480 			m_freem(data->m);
481 			data->m = NULL;
482 		}
483 	}
484 }
485 
486 int
487 ural_media_change(struct ifnet *ifp)
488 {
489 	int error;
490 
491 	error = ieee80211_media_change(ifp);
492 	if (error != ENETRESET)
493 		return error;
494 
495 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
496 		error = ural_init(ifp);
497 
498 	return error;
499 }
500 
501 /*
502  * This function is called periodically (every 200ms) during scanning to
503  * switch from one channel to another.
504  */
505 void
506 ural_next_scan(void *arg)
507 {
508 	struct ural_softc *sc = arg;
509 	struct ieee80211com *ic = &sc->sc_ic;
510 	struct ifnet *ifp = &ic->ic_if;
511 
512 	if (usbd_is_dying(sc->sc_udev))
513 		return;
514 
515 	usbd_ref_incr(sc->sc_udev);
516 
517 	if (ic->ic_state == IEEE80211_S_SCAN)
518 		ieee80211_next_scan(ifp);
519 
520 	usbd_ref_decr(sc->sc_udev);
521 }
522 
523 void
524 ural_task(void *arg)
525 {
526 	struct ural_softc *sc = arg;
527 	struct ieee80211com *ic = &sc->sc_ic;
528 	enum ieee80211_state ostate;
529 	struct ieee80211_node *ni;
530 
531 	if (usbd_is_dying(sc->sc_udev))
532 		return;
533 
534 	ostate = ic->ic_state;
535 
536 	switch (sc->sc_state) {
537 	case IEEE80211_S_INIT:
538 		if (ostate == IEEE80211_S_RUN) {
539 			/* abort TSF synchronization */
540 			ural_write(sc, RAL_TXRX_CSR19, 0);
541 
542 			/* force tx led to stop blinking */
543 			ural_write(sc, RAL_MAC_CSR20, 0);
544 		}
545 		break;
546 
547 	case IEEE80211_S_SCAN:
548 		ural_set_chan(sc, ic->ic_bss->ni_chan);
549 		if (!usbd_is_dying(sc->sc_udev))
550 			timeout_add_msec(&sc->scan_to, 200);
551 		break;
552 
553 	case IEEE80211_S_AUTH:
554 		ural_set_chan(sc, ic->ic_bss->ni_chan);
555 		break;
556 
557 	case IEEE80211_S_ASSOC:
558 		ural_set_chan(sc, ic->ic_bss->ni_chan);
559 		break;
560 
561 	case IEEE80211_S_RUN:
562 		ural_set_chan(sc, ic->ic_bss->ni_chan);
563 
564 		ni = ic->ic_bss;
565 
566 		if (ic->ic_opmode != IEEE80211_M_MONITOR) {
567 			ural_update_slot(sc);
568 			ural_set_txpreamble(sc);
569 			ural_set_basicrates(sc);
570 			ural_set_bssid(sc, ni->ni_bssid);
571 		}
572 
573 #ifndef IEEE80211_STA_ONLY
574 		if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
575 		    ic->ic_opmode == IEEE80211_M_IBSS) {
576 			struct mbuf *m = ieee80211_beacon_alloc(ic, ni);
577 			if (m == NULL) {
578 				printf("%s: could not allocate beacon\n",
579 				    sc->sc_dev.dv_xname);
580 				return;
581 			}
582 
583 			if (ural_tx_bcn(sc, m, ni) != 0) {
584 				m_freem(m);
585 				printf("%s: could not transmit beacon\n",
586 				    sc->sc_dev.dv_xname);
587 				return;
588 			}
589 
590 			/* beacon is no longer needed */
591 			m_freem(m);
592 		}
593 #endif
594 
595 		/* make tx led blink on tx (controlled by ASIC) */
596 		ural_write(sc, RAL_MAC_CSR20, 1);
597 
598 		if (ic->ic_opmode != IEEE80211_M_MONITOR)
599 			ural_enable_tsf_sync(sc);
600 
601 		if (ic->ic_opmode == IEEE80211_M_STA) {
602 			/* fake a join to init the tx rate */
603 			ural_newassoc(ic, ic->ic_bss, 1);
604 
605 			/* enable automatic rate control in STA mode */
606 			if (ic->ic_fixed_rate == -1)
607 				ural_amrr_start(sc, ic->ic_bss);
608 		}
609 
610 		break;
611 	}
612 
613 	sc->sc_newstate(ic, sc->sc_state, sc->sc_arg);
614 }
615 
616 int
617 ural_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
618 {
619 	struct ural_softc *sc = ic->ic_if.if_softc;
620 
621 	usb_rem_task(sc->sc_udev, &sc->sc_task);
622 	timeout_del(&sc->scan_to);
623 	timeout_del(&sc->amrr_to);
624 
625 	/* do it in a process context */
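	/* (register access sleeps; ural_task() performs the transition) */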
626 	sc->sc_state = nstate;
627 	sc->sc_arg = arg;
628 	usb_add_task(sc->sc_udev, &sc->sc_task);
629 	return 0;
630 }
631 
632 /* quickly determine if a given rate is CCK or OFDM */
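/*
 * Rates are in units of 500Kbps, so 12 is the lowest OFDM rate (6Mbps)
 * and 22 (11Mbps) is the only CCK rate above that threshold.
 */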
633 #define RAL_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22)
634 
635 #define RAL_ACK_SIZE	14	/* 10 + 4(FCS) */
636 #define RAL_CTS_SIZE	14	/* 10 + 4(FCS) */
637 
638 #define RAL_SIFS		10	/* us */
639 
640 #define RAL_RXTX_TURNAROUND	5	/* us */
641 
642 void
643 ural_txeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
644 {
645 	struct ural_tx_data *data = priv;
646 	struct ural_softc *sc = data->sc;
647 	struct ieee80211com *ic = &sc->sc_ic;
648 	struct ifnet *ifp = &ic->ic_if;
649 	int s;
650 
651 	if (status != USBD_NORMAL_COMPLETION) {
652 		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
653 			return;
654 
655 		printf("%s: could not transmit buffer: %s\n",
656 		    sc->sc_dev.dv_xname, usbd_errstr(status));
657 
658 		if (status == USBD_STALLED)
659 			usbd_clear_endpoint_stall_async(sc->sc_tx_pipeh);
660 
661 		ifp->if_oerrors++;
662 		return;
663 	}
664 
665 	s = splnet();
666 
667 	ieee80211_release_node(ic, data->ni);
668 	data->ni = NULL;
669 
670 	sc->tx_queued--;
671 
672 	DPRINTFN(10, ("tx done\n"));
673 
674 	sc->sc_tx_timer = 0;
675 	ifq_clr_oactive(&ifp->if_snd);
676 	ural_start(ifp);
677 
678 	splx(s);
679 }
680 
681 void
682 ural_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
683 {
684 	struct ural_rx_data *data = priv;
685 	struct ural_softc *sc = data->sc;
686 	struct ieee80211com *ic = &sc->sc_ic;
687 	struct ifnet *ifp = &ic->ic_if;
688 	const struct ural_rx_desc *desc;
689 	struct ieee80211_frame *wh;
690 	struct ieee80211_rxinfo rxi;
691 	struct ieee80211_node *ni;
692 	struct mbuf *mnew, *m;
693 	int s, len;
694 
695 	if (status != USBD_NORMAL_COMPLETION) {
696 		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
697 			return;
698 
699 		if (status == USBD_STALLED)
700 			usbd_clear_endpoint_stall_async(sc->sc_rx_pipeh);
701 		goto skip;
702 	}
703 
704 	usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL);
705 
706 	if (len < RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN) {
707 		DPRINTF(("%s: xfer too short %d\n", sc->sc_dev.dv_xname,
708 		    len));
709 		ifp->if_ierrors++;
710 		goto skip;
711 	}
712 
713 	/* rx descriptor is located at the end */
714 	desc = (struct ural_rx_desc *)(data->buf + len - RAL_RX_DESC_SIZE);
715 
716 	if (letoh32(desc->flags) & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) {
717 		/*
718 		 * This should not happen since we did not request to receive
719 		 * those frames when we filled RAL_TXRX_CSR2.
720 		 */
721 		DPRINTFN(5, ("PHY or CRC error\n"));
722 		ifp->if_ierrors++;
723 		goto skip;
724 	}
725 
726 	MGETHDR(mnew, M_DONTWAIT, MT_DATA);
727 	if (mnew == NULL) {
728 		printf("%s: could not allocate rx mbuf\n",
729 		    sc->sc_dev.dv_xname);
730 		ifp->if_ierrors++;
731 		goto skip;
732 	}
733 	MCLGET(mnew, M_DONTWAIT);
734 	if (!(mnew->m_flags & M_EXT)) {
735 		printf("%s: could not allocate rx mbuf cluster\n",
736 		    sc->sc_dev.dv_xname);
737 		m_freem(mnew);
738 		ifp->if_ierrors++;
739 		goto skip;
740 	}
741 	m = data->m;
742 	data->m = mnew;
743 	data->buf = mtod(data->m, uint8_t *);
744 
745 	/* finalize mbuf */
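	/* bits 16-27 of the Rx flags hold the frame length (FCS included) */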
746 	m->m_pkthdr.len = m->m_len = (letoh32(desc->flags) >> 16) & 0xfff;
747 
748 	s = splnet();
749 
750 #if NBPFILTER > 0
751 	if (sc->sc_drvbpf != NULL) {
752 		struct mbuf mb;
753 		struct ural_rx_radiotap_header *tap = &sc->sc_rxtap;
754 
755 		tap->wr_flags = IEEE80211_RADIOTAP_F_FCS;
756 		tap->wr_rate = ural_rxrate(desc);
757 		tap->wr_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
758 		tap->wr_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
759 		tap->wr_antenna = sc->rx_ant;
760 		tap->wr_antsignal = desc->rssi;
761 
762 		mb.m_data = (caddr_t)tap;
763 		mb.m_len = sc->sc_rxtap_len;
764 		mb.m_next = m;
765 		mb.m_nextpkt = NULL;
766 		mb.m_type = 0;
767 		mb.m_flags = 0;
768 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
769 	}
770 #endif
771 	m_adj(m, -IEEE80211_CRC_LEN);	/* trim FCS */
772 
773 	wh = mtod(m, struct ieee80211_frame *);
774 	ni = ieee80211_find_rxnode(ic, wh);
775 
776 	/* send the frame to the 802.11 layer */
777 	memset(&rxi, 0, sizeof(rxi));
778 	rxi.rxi_rssi = desc->rssi;
779 	ieee80211_input(ifp, m, ni, &rxi);
780 
781 	/* node is no longer needed */
782 	ieee80211_release_node(ic, ni);
783 
784 	splx(s);
785 
786 	DPRINTFN(15, ("rx done\n"));
787 
788 skip:	/* setup a new transfer */
789 	usbd_setup_xfer(xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES,
790 	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
791 	(void)usbd_transfer(xfer);
792 }
793 
794 /*
795  * This function is only used by the Rx radiotap code. It returns the rate at
796  * which a given frame was received.
797  */
798 #if NBPFILTER > 0
799 uint8_t
800 ural_rxrate(const struct ural_rx_desc *desc)
801 {
802 	if (letoh32(desc->flags) & RAL_RX_OFDM) {
803 		/* reverse function of ural_plcp_signal */
804 		switch (desc->rate) {
805 		case 0xb:	return 12;
806 		case 0xf:	return 18;
807 		case 0xa:	return 24;
808 		case 0xe:	return 36;
809 		case 0x9:	return 48;
810 		case 0xd:	return 72;
811 		case 0x8:	return 96;
812 		case 0xc:	return 108;
813 		}
814 	} else {
815 		if (desc->rate == 10)
816 			return 2;
817 		if (desc->rate == 20)
818 			return 4;
819 		if (desc->rate == 55)
820 			return 11;
821 		if (desc->rate == 110)
822 			return 22;
823 	}
824 	return 2;	/* should not get here */
825 }
826 #endif
827 
828 /*
829  * Return the expected ack rate for a frame transmitted at rate `rate'.
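 * The expected ACK uses the same modulation as the data frame and a rate
 * no higher than `rate' (rates are in units of 500Kbps).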
830  */
831 int
832 ural_ack_rate(struct ieee80211com *ic, int rate)
833 {
834 	switch (rate) {
835 	/* CCK rates */
836 	case 2:
837 		return 2;
838 	case 4:
839 	case 11:
840 	case 22:
841 		return (ic->ic_curmode == IEEE80211_MODE_11B) ? 4 : rate;
842 
843 	/* OFDM rates */
844 	case 12:
845 	case 18:
846 		return 12;
847 	case 24:
848 	case 36:
849 		return 24;
850 	case 48:
851 	case 72:
852 	case 96:
853 	case 108:
854 		return 48;
855 	}
856 
857 	/* default to 1Mbps */
858 	return 2;
859 }
860 
861 /*
862  * Compute the duration (in us) needed to transmit `len' bytes at rate `rate'.
863  * The function automatically determines the operating mode depending on the
864  * given rate. `flags' indicates whether short preamble is in use or not.
865  */
866 uint16_t
867 ural_txtime(int len, int rate, uint32_t flags)
868 {
869 	uint16_t txtime;
870 
871 	if (RAL_RATE_IS_OFDM(rate)) {
872 		/* IEEE Std 802.11g-2003, p. 44 */
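		/*
		 * An OFDM symbol lasts 4us and carries 2 * rate data bits
		 * (rate is in 500Kbps units); the PSDU needs 16 service bits,
		 * 8 * len data bits and 6 tail bits.  Add 16us of preamble,
		 * 4us of SIGNAL and a 6us signal extension.
		 */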
873 		txtime = (8 + 4 * len + 3 + rate - 1) / rate;
874 		txtime = 16 + 4 + 4 * txtime + 6;
875 	} else {
876 		/* IEEE Std 802.11b-1999, p. 28 */
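		/*
		 * PSDU airtime is ceil(8 * len / Mbps) microseconds, i.e.
		 * ceil(16 * len / rate) with rate in 500Kbps units; a short
		 * preamble+header adds 72+24us, a long one 144+48us.
		 */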
877 		txtime = (16 * len + rate - 1) / rate;
878 		if (rate != 2 && (flags & IEEE80211_F_SHPREAMBLE))
879 			txtime +=  72 + 24;
880 		else
881 			txtime += 144 + 48;
882 	}
883 	return txtime;
884 }
885 
886 uint8_t
887 ural_plcp_signal(int rate)
888 {
889 	switch (rate) {
890 	/* CCK rates (returned values are device-dependent) */
891 	case 2:		return 0x0;
892 	case 4:		return 0x1;
893 	case 11:	return 0x2;
894 	case 22:	return 0x3;
895 
896 	/* OFDM rates (cf. IEEE Std 802.11a-1999, p. 14, Table 80) */
897 	case 12:	return 0xb;
898 	case 18:	return 0xf;
899 	case 24:	return 0xa;
900 	case 36:	return 0xe;
901 	case 48:	return 0x9;
902 	case 72:	return 0xd;
903 	case 96:	return 0x8;
904 	case 108:	return 0xc;
905 
906 	/* unsupported rates (should not get here) */
907 	default:	return 0xff;
908 	}
909 }
910 
911 void
912 ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc,
913     uint32_t flags, int len, int rate)
914 {
915 	struct ieee80211com *ic = &sc->sc_ic;
916 	uint16_t plcp_length;
917 	int remainder;
918 
919 	desc->flags = htole32(flags);
920 	desc->flags |= htole32(len << 16);
921 
922 	desc->wme = htole16(
923 	    RAL_AIFSN(2) |
924 	    RAL_LOGCWMIN(3) |
925 	    RAL_LOGCWMAX(5));
926 
927 	/* setup PLCP fields */
928 	desc->plcp_signal  = ural_plcp_signal(rate);
929 	desc->plcp_service = 4;
930 
931 	len += IEEE80211_CRC_LEN;
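	/*
	 * The PLCP LENGTH field is the frame length in octets for OFDM
	 * (12 bits split across plcp_length_hi/lo) and the airtime in
	 * microseconds for CCK.  At 11Mbps the microsecond count may be
	 * ambiguous by one octet, so the SERVICE length extension bit
	 * disambiguates it.
	 */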
932 	if (RAL_RATE_IS_OFDM(rate)) {
933 		desc->flags |= htole32(RAL_TX_OFDM);
934 
935 		plcp_length = len & 0xfff;
936 		desc->plcp_length_hi = plcp_length >> 6;
937 		desc->plcp_length_lo = plcp_length & 0x3f;
938 	} else {
939 		plcp_length = (16 * len + rate - 1) / rate;
940 		if (rate == 22) {
941 			remainder = (16 * len) % 22;
942 			if (remainder != 0 && remainder < 7)
943 				desc->plcp_service |= RAL_PLCP_LENGEXT;
944 		}
945 		desc->plcp_length_hi = plcp_length >> 8;
946 		desc->plcp_length_lo = plcp_length & 0xff;
947 
948 		if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
949 			desc->plcp_signal |= 0x08;
950 	}
951 
952 	desc->iv = 0;
953 	desc->eiv = 0;
954 }
955 
956 #define RAL_TX_TIMEOUT	5000
957 
958 #ifndef IEEE80211_STA_ONLY
959 int
960 ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
961 {
962 	struct ural_tx_desc *desc;
963 	struct usbd_xfer *xfer;
964 	usbd_status error;
965 	uint8_t cmd = 0;
966 	uint8_t *buf;
967 	int xferlen, rate = 2;
968 
969 	xfer = usbd_alloc_xfer(sc->sc_udev);
970 	if (xfer == NULL)
971 		return ENOMEM;
972 
973 	/* xfer length needs to be a multiple of two! */
974 	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;
975 
976 	buf = usbd_alloc_buffer(xfer, xferlen);
977 	if (buf == NULL) {
978 		usbd_free_xfer(xfer);
979 		return ENOMEM;
980 	}
981 
982 	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, &cmd, sizeof cmd,
983 	    USBD_FORCE_SHORT_XFER | USBD_SYNCHRONOUS, RAL_TX_TIMEOUT, NULL);
984 
985 	error = usbd_transfer(xfer);
986 	if (error != 0) {
987 		usbd_free_xfer(xfer);
988 		return error;
989 	}
990 
991 	desc = (struct ural_tx_desc *)buf;
992 
993 	m_copydata(m0, 0, m0->m_pkthdr.len, buf + RAL_TX_DESC_SIZE);
994 	ural_setup_tx_desc(sc, desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP,
995 	    m0->m_pkthdr.len, rate);
996 
997 	DPRINTFN(10, ("sending beacon frame len=%u rate=%u xfer len=%u\n",
998 	    m0->m_pkthdr.len, rate, xferlen));
999 
1000 	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, buf, xferlen,
1001 	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY | USBD_SYNCHRONOUS,
1002 	    RAL_TX_TIMEOUT, NULL);
1003 
1004 	error = usbd_transfer(xfer);
1005 	usbd_free_xfer(xfer);
1006 
1007 	return error;
1008 }
1009 #endif
1010 
1011 int
1012 ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
1013 {
1014 	struct ieee80211com *ic = &sc->sc_ic;
1015 	struct ural_tx_desc *desc;
1016 	struct ural_tx_data *data;
1017 	struct ieee80211_frame *wh;
1018 	struct ieee80211_key *k;
1019 	uint32_t flags = RAL_TX_NEWSEQ;
1020 	uint16_t dur;
1021 	usbd_status error;
1022 	int rate, xferlen, pktlen, needrts = 0, needcts = 0;
1023 
1024 	wh = mtod(m0, struct ieee80211_frame *);
1025 
1026 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
1027 		k = ieee80211_get_txkey(ic, wh, ni);
1028 
1029 		if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL)
1030 			return ENOBUFS;
1031 
1032 		/* packet header may have moved, reset our local pointer */
1033 		wh = mtod(m0, struct ieee80211_frame *);
1034 	}
1035 
1036 	/* compute actual packet length (including CRC and crypto overhead) */
1037 	pktlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN;
1038 
1039 	/* pickup a rate */
1040 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
1041 	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1042 	     IEEE80211_FC0_TYPE_MGT)) {
1043 		/* mgmt/multicast frames are sent at the lowest avail. rate */
1044 		rate = ni->ni_rates.rs_rates[0];
1045 	} else if (ic->ic_fixed_rate != -1) {
1046 		rate = ic->ic_sup_rates[ic->ic_curmode].
1047 		    rs_rates[ic->ic_fixed_rate];
1048 	} else
1049 		rate = ni->ni_rates.rs_rates[ni->ni_txrate];
1050 	if (rate == 0)
1051 		rate = 2;	/* XXX should not happen */
1052 	rate &= IEEE80211_RATE_VAL;
1053 
1054 	/* check if RTS/CTS or CTS-to-self protection must be used */
1055 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1056 		/* multicast frames never use OFDM, so no protection needed */
1057 		if (pktlen > ic->ic_rtsthreshold) {
1058 			needrts = 1;	/* RTS/CTS based on frame length */
1059 		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1060 		    RAL_RATE_IS_OFDM(rate)) {
1061 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
1062 				needcts = 1;	/* CTS-to-self */
1063 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
1064 				needrts = 1;	/* RTS/CTS */
1065 		}
1066 	}
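	/*
	 * A protection frame (RTS or CTS-to-self) goes out in its own Tx
	 * slot ahead of the data frame; the data frame then follows one
	 * SIFS later (RAL_TX_IFS_SIFS is set below).
	 */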
1067 	if (needrts || needcts) {
1068 		struct mbuf *mprot;
1069 		int protrate, ackrate;
1070 		uint16_t dur;
1071 
1072 		protrate = 2;
1073 		ackrate  = ural_ack_rate(ic, rate);
1074 
1075 		dur = ural_txtime(pktlen, rate, ic->ic_flags) +
1076 		      ural_txtime(RAL_ACK_SIZE, ackrate, ic->ic_flags) +
1077 		      2 * RAL_SIFS;
1078 		if (needrts) {
1079 			dur += ural_txtime(RAL_CTS_SIZE, ural_ack_rate(ic,
1080 			    protrate), ic->ic_flags) + RAL_SIFS;
1081 			mprot = ieee80211_get_rts(ic, wh, dur);
1082 		} else {
1083 			mprot = ieee80211_get_cts_to_self(ic, dur);
1084 		}
1085 		if (mprot == NULL) {
1086 			printf("%s: could not allocate protection frame\n",
1087 			    sc->sc_dev.dv_xname);
1088 			m_freem(m0);
1089 			return ENOBUFS;
1090 		}
1091 
1092 		data = &sc->tx_data[sc->tx_cur];
1093 		desc = (struct ural_tx_desc *)data->buf;
1094 
1095 		/* avoid multiple free() of the same node for each fragment */
1096 		data->ni = ieee80211_ref_node(ni);
1097 
1098 		m_copydata(mprot, 0, mprot->m_pkthdr.len,
1099 		    data->buf + RAL_TX_DESC_SIZE);
1100 		ural_setup_tx_desc(sc, desc,
1101 		    (needrts ? RAL_TX_NEED_ACK : 0) | RAL_TX_RETRY(7),
1102 		    mprot->m_pkthdr.len, protrate);
1103 
1104 		/* no roundup necessary here */
1105 		xferlen = RAL_TX_DESC_SIZE + mprot->m_pkthdr.len;
1106 
1107 		/* XXX may want to pass the protection frame to BPF */
1108 
1109 		/* mbuf is no longer needed */
1110 		m_freem(mprot);
1111 
1112 		usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf,
1113 		    xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY,
1114 		    RAL_TX_TIMEOUT, ural_txeof);
1115 		error = usbd_transfer(data->xfer);
1116 		if (error != 0 && error != USBD_IN_PROGRESS) {
1117 			m_freem(m0);
1118 			return error;
1119 		}
1120 
1121 		sc->tx_queued++;
1122 		sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;
1123 
1124 		flags |= RAL_TX_IFS_SIFS;
1125 	}
1126 
1127 	data = &sc->tx_data[sc->tx_cur];
1128 	desc = (struct ural_tx_desc *)data->buf;
1129 
1130 	data->ni = ni;
1131 
1132 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1133 		flags |= RAL_TX_NEED_ACK;
1134 		flags |= RAL_TX_RETRY(7);
1135 
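		/* the duration (NAV) covers one SIFS plus the expected ACK */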
1136 		dur = ural_txtime(RAL_ACK_SIZE, ural_ack_rate(ic, rate),
1137 		    ic->ic_flags) + RAL_SIFS;
1138 		*(uint16_t *)wh->i_dur = htole16(dur);
1139 
1140 #ifndef IEEE80211_STA_ONLY
1141 		/* tell hardware to set timestamp in probe responses */
1142 		if ((wh->i_fc[0] &
1143 		    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
1144 		    (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
1145 			flags |= RAL_TX_TIMESTAMP;
1146 #endif
1147 	}
1148 
1149 #if NBPFILTER > 0
1150 	if (sc->sc_drvbpf != NULL) {
1151 		struct mbuf mb;
1152 		struct ural_tx_radiotap_header *tap = &sc->sc_txtap;
1153 
1154 		tap->wt_flags = 0;
1155 		tap->wt_rate = rate;
1156 		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
1157 		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
1158 		tap->wt_antenna = sc->tx_ant;
1159 
1160 		mb.m_data = (caddr_t)tap;
1161 		mb.m_len = sc->sc_txtap_len;
1162 		mb.m_next = m0;
1163 		mb.m_nextpkt = NULL;
1164 		mb.m_type = 0;
1165 		mb.m_flags = 0;
1166 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
1167 	}
1168 #endif
1169 
1170 	m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE);
1171 	ural_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate);
1172 
1173 	/* align end on a 2-byte boundary */
1174 	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;
1175 
1176 	/*
1177 	 * There is no space left in the last URB to store the extra 2 bytes,
1178 	 * so force the sending of another URB.
1179 	 */
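	/*
	 * (A transfer that is an exact multiple of the 64-byte bulk packet
	 * size would otherwise end in a zero-length packet, which the
	 * hardware apparently does not handle; the two pad bytes make the
	 * last packet a short one instead.)
	 */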
1180 	if ((xferlen % 64) == 0)
1181 		xferlen += 2;
1182 
1183 	DPRINTFN(10, ("sending frame len=%u rate=%u xfer len=%u\n",
1184 	    m0->m_pkthdr.len, rate, xferlen));
1185 
1186 	/* mbuf is no longer needed */
1187 	m_freem(m0);
1188 
1189 	usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen,
1190 	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof);
1191 	error = usbd_transfer(data->xfer);
1192 	if (error != 0 && error != USBD_IN_PROGRESS)
1193 		return error;
1194 
1195 	sc->tx_queued++;
1196 	sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;
1197 
1198 	return 0;
1199 }
1200 
1201 void
1202 ural_start(struct ifnet *ifp)
1203 {
1204 	struct ural_softc *sc = ifp->if_softc;
1205 	struct ieee80211com *ic = &sc->sc_ic;
1206 	struct ieee80211_node *ni;
1207 	struct mbuf *m0;
1208 
1209 	/*
1210 	 * net80211 may still try to send management frames even if the
1211 	 * IFF_RUNNING flag is not set...
1212 	 */
1213 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1214 		return;
1215 
1216 	for (;;) {
1217 		if (sc->tx_queued >= RAL_TX_LIST_COUNT - 1) {
1218 			ifq_set_oactive(&ifp->if_snd);
1219 			break;
1220 		}
1221 
1222 		m0 = mq_dequeue(&ic->ic_mgtq);
1223 		if (m0 != NULL) {
1224 			ni = m0->m_pkthdr.ph_cookie;
1225 #if NBPFILTER > 0
1226 			if (ic->ic_rawbpf != NULL)
1227 				bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
1228 #endif
1229 			if (ural_tx_data(sc, m0, ni) != 0)
1230 				break;
1231 
1232 		} else {
1233 			if (ic->ic_state != IEEE80211_S_RUN)
1234 				break;
1235 
1236 			m0 = ifq_dequeue(&ifp->if_snd);
1237 			if (m0 == NULL)
1238 				break;
1239 #if NBPFILTER > 0
1240 			if (ifp->if_bpf != NULL)
1241 				bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
1242 #endif
1243 			m0 = ieee80211_encap(ifp, m0, &ni);
1244 			if (m0 == NULL)
1245 				continue;
1246 #if NBPFILTER > 0
1247 			if (ic->ic_rawbpf != NULL)
1248 				bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
1249 #endif
1250 			if (ural_tx_data(sc, m0, ni) != 0) {
1251 				if (ni != NULL)
1252 					ieee80211_release_node(ic, ni);
1253 				ifp->if_oerrors++;
1254 				break;
1255 			}
1256 		}
1257 
1258 		sc->sc_tx_timer = 5;
1259 		ifp->if_timer = 1;
1260 	}
1261 }
1262 
1263 void
1264 ural_watchdog(struct ifnet *ifp)
1265 {
1266 	struct ural_softc *sc = ifp->if_softc;
1267 
1268 	ifp->if_timer = 0;
1269 
1270 	if (sc->sc_tx_timer > 0) {
1271 		if (--sc->sc_tx_timer == 0) {
1272 			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1273 			/*ural_init(ifp); XXX needs a process context! */
1274 			ifp->if_oerrors++;
1275 			return;
1276 		}
1277 		ifp->if_timer = 1;
1278 	}
1279 
1280 	ieee80211_watchdog(ifp);
1281 }
1282 
1283 int
1284 ural_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1285 {
1286 	struct ural_softc *sc = ifp->if_softc;
1287 	struct ieee80211com *ic = &sc->sc_ic;
1288 	int s, error = 0;
1289 
1290 	if (usbd_is_dying(sc->sc_udev))
1291 		return ENXIO;
1292 
1293 	usbd_ref_incr(sc->sc_udev);
1294 
1295 	s = splnet();
1296 
1297 	switch (cmd) {
1298 	case SIOCSIFADDR:
1299 		ifp->if_flags |= IFF_UP;
1300 		/* FALLTHROUGH */
1301 	case SIOCSIFFLAGS:
1302 		if (ifp->if_flags & IFF_UP) {
1303 			if (ifp->if_flags & IFF_RUNNING)
1304 				ural_update_promisc(sc);
1305 			else
1306 				ural_init(ifp);
1307 		} else {
1308 			if (ifp->if_flags & IFF_RUNNING)
1309 				ural_stop(ifp, 1);
1310 		}
1311 		break;
1312 
1313 	case SIOCS80211CHANNEL:
1314 		/*
1315 		 * This allows for fast channel switching in monitor mode
1316 		 * (used by kismet). In IBSS mode, we must explicitly reset
1317 		 * the interface to generate a new beacon frame.
1318 		 */
1319 		error = ieee80211_ioctl(ifp, cmd, data);
1320 		if (error == ENETRESET &&
1321 		    ic->ic_opmode == IEEE80211_M_MONITOR) {
1322 			if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1323 			    (IFF_UP | IFF_RUNNING))
1324 				ural_set_chan(sc, ic->ic_ibss_chan);
1325 			error = 0;
1326 		}
1327 		break;
1328 
1329 	default:
1330 		error = ieee80211_ioctl(ifp, cmd, data);
1331 	}
1332 
1333 	if (error == ENETRESET) {
1334 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1335 		    (IFF_UP | IFF_RUNNING))
1336 			ural_init(ifp);
1337 		error = 0;
1338 	}
1339 
1340 	splx(s);
1341 
1342 	usbd_ref_decr(sc->sc_udev);
1343 
1344 	return error;
1345 }
1346 
1347 void
1348 ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len)
1349 {
1350 	usb_device_request_t req;
1351 	usbd_status error;
1352 
1353 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1354 	req.bRequest = RAL_READ_EEPROM;
1355 	USETW(req.wValue, 0);
1356 	USETW(req.wIndex, addr);
1357 	USETW(req.wLength, len);
1358 
1359 	error = usbd_do_request(sc->sc_udev, &req, buf);
1360 	if (error != 0) {
1361 		printf("%s: could not read EEPROM: %s\n",
1362 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1363 	}
1364 }
1365 
1366 uint16_t
1367 ural_read(struct ural_softc *sc, uint16_t reg)
1368 {
1369 	usb_device_request_t req;
1370 	usbd_status error;
1371 	uint16_t val;
1372 
1373 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1374 	req.bRequest = RAL_READ_MAC;
1375 	USETW(req.wValue, 0);
1376 	USETW(req.wIndex, reg);
1377 	USETW(req.wLength, sizeof (uint16_t));
1378 
1379 	error = usbd_do_request(sc->sc_udev, &req, &val);
1380 	if (error != 0) {
1381 		printf("%s: could not read MAC register: %s\n",
1382 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1383 		return 0;
1384 	}
1385 	return letoh16(val);
1386 }
1387 
1388 void
1389 ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
1390 {
1391 	usb_device_request_t req;
1392 	usbd_status error;
1393 
1394 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1395 	req.bRequest = RAL_READ_MULTI_MAC;
1396 	USETW(req.wValue, 0);
1397 	USETW(req.wIndex, reg);
1398 	USETW(req.wLength, len);
1399 
1400 	error = usbd_do_request(sc->sc_udev, &req, buf);
1401 	if (error != 0) {
1402 		printf("%s: could not read MAC register: %s\n",
1403 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1404 	}
1405 }
1406 
1407 void
1408 ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val)
1409 {
1410 	usb_device_request_t req;
1411 	usbd_status error;
1412 
1413 	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1414 	req.bRequest = RAL_WRITE_MAC;
1415 	USETW(req.wValue, val);
1416 	USETW(req.wIndex, reg);
1417 	USETW(req.wLength, 0);
1418 
1419 	error = usbd_do_request(sc->sc_udev, &req, NULL);
1420 	if (error != 0) {
1421 		printf("%s: could not write MAC register: %s\n",
1422 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1423 	}
1424 }
1425 
1426 void
1427 ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
1428 {
1429 	usb_device_request_t req;
1430 	usbd_status error;
1431 
1432 	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1433 	req.bRequest = RAL_WRITE_MULTI_MAC;
1434 	USETW(req.wValue, 0);
1435 	USETW(req.wIndex, reg);
1436 	USETW(req.wLength, len);
1437 
1438 	error = usbd_do_request(sc->sc_udev, &req, buf);
1439 	if (error != 0) {
1440 		printf("%s: could not write MAC register: %s\n",
1441 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1442 	}
1443 }
1444 
1445 void
1446 ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val)
1447 {
1448 	uint16_t tmp;
1449 	int ntries;
1450 
1451 	for (ntries = 0; ntries < 5; ntries++) {
1452 		if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
1453 			break;
1454 	}
1455 	if (ntries == 5) {
1456 		printf("%s: could not write to BBP\n", sc->sc_dev.dv_xname);
1457 		return;
1458 	}
1459 
1460 	tmp = reg << 8 | val;
1461 	ural_write(sc, RAL_PHY_CSR7, tmp);
1462 }
1463 
1464 uint8_t
1465 ural_bbp_read(struct ural_softc *sc, uint8_t reg)
1466 {
1467 	uint16_t val;
1468 	int ntries;
1469 
1470 	val = RAL_BBP_WRITE | reg << 8;
1471 	ural_write(sc, RAL_PHY_CSR7, val);
1472 
1473 	for (ntries = 0; ntries < 5; ntries++) {
1474 		if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
1475 			break;
1476 	}
1477 	if (ntries == 5) {
1478 		printf("%s: could not read BBP\n", sc->sc_dev.dv_xname);
1479 		return 0;
1480 	}
1481 	return ural_read(sc, RAL_PHY_CSR7) & 0xff;
1482 }
1483 
1484 void
1485 ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val)
1486 {
1487 	uint32_t tmp;
1488 	int ntries;
1489 
1490 	for (ntries = 0; ntries < 5; ntries++) {
1491 		if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY))
1492 			break;
1493 	}
1494 	if (ntries == 5) {
1495 		printf("%s: could not write to RF\n", sc->sc_dev.dv_xname);
1496 		return;
1497 	}
1498 
1499 	tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3);
1500 	ural_write(sc, RAL_PHY_CSR9,  tmp & 0xffff);
1501 	ural_write(sc, RAL_PHY_CSR10, tmp >> 16);
1502 
1503 	/* remember last written value in sc */
1504 	sc->rf_regs[reg] = val;
1505 
1506 	DPRINTFN(15, ("RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff));
1507 }
1508 
1509 void
1510 ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c)
1511 {
1512 	struct ieee80211com *ic = &sc->sc_ic;
1513 	uint8_t power, tmp;
1514 	u_int chan;
1515 
1516 	chan = ieee80211_chan2ieee(ic, c);
1517 	if (chan == 0 || chan == IEEE80211_CHAN_ANY)
1518 		return;
1519 
1520 	power = min(sc->txpow[chan - 1], 31);
1521 
1522 	DPRINTFN(2, ("setting channel to %u, txpower to %u\n", chan, power));
1523 
1524 	switch (sc->rf_rev) {
1525 	case RAL_RF_2522:
1526 		ural_rf_write(sc, RAL_RF1, 0x00814);
1527 		ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]);
1528 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
1529 		break;
1530 
1531 	case RAL_RF_2523:
1532 		ural_rf_write(sc, RAL_RF1, 0x08804);
1533 		ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]);
1534 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044);
1535 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1536 		break;
1537 
1538 	case RAL_RF_2524:
1539 		ural_rf_write(sc, RAL_RF1, 0x0c808);
1540 		ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]);
1541 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
1542 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1543 		break;
1544 
1545 	case RAL_RF_2525:
1546 		ural_rf_write(sc, RAL_RF1, 0x08808);
1547 		ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]);
1548 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1549 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1550 
1551 		ural_rf_write(sc, RAL_RF1, 0x08808);
1552 		ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]);
1553 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1554 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1555 		break;
1556 
1557 	case RAL_RF_2525E:
1558 		ural_rf_write(sc, RAL_RF1, 0x08808);
1559 		ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]);
1560 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1561 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282);
1562 		break;
1563 
1564 	case RAL_RF_2526:
1565 		ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]);
1566 		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
1567 		ural_rf_write(sc, RAL_RF1, 0x08804);
1568 
1569 		ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]);
1570 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1571 		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
1572 		break;
1573 	}
1574 
1575 	if (ic->ic_opmode != IEEE80211_M_MONITOR &&
1576 	    ic->ic_state != IEEE80211_S_SCAN) {
1577 		/* set Japan filter bit for channel 14 */
1578 		tmp = ural_bbp_read(sc, 70);
1579 
1580 		tmp &= ~RAL_JAPAN_FILTER;
1581 		if (chan == 14)
1582 			tmp |= RAL_JAPAN_FILTER;
1583 
1584 		ural_bbp_write(sc, 70, tmp);
1585 
1586 		/* clear CRC errors */
1587 		ural_read(sc, RAL_STA_CSR0);
1588 
1589 		DELAY(1000); /* RF needs a 1ms delay here */
1590 		ural_disable_rf_tune(sc);
1591 	}
1592 }
1593 
1594 /*
1595  * Disable RF auto-tuning.
1596  */
1597 void
1598 ural_disable_rf_tune(struct ural_softc *sc)
1599 {
1600 	uint32_t tmp;
1601 
1602 	if (sc->rf_rev != RAL_RF_2523) {
1603 		tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE;
1604 		ural_rf_write(sc, RAL_RF1, tmp);
1605 	}
1606 
1607 	tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE;
1608 	ural_rf_write(sc, RAL_RF3, tmp);
1609 
1610 	DPRINTFN(2, ("disabling RF autotune\n"));
1611 }
1612 
1613 /*
1614  * Refer to IEEE Std 802.11-1999, p. 123, for more information on TSF
1615  * synchronization.
1616  */
1617 void
1618 ural_enable_tsf_sync(struct ural_softc *sc)
1619 {
1620 	struct ieee80211com *ic = &sc->sc_ic;
1621 	uint16_t logcwmin, preload, tmp;
1622 
1623 	/* first, disable TSF synchronization */
1624 	ural_write(sc, RAL_TXRX_CSR19, 0);
1625 
1626 	tmp = (16 * ic->ic_bss->ni_intval) << 4;
1627 	ural_write(sc, RAL_TXRX_CSR18, tmp);
1628 
1629 #ifndef IEEE80211_STA_ONLY
1630 	if (ic->ic_opmode == IEEE80211_M_IBSS) {
1631 		logcwmin = 2;
1632 		preload = 320;
1633 	} else
1634 #endif
1635 	{
1636 		logcwmin = 0;
1637 		preload = 6;
1638 	}
1639 	tmp = logcwmin << 12 | preload;
1640 	ural_write(sc, RAL_TXRX_CSR20, tmp);
1641 
1642 	/* finally, enable TSF synchronization */
1643 	tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN;
1644 	if (ic->ic_opmode == IEEE80211_M_STA)
1645 		tmp |= RAL_ENABLE_TSF_SYNC(1);
1646 #ifndef IEEE80211_STA_ONLY
1647 	else
1648 		tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR;
1649 #endif
1650 	ural_write(sc, RAL_TXRX_CSR19, tmp);
1651 
1652 	DPRINTF(("enabling TSF synchronization\n"));
1653 }
1654 
1655 void
1656 ural_update_slot(struct ural_softc *sc)
1657 {
1658 	struct ieee80211com *ic = &sc->sc_ic;
1659 	uint16_t slottime, sifs, eifs;
1660 
1661 	slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ?
1662 	    IEEE80211_DUR_DS_SHSLOT : IEEE80211_DUR_DS_SLOT;
1663 
1664 	/*
1665 	 * These settings may sound a bit inconsistent, but this is what the
1666 	 * reference driver does.
1667 	 */
1668 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
1669 		sifs = 16 - RAL_RXTX_TURNAROUND;
1670 		eifs = 364;
1671 	} else {
1672 		sifs = 10 - RAL_RXTX_TURNAROUND;
1673 		eifs = 64;
1674 	}
1675 
1676 	ural_write(sc, RAL_MAC_CSR10, slottime);
1677 	ural_write(sc, RAL_MAC_CSR11, sifs);
1678 	ural_write(sc, RAL_MAC_CSR12, eifs);
1679 }
1680 
1681 void
1682 ural_set_txpreamble(struct ural_softc *sc)
1683 {
1684 	uint16_t tmp;
1685 
1686 	tmp = ural_read(sc, RAL_TXRX_CSR10);
1687 
1688 	tmp &= ~RAL_SHORT_PREAMBLE;
1689 	if (sc->sc_ic.ic_flags & IEEE80211_F_SHPREAMBLE)
1690 		tmp |= RAL_SHORT_PREAMBLE;
1691 
1692 	ural_write(sc, RAL_TXRX_CSR10, tmp);
1693 }
1694 
1695 void
1696 ural_set_basicrates(struct ural_softc *sc)
1697 {
1698 	struct ieee80211com *ic = &sc->sc_ic;
1699 
1700 	/* update basic rate set */
1701 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
1702 		/* 11b basic rates: 1, 2Mbps */
1703 		ural_write(sc, RAL_TXRX_CSR11, 0x3);
1704 	} else {
1705 		/* 11b/g basic rates: 1, 2, 5.5, 11Mbps */
1706 		ural_write(sc, RAL_TXRX_CSR11, 0xf);
1707 	}
1708 }
1709 
1710 void
1711 ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid)
1712 {
1713 	uint16_t tmp;
1714 
1715 	tmp = bssid[0] | bssid[1] << 8;
1716 	ural_write(sc, RAL_MAC_CSR5, tmp);
1717 
1718 	tmp = bssid[2] | bssid[3] << 8;
1719 	ural_write(sc, RAL_MAC_CSR6, tmp);
1720 
1721 	tmp = bssid[4] | bssid[5] << 8;
1722 	ural_write(sc, RAL_MAC_CSR7, tmp);
1723 
1724 	DPRINTF(("setting BSSID to %s\n", ether_sprintf((uint8_t *)bssid)));
1725 }
1726 
1727 void
1728 ural_set_macaddr(struct ural_softc *sc, const uint8_t *addr)
1729 {
1730 	uint16_t tmp;
1731 
1732 	tmp = addr[0] | addr[1] << 8;
1733 	ural_write(sc, RAL_MAC_CSR2, tmp);
1734 
1735 	tmp = addr[2] | addr[3] << 8;
1736 	ural_write(sc, RAL_MAC_CSR3, tmp);
1737 
1738 	tmp = addr[4] | addr[5] << 8;
1739 	ural_write(sc, RAL_MAC_CSR4, tmp);
1740 
1741 	DPRINTF(("setting MAC address to %s\n",
1742 	    ether_sprintf((uint8_t *)addr)));
1743 }
1744 
1745 void
1746 ural_update_promisc(struct ural_softc *sc)
1747 {
1748 	struct ifnet *ifp = &sc->sc_ic.ic_if;
1749 	uint16_t tmp;
1750 
1751 	tmp = ural_read(sc, RAL_TXRX_CSR2);
1752 
1753 	tmp &= ~RAL_DROP_NOT_TO_ME;
1754 	if (!(ifp->if_flags & IFF_PROMISC))
1755 		tmp |= RAL_DROP_NOT_TO_ME;
1756 
1757 	ural_write(sc, RAL_TXRX_CSR2, tmp);
1758 
1759 	DPRINTF(("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ?
1760 	    "entering" : "leaving"));
1761 }
1762 
1763 const char *
1764 ural_get_rf(int rev)
1765 {
1766 	switch (rev) {
1767 	case RAL_RF_2522:	return "RT2522";
1768 	case RAL_RF_2523:	return "RT2523";
1769 	case RAL_RF_2524:	return "RT2524";
1770 	case RAL_RF_2525:	return "RT2525";
1771 	case RAL_RF_2525E:	return "RT2525e";
1772 	case RAL_RF_2526:	return "RT2526";
1773 	case RAL_RF_5222:	return "RT5222";
1774 	default:		return "unknown";
1775 	}
1776 }
1777 
1778 void
1779 ural_read_eeprom(struct ural_softc *sc)
1780 {
1781 	struct ieee80211com *ic = &sc->sc_ic;
1782 	uint16_t val;
1783 
1784 	/* retrieve MAC/BBP type */
1785 	ural_eeprom_read(sc, RAL_EEPROM_MACBBP, &val, 2);
1786 	sc->macbbp_rev = letoh16(val);
1787 
1788 	ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2);
1789 	val = letoh16(val);
1790 	sc->rf_rev =   (val >> 11) & 0x7;
1791 	sc->hw_radio = (val >> 10) & 0x1;
1792 	sc->led_mode = (val >> 6)  & 0x7;
1793 	sc->rx_ant =   (val >> 4)  & 0x3;
1794 	sc->tx_ant =   (val >> 2)  & 0x3;
1795 	sc->nb_ant =   val & 0x3;
1796 
1797 	/* read MAC address */
1798 	ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_myaddr, 6);
1799 
1800 	/* read default values for BBP registers */
1801 	ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16);
1802 
1803 	/* read Tx power for all b/g channels */
1804 	ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14);
1805 }
1806 
1807 int
1808 ural_bbp_init(struct ural_softc *sc)
1809 {
1810 	int i, ntries;
1811 
1812 	/* wait for BBP to be ready */
1813 	for (ntries = 0; ntries < 100; ntries++) {
1814 		if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0)
1815 			break;
1816 		DELAY(1000);
1817 	}
1818 	if (ntries == 100) {
1819 		printf("%s: timeout waiting for BBP\n", sc->sc_dev.dv_xname);
1820 		return EIO;
1821 	}
1822 
1823 	/* initialize BBP registers to default values */
1824 	for (i = 0; i < nitems(ural_def_bbp); i++)
1825 		ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val);
1826 
1827 #if 0
1828 	/* initialize BBP registers to values stored in EEPROM */
1829 	for (i = 0; i < 16; i++) {
1830 		if (sc->bbp_prom[i].reg == 0xff)
1831 			continue;
1832 		ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
1833 	}
1834 #endif
1835 
1836 	return 0;
1837 }
1838 
1839 void
1840 ural_set_txantenna(struct ural_softc *sc, int antenna)
1841 {
1842 	uint16_t tmp;
1843 	uint8_t tx;
1844 
1845 	tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK;
1846 	if (antenna == 1)
1847 		tx |= RAL_BBP_ANTA;
1848 	else if (antenna == 2)
1849 		tx |= RAL_BBP_ANTB;
1850 	else
1851 		tx |= RAL_BBP_DIVERSITY;
1852 
1853 	/* need to force I/Q flip for RF 2525e, 2526 and 5222 */
1854 	if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 ||
1855 	    sc->rf_rev == RAL_RF_5222)
1856 		tx |= RAL_BBP_FLIPIQ;
1857 
1858 	ural_bbp_write(sc, RAL_BBP_TX, tx);
1859 
1860 	/* update flags in PHY_CSR5 and PHY_CSR6 too */
1861 	tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7;
1862 	ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7));
1863 
1864 	tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7;
1865 	ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7));
1866 }
1867 
1868 void
1869 ural_set_rxantenna(struct ural_softc *sc, int antenna)
1870 {
1871 	uint8_t rx;
1872 
1873 	rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK;
1874 	if (antenna == 1)
1875 		rx |= RAL_BBP_ANTA;
1876 	else if (antenna == 2)
1877 		rx |= RAL_BBP_ANTB;
1878 	else
1879 		rx |= RAL_BBP_DIVERSITY;
1880 
1881 	/* need to force no I/Q flip for RF 2525e and 2526 */
1882 	if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526)
1883 		rx &= ~RAL_BBP_FLIPIQ;
1884 
1885 	ural_bbp_write(sc, RAL_BBP_RX, rx);
1886 }
1887 
1888 int
1889 ural_init(struct ifnet *ifp)
1890 {
1891 	struct ural_softc *sc = ifp->if_softc;
1892 	struct ieee80211com *ic = &sc->sc_ic;
1893 	uint16_t tmp;
1894 	usbd_status error;
1895 	int i, ntries;
1896 
1897 	ural_stop(ifp, 0);
1898 
1899 	/* initialize MAC registers to default values */
1900 	for (i = 0; i < nitems(ural_def_mac); i++)
1901 		ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val);
1902 
1903 	/* wait for BBP and RF to wake up (this can take a long time!) */
1904 	for (ntries = 0; ntries < 100; ntries++) {
1905 		tmp = ural_read(sc, RAL_MAC_CSR17);
1906 		if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) ==
1907 		    (RAL_BBP_AWAKE | RAL_RF_AWAKE))
1908 			break;
1909 		DELAY(1000);
1910 	}
1911 	if (ntries == 100) {
1912 		printf("%s: timeout waiting for BBP/RF to wakeup\n",
1913 		    sc->sc_dev.dv_xname);
1914 		error = EIO;
1915 		goto fail;
1916 	}
1917 
1918 	/* we're ready! */
1919 	ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY);
1920 
1921 	/* set basic rate set (will be updated later) */
1922 	ural_write(sc, RAL_TXRX_CSR11, 0x153);
1923 
1924 	error = ural_bbp_init(sc);
1925 	if (error != 0)
1926 		goto fail;
1927 
1928 	/* set default BSS channel */
1929 	ic->ic_bss->ni_chan = ic->ic_ibss_chan;
1930 	ural_set_chan(sc, ic->ic_bss->ni_chan);
1931 
1932 	/* clear statistics registers (STA_CSR0 to STA_CSR10) */
1933 	ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
1934 
1935 	/* set default sensitivity */
1936 	ural_bbp_write(sc, 17, 0x48);
1937 
1938 	ural_set_txantenna(sc, 1);
1939 	ural_set_rxantenna(sc, 1);
1940 
1941 	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
1942 	ural_set_macaddr(sc, ic->ic_myaddr);
1943 
1944 	/*
1945 	 * Copy WEP keys into adapter's memory (SEC_CSR0 to SEC_CSR31).
1946 	 */
1947 	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1948 		struct ieee80211_key *k = &ic->ic_nw_keys[i];
1949 		ural_write_multi(sc, RAL_SEC_CSR0 + i * IEEE80211_KEYBUF_SIZE,
1950 		    k->k_key, IEEE80211_KEYBUF_SIZE);
1951 	}
1952 
1953 	/*
1954 	 * Allocate xfer for AMRR statistics requests.
1955 	 */
1956 	sc->amrr_xfer = usbd_alloc_xfer(sc->sc_udev);
1957 	if (sc->amrr_xfer == NULL) {
1958 		printf("%s: could not allocate AMRR xfer\n",
1959 		    sc->sc_dev.dv_xname);
		error = ENOMEM;
1960 		goto fail;
1961 	}
1962 
1963 	/*
1964 	 * Open Tx and Rx USB bulk pipes.
1965 	 */
1966 	error = usbd_open_pipe(sc->sc_iface, sc->sc_tx_no, USBD_EXCLUSIVE_USE,
1967 	    &sc->sc_tx_pipeh);
1968 	if (error != 0) {
1969 		printf("%s: could not open Tx pipe: %s\n",
1970 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1971 		goto fail;
1972 	}
1973 	error = usbd_open_pipe(sc->sc_iface, sc->sc_rx_no, USBD_EXCLUSIVE_USE,
1974 	    &sc->sc_rx_pipeh);
1975 	if (error != 0) {
1976 		printf("%s: could not open Rx pipe: %s\n",
1977 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1978 		goto fail;
1979 	}
1980 
1981 	/*
1982 	 * Allocate Tx and Rx xfer queues.
1983 	 */
1984 	error = ural_alloc_tx_list(sc);
1985 	if (error != 0) {
1986 		printf("%s: could not allocate Tx list\n",
1987 		    sc->sc_dev.dv_xname);
1988 		goto fail;
1989 	}
1990 	error = ural_alloc_rx_list(sc);
1991 	if (error != 0) {
1992 		printf("%s: could not allocate Rx list\n",
1993 		    sc->sc_dev.dv_xname);
1994 		goto fail;
1995 	}
1996 
1997 	/*
1998 	 * Start up the receive pipe.
1999 	 */
2000 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
2001 		struct ural_rx_data *data = &sc->rx_data[i];
2002 
2003 		usbd_setup_xfer(data->xfer, sc->sc_rx_pipeh, data, data->buf,
2004 		    MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
2005 		error = usbd_transfer(data->xfer);
2006 		if (error != 0 && error != USBD_IN_PROGRESS) {
2007 			printf("%s: could not queue Rx transfer\n",
2008 			    sc->sc_dev.dv_xname);
2009 			goto fail;
2010 		}
2011 	}
2012 
2013 	/* kick Rx */
2014 	tmp = RAL_DROP_PHY_ERROR | RAL_DROP_CRC_ERROR;
2015 	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
2016 		tmp |= RAL_DROP_CTL | RAL_DROP_VERSION_ERROR;
2017 #ifndef IEEE80211_STA_ONLY
2018 		if (ic->ic_opmode != IEEE80211_M_HOSTAP)
2019 #endif
2020 			tmp |= RAL_DROP_TODS;
2021 		if (!(ifp->if_flags & IFF_PROMISC))
2022 			tmp |= RAL_DROP_NOT_TO_ME;
2023 	}
2024 	ural_write(sc, RAL_TXRX_CSR2, tmp);
2025 
2026 	ifq_clr_oactive(&ifp->if_snd);
2027 	ifp->if_flags |= IFF_RUNNING;
2028 
2029 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
2030 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2031 	else
2032 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2033 
2034 	return 0;
2035 
2036 fail:	ural_stop(ifp, 1);
2037 	return error;
2038 }
2039 
2040 void
2041 ural_stop(struct ifnet *ifp, int disable)
2042 {
2043 	struct ural_softc *sc = ifp->if_softc;
2044 	struct ieee80211com *ic = &sc->sc_ic;
2045 
2046 	sc->sc_tx_timer = 0;
2047 	ifp->if_timer = 0;
2048 	ifp->if_flags &= ~IFF_RUNNING;
2049 	ifq_clr_oactive(&ifp->if_snd);
2050 
2051 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);	/* free all nodes */
2052 
2053 	/* disable Rx */
2054 	ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX);
2055 
2056 	/* reset ASIC and BBP (but won't reset MAC registers!) */
2057 	ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP);
2058 	ural_write(sc, RAL_MAC_CSR1, 0);
2059 
2060 	if (sc->amrr_xfer != NULL) {
2061 		usbd_free_xfer(sc->amrr_xfer);
2062 		sc->amrr_xfer = NULL;
2063 	}
2064 	if (sc->sc_rx_pipeh != NULL) {
2065 		usbd_close_pipe(sc->sc_rx_pipeh);
2066 		sc->sc_rx_pipeh = NULL;
2067 	}
2068 	if (sc->sc_tx_pipeh != NULL) {
2069 		usbd_close_pipe(sc->sc_tx_pipeh);
2070 		sc->sc_tx_pipeh = NULL;
2071 	}
2072 
2073 	ural_free_rx_list(sc);
2074 	ural_free_tx_list(sc);
2075 }
2076 
2077 void
2078 ural_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
2079 {
2080 	/* start with lowest Tx rate */
2081 	ni->ni_txrate = 0;
2082 }
2083 
2084 void
2085 ural_amrr_start(struct ural_softc *sc, struct ieee80211_node *ni)
2086 {
2087 	int i;
2088 
2089 	/* clear statistics registers (STA_CSR0 to STA_CSR10) */
2090 	ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
2091 
2092 	ieee80211_amrr_node_init(&sc->amrr, &sc->amn);
2093 
2094 	/* set rate to some reasonable initial value */
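	/* (the highest rate not exceeding 72 * 500Kbps = 36Mbps) */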
2095 	for (i = ni->ni_rates.rs_nrates - 1;
2096 	     i > 0 && (ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL) > 72;
2097 	     i--);
2098 	ni->ni_txrate = i;
2099 
2100 	if (!usbd_is_dying(sc->sc_udev))
2101 		timeout_add_sec(&sc->amrr_to, 1);
2102 }
2103 
2104 void
2105 ural_amrr_timeout(void *arg)
2106 {
2107 	struct ural_softc *sc = arg;
2108 	usb_device_request_t req;
2109 	int s;
2110 
2111 	if (usbd_is_dying(sc->sc_udev))
2112 		return;
2113 
2114 	usbd_ref_incr(sc->sc_udev);
2115 
2116 	s = splusb();
2117 
2118 	/*
2119 	 * Asynchronously read statistics registers (cleared by read).
2120 	 */
2121 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
2122 	req.bRequest = RAL_READ_MULTI_MAC;
2123 	USETW(req.wValue, 0);
2124 	USETW(req.wIndex, RAL_STA_CSR0);
2125 	USETW(req.wLength, sizeof sc->sta);
2126 
2127 	usbd_setup_default_xfer(sc->amrr_xfer, sc->sc_udev, sc,
2128 	    USBD_DEFAULT_TIMEOUT, &req, sc->sta, sizeof sc->sta, 0,
2129 	    ural_amrr_update);
2130 	(void)usbd_transfer(sc->amrr_xfer);
2131 
2132 	splx(s);
2133 
2134 	usbd_ref_decr(sc->sc_udev);
2135 }
2136 
2137 void
2138 ural_amrr_update(struct usbd_xfer *xfer, void *priv,
2139     usbd_status status)
2140 {
2141 	struct ural_softc *sc = (struct ural_softc *)priv;
2142 	struct ifnet *ifp = &sc->sc_ic.ic_if;
2143 
2144 	if (status != USBD_NORMAL_COMPLETION) {
2145 		printf("%s: could not retrieve Tx statistics; cancelling "
2146 		    "automatic rate control\n", sc->sc_dev.dv_xname);
2147 		return;
2148 	}
2149 
2150 	/* count TX retry-fail as Tx errors */
2151 	ifp->if_oerrors += letoh16(sc->sta[9]);
2152 
2153 	sc->amn.amn_retrycnt =
2154 	    letoh16(sc->sta[7]) +	/* TX one-retry ok count */
2155 	    letoh16(sc->sta[8]) +	/* TX more-retry ok count */
2156 	    letoh16(sc->sta[9]);	/* TX retry-fail count */
2157 
2158 	sc->amn.amn_txcnt =
2159 	    sc->amn.amn_retrycnt +
2160 	    letoh16(sc->sta[6]);	/* TX no-retry ok count */
2161 
2162 	ieee80211_amrr_choose(&sc->amrr, sc->sc_ic.ic_bss, &sc->amn);
2163 
2164 	if (!usbd_is_dying(sc->sc_udev))
2165 		timeout_add_sec(&sc->amrr_to, 1);
2166 }
2167