1 /*	$OpenBSD: if_ral.c,v 1.143 2017/07/03 09:21:09 kevlo Exp $	*/
2 
3 /*-
4  * Copyright (c) 2005, 2006
5  *	Damien Bergamini <damien.bergamini@free.fr>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*-
21  * Ralink Technology RT2500USB chipset driver
22  * http://www.ralinktech.com.tw/
23  */
24 
25 #include "bpfilter.h"
26 
27 #include <sys/param.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/kernel.h>
31 #include <sys/socket.h>
32 #include <sys/systm.h>
33 #include <sys/timeout.h>
34 #include <sys/conf.h>
35 #include <sys/device.h>
36 #include <sys/endian.h>
37 
38 #include <machine/intr.h>
39 
40 #if NBPFILTER > 0
41 #include <net/bpf.h>
42 #endif
43 #include <net/if.h>
44 #include <net/if_dl.h>
45 #include <net/if_media.h>
46 
47 #include <netinet/in.h>
48 #include <netinet/if_ether.h>
49 
50 #include <net80211/ieee80211_var.h>
51 #include <net80211/ieee80211_amrr.h>
52 #include <net80211/ieee80211_radiotap.h>
53 
54 #include <dev/usb/usb.h>
55 #include <dev/usb/usbdi.h>
56 #include <dev/usb/usbdi_util.h>
57 #include <dev/usb/usbdevs.h>
58 
59 #include <dev/usb/if_ralreg.h>
60 #include <dev/usb/if_ralvar.h>
61 
62 #ifdef URAL_DEBUG
63 #define DPRINTF(x)	do { if (ural_debug) printf x; } while (0)
64 #define DPRINTFN(n, x)	do { if (ural_debug >= (n)) printf x; } while (0)
65 int ural_debug = 0;
66 #else
67 #define DPRINTF(x)
68 #define DPRINTFN(n, x)
69 #endif
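/*
 * Note: DPRINTFN(n, x) only prints when the driver is built with URAL_DEBUG
 * and ural_debug is at least n, so larger values select increasingly verbose
 * traces (see the DPRINTFN(10, ...) and DPRINTFN(15, ...) calls below).
 */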
70 
71 /* various supported device vendors/products */
72 static const struct usb_devno ural_devs[] = {
73 	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570 },
74 	{ USB_VENDOR_ASUS,		USB_PRODUCT_ASUS_RT2570_2 },
75 	{ USB_VENDOR_BELKIN,		USB_PRODUCT_BELKIN_F5D7050 },
76 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54G },
77 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_WUSB54GP },
78 	{ USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_HU200TS },
79 	{ USB_VENDOR_CONCEPTRONIC2,	USB_PRODUCT_CONCEPTRONIC2_C54RU },
80 	{ USB_VENDOR_DLINK,		USB_PRODUCT_DLINK_RT2570 },
81 	{ USB_VENDOR_GIGABYTE,		USB_PRODUCT_GIGABYTE_GNWBKG },
82 	{ USB_VENDOR_GUILLEMOT,		USB_PRODUCT_GUILLEMOT_HWGUSB254 },
83 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54 },
84 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54AI },
85 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_KG54YB },
86 	{ USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_NINWIFI },
87 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570 },
88 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_2 },
89 	{ USB_VENDOR_MSI,		USB_PRODUCT_MSI_RT2570_3 },
90 	{ USB_VENDOR_NOVATECH,		USB_PRODUCT_NOVATECH_NV902W },
91 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570 },
92 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_2 },
93 	{ USB_VENDOR_RALINK,		USB_PRODUCT_RALINK_RT2570_3 },
94 	{ USB_VENDOR_SPHAIRON,		USB_PRODUCT_SPHAIRON_UB801R },
95 	{ USB_VENDOR_SURECOM,		USB_PRODUCT_SURECOM_RT2570 },
96 	{ USB_VENDOR_VTECH,		USB_PRODUCT_VTECH_RT2570 },
97 	{ USB_VENDOR_ZINWELL,		USB_PRODUCT_ZINWELL_RT2570 }
98 };
99 
100 int		ural_alloc_tx_list(struct ural_softc *);
101 void		ural_free_tx_list(struct ural_softc *);
102 int		ural_alloc_rx_list(struct ural_softc *);
103 void		ural_free_rx_list(struct ural_softc *);
104 int		ural_media_change(struct ifnet *);
105 void		ural_next_scan(void *);
106 void		ural_task(void *);
107 int		ural_newstate(struct ieee80211com *, enum ieee80211_state,
108 		    int);
109 void		ural_txeof(struct usbd_xfer *, void *, usbd_status);
110 void		ural_rxeof(struct usbd_xfer *, void *, usbd_status);
111 #if NBPFILTER > 0
112 uint8_t		ural_rxrate(const struct ural_rx_desc *);
113 #endif
114 int		ural_ack_rate(struct ieee80211com *, int);
115 uint16_t	ural_txtime(int, int, uint32_t);
116 uint8_t		ural_plcp_signal(int);
117 void		ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *,
118 		    uint32_t, int, int);
119 #ifndef IEEE80211_STA_ONLY
120 int		ural_tx_bcn(struct ural_softc *, struct mbuf *,
121 		    struct ieee80211_node *);
122 #endif
123 int		ural_tx_data(struct ural_softc *, struct mbuf *,
124 		    struct ieee80211_node *);
125 void		ural_start(struct ifnet *);
126 void		ural_watchdog(struct ifnet *);
127 int		ural_ioctl(struct ifnet *, u_long, caddr_t);
128 void		ural_eeprom_read(struct ural_softc *, uint16_t, void *, int);
129 uint16_t	ural_read(struct ural_softc *, uint16_t);
130 void		ural_read_multi(struct ural_softc *, uint16_t, void *, int);
131 void		ural_write(struct ural_softc *, uint16_t, uint16_t);
132 void		ural_write_multi(struct ural_softc *, uint16_t, void *, int);
133 void		ural_bbp_write(struct ural_softc *, uint8_t, uint8_t);
134 uint8_t		ural_bbp_read(struct ural_softc *, uint8_t);
135 void		ural_rf_write(struct ural_softc *, uint8_t, uint32_t);
136 void		ural_set_chan(struct ural_softc *, struct ieee80211_channel *);
137 void		ural_disable_rf_tune(struct ural_softc *);
138 void		ural_enable_tsf_sync(struct ural_softc *);
139 void		ural_update_slot(struct ural_softc *);
140 void		ural_set_txpreamble(struct ural_softc *);
141 void		ural_set_basicrates(struct ural_softc *);
142 void		ural_set_bssid(struct ural_softc *, const uint8_t *);
143 void		ural_set_macaddr(struct ural_softc *, const uint8_t *);
144 void		ural_update_promisc(struct ural_softc *);
145 const char	*ural_get_rf(int);
146 void		ural_read_eeprom(struct ural_softc *);
147 int		ural_bbp_init(struct ural_softc *);
148 void		ural_set_txantenna(struct ural_softc *, int);
149 void		ural_set_rxantenna(struct ural_softc *, int);
150 int		ural_init(struct ifnet *);
151 void		ural_stop(struct ifnet *, int);
152 void		ural_newassoc(struct ieee80211com *, struct ieee80211_node *,
153 		    int);
154 void		ural_amrr_start(struct ural_softc *, struct ieee80211_node *);
155 void		ural_amrr_timeout(void *);
156 void		ural_amrr_update(struct usbd_xfer *, void *,
157 		    usbd_status status);
158 
159 static const struct {
160 	uint16_t	reg;
161 	uint16_t	val;
162 } ural_def_mac[] = {
163 	RAL_DEF_MAC
164 };
165 
166 static const struct {
167 	uint8_t	reg;
168 	uint8_t	val;
169 } ural_def_bbp[] = {
170 	RAL_DEF_BBP
171 };
172 
173 static const uint32_t ural_rf2522_r2[] =    RAL_RF2522_R2;
174 static const uint32_t ural_rf2523_r2[] =    RAL_RF2523_R2;
175 static const uint32_t ural_rf2524_r2[] =    RAL_RF2524_R2;
176 static const uint32_t ural_rf2525_r2[] =    RAL_RF2525_R2;
177 static const uint32_t ural_rf2525_hi_r2[] = RAL_RF2525_HI_R2;
178 static const uint32_t ural_rf2525e_r2[] =   RAL_RF2525E_R2;
179 static const uint32_t ural_rf2526_hi_r2[] = RAL_RF2526_HI_R2;
180 static const uint32_t ural_rf2526_r2[] =    RAL_RF2526_R2;
181 
182 int ural_match(struct device *, void *, void *);
183 void ural_attach(struct device *, struct device *, void *);
184 int ural_detach(struct device *, int);
185 
186 struct cfdriver ural_cd = {
187 	NULL, "ural", DV_IFNET
188 };
189 
190 const struct cfattach ural_ca = {
191 	sizeof(struct ural_softc), ural_match, ural_attach, ural_detach
192 };
193 
194 int
195 ural_match(struct device *parent, void *match, void *aux)
196 {
197 	struct usb_attach_arg *uaa = aux;
198 
199 	if (uaa->iface == NULL || uaa->configno != RAL_CONFIG_NO)
200 		return UMATCH_NONE;
201 
202 	return (usb_lookup(ural_devs, uaa->vendor, uaa->product) != NULL) ?
203 	    UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
204 }
205 
206 void
207 ural_attach(struct device *parent, struct device *self, void *aux)
208 {
209 	struct ural_softc *sc = (struct ural_softc *)self;
210 	struct usb_attach_arg *uaa = aux;
211 	struct ieee80211com *ic = &sc->sc_ic;
212 	struct ifnet *ifp = &ic->ic_if;
213 	usb_interface_descriptor_t *id;
214 	usb_endpoint_descriptor_t *ed;
215 	usbd_status error;
216 	int i;
217 
218 	sc->sc_udev = uaa->device;
219 
220 	/* get the first interface handle */
221 	error = usbd_device2interface_handle(sc->sc_udev, RAL_IFACE_INDEX,
222 	    &sc->sc_iface);
223 	if (error != 0) {
224 		printf("%s: could not get interface handle\n",
225 		    sc->sc_dev.dv_xname);
226 		return;
227 	}
228 
229 	/*
230 	 * Find endpoints.
231 	 */
232 	id = usbd_get_interface_descriptor(sc->sc_iface);
233 
234 	sc->sc_rx_no = sc->sc_tx_no = -1;
235 	for (i = 0; i < id->bNumEndpoints; i++) {
236 		ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i);
237 		if (ed == NULL) {
238 			printf("%s: no endpoint descriptor for iface %d\n",
239 			    sc->sc_dev.dv_xname, i);
240 			return;
241 		}
242 
243 		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
244 		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
245 			sc->sc_rx_no = ed->bEndpointAddress;
246 		else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
247 		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK)
248 			sc->sc_tx_no = ed->bEndpointAddress;
249 	}
250 	if (sc->sc_rx_no == -1 || sc->sc_tx_no == -1) {
251 		printf("%s: missing endpoint\n", sc->sc_dev.dv_xname);
252 		return;
253 	}
254 
255 	usb_init_task(&sc->sc_task, ural_task, sc, USB_TASK_TYPE_GENERIC);
256 	timeout_set(&sc->scan_to, ural_next_scan, sc);
257 
258 	sc->amrr.amrr_min_success_threshold =  1;
259 	sc->amrr.amrr_max_success_threshold = 10;
260 	timeout_set(&sc->amrr_to, ural_amrr_timeout, sc);
261 
262 	/* retrieve RT2570 rev. no */
263 	sc->asic_rev = ural_read(sc, RAL_MAC_CSR0);
264 
265 	/* retrieve MAC address and various other things from EEPROM */
266 	ural_read_eeprom(sc);
267 
268 	printf("%s: MAC/BBP RT%04x (rev 0x%02x), RF %s, address %s\n",
269 	    sc->sc_dev.dv_xname, sc->macbbp_rev, sc->asic_rev,
270 	    ural_get_rf(sc->rf_rev), ether_sprintf(ic->ic_myaddr));
271 
272 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
273 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
274 	ic->ic_state = IEEE80211_S_INIT;
275 
276 	/* set device capabilities */
277 	ic->ic_caps =
278 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
279 #ifndef IEEE80211_STA_ONLY
280 	    IEEE80211_C_IBSS |		/* IBSS mode supported */
281 	    IEEE80211_C_HOSTAP |	/* HostAp mode supported */
282 #endif
283 	    IEEE80211_C_TXPMGT |	/* tx power management */
284 	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
285 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
286 	    IEEE80211_C_WEP |		/* s/w WEP */
287 	    IEEE80211_C_RSN;		/* WPA/RSN */
288 
289 	/* set supported .11b and .11g rates */
290 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
291 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
292 
293 	/* set supported .11b and .11g channels (1 through 14) */
294 	for (i = 1; i <= 14; i++) {
295 		ic->ic_channels[i].ic_freq =
296 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
297 		ic->ic_channels[i].ic_flags =
298 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
299 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
300 	}
301 
302 	ifp->if_softc = sc;
303 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
304 	ifp->if_ioctl = ural_ioctl;
305 	ifp->if_start = ural_start;
306 	ifp->if_watchdog = ural_watchdog;
307 	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
308 
309 	if_attach(ifp);
310 	ieee80211_ifattach(ifp);
311 	ic->ic_newassoc = ural_newassoc;
312 
313 	/* override state transition machine */
314 	sc->sc_newstate = ic->ic_newstate;
315 	ic->ic_newstate = ural_newstate;
316 	ieee80211_media_init(ifp, ural_media_change, ieee80211_media_status);
317 
318 #if NBPFILTER > 0
319 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
320 	    sizeof (struct ieee80211_frame) + 64);
321 
322 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
323 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
324 	sc->sc_rxtap.wr_ihdr.it_present = htole32(RAL_RX_RADIOTAP_PRESENT);
325 
326 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
327 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
328 	sc->sc_txtap.wt_ihdr.it_present = htole32(RAL_TX_RADIOTAP_PRESENT);
329 #endif
330 }
331 
332 int
333 ural_detach(struct device *self, int flags)
334 {
335 	struct ural_softc *sc = (struct ural_softc *)self;
336 	struct ifnet *ifp = &sc->sc_ic.ic_if;
337 	int s;
338 
339 	s = splusb();
340 
341 	if (timeout_initialized(&sc->scan_to))
342 		timeout_del(&sc->scan_to);
343 	if (timeout_initialized(&sc->amrr_to))
344 		timeout_del(&sc->amrr_to);
345 
346 	usb_rem_wait_task(sc->sc_udev, &sc->sc_task);
347 
348 	usbd_ref_wait(sc->sc_udev);
349 
350 	if (ifp->if_softc != NULL) {
351 		ieee80211_ifdetach(ifp);	/* free all nodes */
352 		if_detach(ifp);
353 	}
354 
355 	if (sc->amrr_xfer != NULL) {
356 		usbd_free_xfer(sc->amrr_xfer);
357 		sc->amrr_xfer = NULL;
358 	}
359 
360 	if (sc->sc_rx_pipeh != NULL) {
361 		usbd_abort_pipe(sc->sc_rx_pipeh);
362 		usbd_close_pipe(sc->sc_rx_pipeh);
363 	}
364 
365 	if (sc->sc_tx_pipeh != NULL) {
366 		usbd_abort_pipe(sc->sc_tx_pipeh);
367 		usbd_close_pipe(sc->sc_tx_pipeh);
368 	}
369 
370 	ural_free_rx_list(sc);
371 	ural_free_tx_list(sc);
372 
373 	splx(s);
374 
375 	return 0;
376 }
377 
378 int
379 ural_alloc_tx_list(struct ural_softc *sc)
380 {
381 	int i, error;
382 
383 	sc->tx_cur = sc->tx_queued = 0;
384 
385 	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
386 		struct ural_tx_data *data = &sc->tx_data[i];
387 
388 		data->sc = sc;
389 
390 		data->xfer = usbd_alloc_xfer(sc->sc_udev);
391 		if (data->xfer == NULL) {
392 			printf("%s: could not allocate tx xfer\n",
393 			    sc->sc_dev.dv_xname);
394 			error = ENOMEM;
395 			goto fail;
396 		}
397 		data->buf = usbd_alloc_buffer(data->xfer,
398 		    RAL_TX_DESC_SIZE + IEEE80211_MAX_LEN);
399 		if (data->buf == NULL) {
400 			printf("%s: could not allocate tx buffer\n",
401 			    sc->sc_dev.dv_xname);
402 			error = ENOMEM;
403 			goto fail;
404 		}
405 	}
406 
407 	return 0;
408 
409 fail:	ural_free_tx_list(sc);
410 	return error;
411 }
412 
413 void
414 ural_free_tx_list(struct ural_softc *sc)
415 {
416 	int i;
417 
418 	for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
419 		struct ural_tx_data *data = &sc->tx_data[i];
420 
421 		if (data->xfer != NULL) {
422 			usbd_free_xfer(data->xfer);
423 			data->xfer = NULL;
424 		}
425 		/*
426 		 * The node has already been freed at that point so don't call
427 		 * ieee80211_release_node() here.
428 		 */
429 		data->ni = NULL;
430 	}
431 }
432 
433 int
434 ural_alloc_rx_list(struct ural_softc *sc)
435 {
436 	int i, error;
437 
438 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
439 		struct ural_rx_data *data = &sc->rx_data[i];
440 
441 		data->sc = sc;
442 
443 		data->xfer = usbd_alloc_xfer(sc->sc_udev);
444 		if (data->xfer == NULL) {
445 			printf("%s: could not allocate rx xfer\n",
446 			    sc->sc_dev.dv_xname);
447 			error = ENOMEM;
448 			goto fail;
449 		}
450 		if (usbd_alloc_buffer(data->xfer, MCLBYTES) == NULL) {
451 			printf("%s: could not allocate rx buffer\n",
452 			    sc->sc_dev.dv_xname);
453 			error = ENOMEM;
454 			goto fail;
455 		}
456 
457 		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
458 		if (data->m == NULL) {
459 			printf("%s: could not allocate rx mbuf\n",
460 			    sc->sc_dev.dv_xname);
461 			error = ENOMEM;
462 			goto fail;
463 		}
464 		MCLGET(data->m, M_DONTWAIT);
465 		if (!(data->m->m_flags & M_EXT)) {
466 			printf("%s: could not allocate rx mbuf cluster\n",
467 			    sc->sc_dev.dv_xname);
468 			error = ENOMEM;
469 			goto fail;
470 		}
471 		data->buf = mtod(data->m, uint8_t *);
472 	}
473 
474 	return 0;
475 
476 fail:	ural_free_rx_list(sc);
477 	return error;
478 }
479 
480 void
481 ural_free_rx_list(struct ural_softc *sc)
482 {
483 	int i;
484 
485 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
486 		struct ural_rx_data *data = &sc->rx_data[i];
487 
488 		if (data->xfer != NULL) {
489 			usbd_free_xfer(data->xfer);
490 			data->xfer = NULL;
491 		}
492 		if (data->m != NULL) {
493 			m_freem(data->m);
494 			data->m = NULL;
495 		}
496 	}
497 }
498 
499 int
500 ural_media_change(struct ifnet *ifp)
501 {
502 	int error;
503 
504 	error = ieee80211_media_change(ifp);
505 	if (error != ENETRESET)
506 		return error;
507 
508 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
509 		ural_init(ifp);
510 
511 	return 0;
512 }
513 
514 /*
515  * This function is called periodically (every 200ms) during scanning to
516  * switch from one channel to another.
517  */
518 void
519 ural_next_scan(void *arg)
520 {
521 	struct ural_softc *sc = arg;
522 	struct ieee80211com *ic = &sc->sc_ic;
523 	struct ifnet *ifp = &ic->ic_if;
524 
525 	if (usbd_is_dying(sc->sc_udev))
526 		return;
527 
528 	usbd_ref_incr(sc->sc_udev);
529 
530 	if (ic->ic_state == IEEE80211_S_SCAN)
531 		ieee80211_next_scan(ifp);
532 
533 	usbd_ref_decr(sc->sc_udev);
534 }
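/*
 * The usbd_ref_incr()/usbd_ref_decr() pair above makes ural_detach(), which
 * calls usbd_ref_wait(), wait until this timeout handler is done with the
 * device before tearing it down.
 */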
535 
536 void
537 ural_task(void *arg)
538 {
539 	struct ural_softc *sc = arg;
540 	struct ieee80211com *ic = &sc->sc_ic;
541 	enum ieee80211_state ostate;
542 	struct ieee80211_node *ni;
543 
544 	if (usbd_is_dying(sc->sc_udev))
545 		return;
546 
547 	ostate = ic->ic_state;
548 
549 	switch (sc->sc_state) {
550 	case IEEE80211_S_INIT:
551 		if (ostate == IEEE80211_S_RUN) {
552 			/* abort TSF synchronization */
553 			ural_write(sc, RAL_TXRX_CSR19, 0);
554 
555 			/* force tx led to stop blinking */
556 			ural_write(sc, RAL_MAC_CSR20, 0);
557 		}
558 		break;
559 
560 	case IEEE80211_S_SCAN:
561 		ural_set_chan(sc, ic->ic_bss->ni_chan);
562 		if (!usbd_is_dying(sc->sc_udev))
563 			timeout_add_msec(&sc->scan_to, 200);
564 		break;
565 
566 	case IEEE80211_S_AUTH:
567 		ural_set_chan(sc, ic->ic_bss->ni_chan);
568 		break;
569 
570 	case IEEE80211_S_ASSOC:
571 		ural_set_chan(sc, ic->ic_bss->ni_chan);
572 		break;
573 
574 	case IEEE80211_S_RUN:
575 		ural_set_chan(sc, ic->ic_bss->ni_chan);
576 
577 		ni = ic->ic_bss;
578 
579 		if (ic->ic_opmode != IEEE80211_M_MONITOR) {
580 			ural_update_slot(sc);
581 			ural_set_txpreamble(sc);
582 			ural_set_basicrates(sc);
583 			ural_set_bssid(sc, ni->ni_bssid);
584 		}
585 
586 #ifndef IEEE80211_STA_ONLY
587 		if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
588 		    ic->ic_opmode == IEEE80211_M_IBSS) {
589 			struct mbuf *m = ieee80211_beacon_alloc(ic, ni);
590 			if (m == NULL) {
591 				printf("%s: could not allocate beacon\n",
592 				    sc->sc_dev.dv_xname);
593 				return;
594 			}
595 
596 			if (ural_tx_bcn(sc, m, ni) != 0) {
597 				m_freem(m);
598 				printf("%s: could not transmit beacon\n",
599 				    sc->sc_dev.dv_xname);
600 				return;
601 			}
602 
603 			/* beacon is no longer needed */
604 			m_freem(m);
605 		}
606 #endif
607 
608 		/* make tx led blink on tx (controlled by ASIC) */
609 		ural_write(sc, RAL_MAC_CSR20, 1);
610 
611 		if (ic->ic_opmode != IEEE80211_M_MONITOR)
612 			ural_enable_tsf_sync(sc);
613 
614 		if (ic->ic_opmode == IEEE80211_M_STA) {
615 			/* fake a join to init the tx rate */
616 			ural_newassoc(ic, ic->ic_bss, 1);
617 
618 			/* enable automatic rate control in STA mode */
619 			if (ic->ic_fixed_rate == -1)
620 				ural_amrr_start(sc, ic->ic_bss);
621 		}
622 
623 		break;
624 	}
625 
626 	sc->sc_newstate(ic, sc->sc_state, sc->sc_arg);
627 }
628 
629 int
630 ural_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
631 {
632 	struct ural_softc *sc = ic->ic_if.if_softc;
633 
634 	usb_rem_task(sc->sc_udev, &sc->sc_task);
635 	timeout_del(&sc->scan_to);
636 	timeout_del(&sc->amrr_to);
637 
638 	/* do it in a process context */
639 	sc->sc_state = nstate;
640 	sc->sc_arg = arg;
641 	usb_add_task(sc->sc_udev, &sc->sc_task);
642 	return 0;
643 }
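/*
 * The actual work is deferred to ural_task() because reprogramming the chip
 * uses synchronous USB control transfers that may sleep, while net80211 may
 * call ic_newstate from contexts where sleeping is not allowed.
 */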
644 
645 /* quickly determine if a given rate is CCK or OFDM */
646 #define RAL_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22)
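/*
 * Rates are given in units of 500Kbps (net80211 convention): 2, 4, 11 and 22
 * are the 1/2/5.5/11Mbps CCK rates; 12 (6Mbps) and above, except 22, are the
 * OFDM rates.
 */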
647 
648 #define RAL_ACK_SIZE	14	/* 10 + 4(FCS) */
649 #define RAL_CTS_SIZE	14	/* 10 + 4(FCS) */
650 
651 #define RAL_SIFS		10	/* us */
652 
653 #define RAL_RXTX_TURNAROUND	5	/* us */
654 
655 void
656 ural_txeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
657 {
658 	struct ural_tx_data *data = priv;
659 	struct ural_softc *sc = data->sc;
660 	struct ieee80211com *ic = &sc->sc_ic;
661 	struct ifnet *ifp = &ic->ic_if;
662 	int s;
663 
664 	if (status != USBD_NORMAL_COMPLETION) {
665 		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
666 			return;
667 
668 		printf("%s: could not transmit buffer: %s\n",
669 		    sc->sc_dev.dv_xname, usbd_errstr(status));
670 
671 		if (status == USBD_STALLED)
672 			usbd_clear_endpoint_stall_async(sc->sc_tx_pipeh);
673 
674 		ifp->if_oerrors++;
675 		return;
676 	}
677 
678 	s = splnet();
679 
680 	ieee80211_release_node(ic, data->ni);
681 	data->ni = NULL;
682 
683 	sc->tx_queued--;
684 
685 	DPRINTFN(10, ("tx done\n"));
686 
687 	sc->sc_tx_timer = 0;
688 	ifq_clr_oactive(&ifp->if_snd);
689 	ural_start(ifp);
690 
691 	splx(s);
692 }
693 
694 void
695 ural_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
696 {
697 	struct ural_rx_data *data = priv;
698 	struct ural_softc *sc = data->sc;
699 	struct ieee80211com *ic = &sc->sc_ic;
700 	struct ifnet *ifp = &ic->ic_if;
701 	const struct ural_rx_desc *desc;
702 	struct ieee80211_frame *wh;
703 	struct ieee80211_rxinfo rxi;
704 	struct ieee80211_node *ni;
705 	struct mbuf *mnew, *m;
706 	int s, len;
707 
708 	if (status != USBD_NORMAL_COMPLETION) {
709 		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
710 			return;
711 
712 		if (status == USBD_STALLED)
713 			usbd_clear_endpoint_stall_async(sc->sc_rx_pipeh);
714 		goto skip;
715 	}
716 
717 	usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL);
718 
719 	if (len < RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN) {
720 		DPRINTF(("%s: xfer too short %d\n", sc->sc_dev.dv_xname,
721 		    len));
722 		ifp->if_ierrors++;
723 		goto skip;
724 	}
725 
726 	/* rx descriptor is located at the end */
727 	desc = (struct ural_rx_desc *)(data->buf + len - RAL_RX_DESC_SIZE);
728 
729 	if (letoh32(desc->flags) & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) {
730 		/*
731 		 * This should not happen since we did not request to receive
732 		 * those frames when we filled RAL_TXRX_CSR2.
733 		 */
734 		DPRINTFN(5, ("PHY or CRC error\n"));
735 		ifp->if_ierrors++;
736 		goto skip;
737 	}
738 
739 	MGETHDR(mnew, M_DONTWAIT, MT_DATA);
740 	if (mnew == NULL) {
741 		printf("%s: could not allocate rx mbuf\n",
742 		    sc->sc_dev.dv_xname);
743 		ifp->if_ierrors++;
744 		goto skip;
745 	}
746 	MCLGET(mnew, M_DONTWAIT);
747 	if (!(mnew->m_flags & M_EXT)) {
748 		printf("%s: could not allocate rx mbuf cluster\n",
749 		    sc->sc_dev.dv_xname);
750 		m_freem(mnew);
751 		ifp->if_ierrors++;
752 		goto skip;
753 	}
754 	m = data->m;
755 	data->m = mnew;
756 	data->buf = mtod(data->m, uint8_t *);
757 
758 	/* finalize mbuf; the frame length is in bits 16-27 of desc->flags */
759 	m->m_pkthdr.len = m->m_len = (letoh32(desc->flags) >> 16) & 0xfff;
760 
761 	s = splnet();
762 
763 #if NBPFILTER > 0
764 	if (sc->sc_drvbpf != NULL) {
765 		struct mbuf mb;
766 		struct ural_rx_radiotap_header *tap = &sc->sc_rxtap;
767 
768 		tap->wr_flags = IEEE80211_RADIOTAP_F_FCS;
769 		tap->wr_rate = ural_rxrate(desc);
770 		tap->wr_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
771 		tap->wr_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
772 		tap->wr_antenna = sc->rx_ant;
773 		tap->wr_antsignal = desc->rssi;
774 
775 		mb.m_data = (caddr_t)tap;
776 		mb.m_len = sc->sc_rxtap_len;
777 		mb.m_next = m;
778 		mb.m_nextpkt = NULL;
779 		mb.m_type = 0;
780 		mb.m_flags = 0;
781 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
782 	}
783 #endif
784 	m_adj(m, -IEEE80211_CRC_LEN);	/* trim FCS */
785 
786 	wh = mtod(m, struct ieee80211_frame *);
787 	ni = ieee80211_find_rxnode(ic, wh);
788 
789 	/* send the frame to the 802.11 layer */
790 	rxi.rxi_flags = 0;
791 	rxi.rxi_rssi = desc->rssi;
792 	rxi.rxi_tstamp = 0;	/* unused */
793 	ieee80211_input(ifp, m, ni, &rxi);
794 
795 	/* node is no longer needed */
796 	ieee80211_release_node(ic, ni);
797 
798 	splx(s);
799 
800 	DPRINTFN(15, ("rx done\n"));
801 
802 skip:	/* setup a new transfer */
803 	usbd_setup_xfer(xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES,
804 	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
805 	(void)usbd_transfer(xfer);
806 }
807 
808 /*
809  * This function is only used by the Rx radiotap code. It returns the rate at
810  * which a given frame was received.
811  */
812 #if NBPFILTER > 0
813 uint8_t
814 ural_rxrate(const struct ural_rx_desc *desc)
815 {
816 	if (letoh32(desc->flags) & RAL_RX_OFDM) {
817 		/* reverse function of ural_plcp_signal */
818 		switch (desc->rate) {
819 		case 0xb:	return 12;
820 		case 0xf:	return 18;
821 		case 0xa:	return 24;
822 		case 0xe:	return 36;
823 		case 0x9:	return 48;
824 		case 0xd:	return 72;
825 		case 0x8:	return 96;
826 		case 0xc:	return 108;
827 		}
828 	} else {
829 		if (desc->rate == 10)
830 			return 2;
831 		if (desc->rate == 20)
832 			return 4;
833 		if (desc->rate == 55)
834 			return 11;
835 		if (desc->rate == 110)
836 			return 22;
837 	}
838 	return 2;	/* should not get here */
839 }
840 #endif
841 
842 /*
843  * Return the expected ack rate for a frame transmitted at rate `rate'.
844  */
845 int
846 ural_ack_rate(struct ieee80211com *ic, int rate)
847 {
848 	switch (rate) {
849 	/* CCK rates */
850 	case 2:
851 		return 2;
852 	case 4:
853 	case 11:
854 	case 22:
855 		return (ic->ic_curmode == IEEE80211_MODE_11B) ? 4 : rate;
856 
857 	/* OFDM rates */
858 	case 12:
859 	case 18:
860 		return 12;
861 	case 24:
862 	case 36:
863 		return 24;
864 	case 48:
865 	case 72:
866 	case 96:
867 	case 108:
868 		return 48;
869 	}
870 
871 	/* default to 1Mbps */
872 	return 2;
873 }
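/*
 * Example: a frame sent at 36Mbps (rate 72) is expected to be acked at
 * 24Mbps (rate 48), which matches the highest mandatory rate not above the
 * data rate.
 */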
874 
875 /*
876  * Compute the duration (in us) needed to transmit `len' bytes at rate `rate'.
877  * The function automatically determines the operating mode depending on the
878  * given rate. `flags' indicates whether short preamble is in use or not.
879  */
880 uint16_t
881 ural_txtime(int len, int rate, uint32_t flags)
882 {
883 	uint16_t txtime;
884 
885 	if (RAL_RATE_IS_OFDM(rate)) {
886 		/* IEEE Std 802.11g-2003, pp. 44 */
887 		txtime = (8 + 4 * len + 3 + rate - 1) / rate;
888 		txtime = 16 + 4 + 4 * txtime + 6;
889 	} else {
890 		/* IEEE Std 802.11b-1999, pp. 28 */
891 		txtime = (16 * len + rate - 1) / rate;
892 		if (rate != 2 && (flags & IEEE80211_F_SHPREAMBLE))
893 			txtime +=  72 + 24;
894 		else
895 			txtime += 144 + 48;
896 	}
897 	return txtime;
898 }
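/*
 * Worked examples, derived from the formulas above: an ACK-sized frame
 * (14 bytes) at 1Mbps (rate 2) with a long preamble gives
 * 16 * 14 / 2 + 144 + 48 = 304us; the same frame at 6Mbps OFDM (rate 12)
 * gives 16 + 4 + 4 * 6 + 6 = 50us, the final 6us being the 802.11g signal
 * extension.
 */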
899 
900 uint8_t
901 ural_plcp_signal(int rate)
902 {
903 	switch (rate) {
904 	/* CCK rates (returned values are device-dependent) */
905 	case 2:		return 0x0;
906 	case 4:		return 0x1;
907 	case 11:	return 0x2;
908 	case 22:	return 0x3;
909 
910 	/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
911 	case 12:	return 0xb;
912 	case 18:	return 0xf;
913 	case 24:	return 0xa;
914 	case 36:	return 0xe;
915 	case 48:	return 0x9;
916 	case 72:	return 0xd;
917 	case 96:	return 0x8;
918 	case 108:	return 0xc;
919 
920 	/* unsupported rates (should not get here) */
921 	default:	return 0xff;
922 	}
923 }
924 
925 void
926 ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc,
927     uint32_t flags, int len, int rate)
928 {
929 	struct ieee80211com *ic = &sc->sc_ic;
930 	uint16_t plcp_length;
931 	int remainder;
932 
933 	desc->flags = htole32(flags);
934 	desc->flags |= htole32(len << 16);
935 
936 	desc->wme = htole16(
937 	    RAL_AIFSN(2) |
938 	    RAL_LOGCWMIN(3) |
939 	    RAL_LOGCWMAX(5));
940 
941 	/* setup PLCP fields */
942 	desc->plcp_signal  = ural_plcp_signal(rate);
943 	desc->plcp_service = 4;
944 
945 	len += IEEE80211_CRC_LEN;
946 	if (RAL_RATE_IS_OFDM(rate)) {
947 		desc->flags |= htole32(RAL_TX_OFDM);
948 
949 		plcp_length = len & 0xfff;
950 		desc->plcp_length_hi = plcp_length >> 6;
951 		desc->plcp_length_lo = plcp_length & 0x3f;
952 	} else {
953 		plcp_length = (16 * len + rate - 1) / rate;
954 		if (rate == 22) {
955 			remainder = (16 * len) % 22;
956 			if (remainder != 0 && remainder < 7)
957 				desc->plcp_service |= RAL_PLCP_LENGEXT;
958 		}
959 		desc->plcp_length_hi = plcp_length >> 8;
960 		desc->plcp_length_lo = plcp_length & 0xff;
961 
962 		if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
963 			desc->plcp_signal |= 0x08;
964 	}
965 
966 	desc->iv = 0;
967 	desc->eiv = 0;
968 }
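/*
 * CCK PLCP example: with an on-air length of 14 bytes at 11Mbps (rate 22),
 * plcp_length = (16 * 14 + 21) / 22 = 11us and the remainder
 * (16 * 14) % 22 = 4 is non-zero and below 7, so RAL_PLCP_LENGEXT is set to
 * disambiguate the rounded-up length field.
 */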
969 
970 #define RAL_TX_TIMEOUT	5000
971 
972 #ifndef IEEE80211_STA_ONLY
973 int
974 ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
975 {
976 	struct ural_tx_desc *desc;
977 	struct usbd_xfer *xfer;
978 	usbd_status error;
979 	uint8_t cmd = 0;
980 	uint8_t *buf;
981 	int xferlen, rate = 2;
982 
983 	xfer = usbd_alloc_xfer(sc->sc_udev);
984 	if (xfer == NULL)
985 		return ENOMEM;
986 
987 	/* xfer length needs to be a multiple of two! */
988 	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;
989 
990 	buf = usbd_alloc_buffer(xfer, xferlen);
991 	if (buf == NULL) {
992 		usbd_free_xfer(xfer);
993 		return ENOMEM;
994 	}
995 
996 	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, &cmd, sizeof cmd,
997 	    USBD_FORCE_SHORT_XFER | USBD_SYNCHRONOUS, RAL_TX_TIMEOUT, NULL);
998 
999 	error = usbd_transfer(xfer);
1000 	if (error != 0) {
1001 		usbd_free_xfer(xfer);
1002 		return error;
1003 	}
1004 
1005 	desc = (struct ural_tx_desc *)buf;
1006 
1007 	m_copydata(m0, 0, m0->m_pkthdr.len, buf + RAL_TX_DESC_SIZE);
1008 	ural_setup_tx_desc(sc, desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP,
1009 	    m0->m_pkthdr.len, rate);
1010 
1011 	DPRINTFN(10, ("sending beacon frame len=%u rate=%u xfer len=%u\n",
1012 	    m0->m_pkthdr.len, rate, xferlen));
1013 
1014 	usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, buf, xferlen,
1015 	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY | USBD_SYNCHRONOUS,
1016 	    RAL_TX_TIMEOUT, NULL);
1017 
1018 	error = usbd_transfer(xfer);
1019 	usbd_free_xfer(xfer);
1020 
1021 	return error;
1022 }
1023 #endif
1024 
1025 int
1026 ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
1027 {
1028 	struct ieee80211com *ic = &sc->sc_ic;
1029 	struct ural_tx_desc *desc;
1030 	struct ural_tx_data *data;
1031 	struct ieee80211_frame *wh;
1032 	struct ieee80211_key *k;
1033 	uint32_t flags = RAL_TX_NEWSEQ;
1034 	uint16_t dur;
1035 	usbd_status error;
1036 	int rate, xferlen, pktlen, needrts = 0, needcts = 0;
1037 
1038 	wh = mtod(m0, struct ieee80211_frame *);
1039 
1040 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
1041 		k = ieee80211_get_txkey(ic, wh, ni);
1042 
1043 		if ((m0 = ieee80211_encrypt(ic, m0, k)) == NULL)
1044 			return ENOBUFS;
1045 
1046 		/* packet header may have moved, reset our local pointer */
1047 		wh = mtod(m0, struct ieee80211_frame *);
1048 	}
1049 
1050 	/* compute actual packet length (including CRC and crypto overhead) */
1051 	pktlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN;
1052 
1053 	/* pickup a rate */
1054 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
1055 	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1056 	     IEEE80211_FC0_TYPE_MGT)) {
1057 		/* mgmt/multicast frames are sent at the lowest avail. rate */
1058 		rate = ni->ni_rates.rs_rates[0];
1059 	} else if (ic->ic_fixed_rate != -1) {
1060 		rate = ic->ic_sup_rates[ic->ic_curmode].
1061 		    rs_rates[ic->ic_fixed_rate];
1062 	} else
1063 		rate = ni->ni_rates.rs_rates[ni->ni_txrate];
1064 	if (rate == 0)
1065 		rate = 2;	/* XXX should not happen */
1066 	rate &= IEEE80211_RATE_VAL;
1067 
1068 	/* check if RTS/CTS or CTS-to-self protection must be used */
1069 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1070 		/* only unicast needs protection; multicast is never sent at OFDM */
1071 		if (pktlen > ic->ic_rtsthreshold) {
1072 			needrts = 1;	/* RTS/CTS based on frame length */
1073 		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1074 		    RAL_RATE_IS_OFDM(rate)) {
1075 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
1076 				needcts = 1;	/* CTS-to-self */
1077 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
1078 				needrts = 1;	/* RTS/CTS */
1079 		}
1080 	}
1081 	if (needrts || needcts) {
1082 		struct mbuf *mprot;
1083 		int protrate, ackrate;
1084 		uint16_t dur;
1085 
1086 		protrate = 2;
1087 		ackrate  = ural_ack_rate(ic, rate);
1088 
1089 		dur = ural_txtime(pktlen, rate, ic->ic_flags) +
1090 		      ural_txtime(RAL_ACK_SIZE, ackrate, ic->ic_flags) +
1091 		      2 * RAL_SIFS;
1092 		if (needrts) {
1093 			dur += ural_txtime(RAL_CTS_SIZE, ural_ack_rate(ic,
1094 			    protrate), ic->ic_flags) + RAL_SIFS;
1095 			mprot = ieee80211_get_rts(ic, wh, dur);
1096 		} else {
1097 			mprot = ieee80211_get_cts_to_self(ic, dur);
1098 		}
1099 		if (mprot == NULL) {
1100 			printf("%s: could not allocate protection frame\n",
1101 			    sc->sc_dev.dv_xname);
1102 			m_freem(m0);
1103 			return ENOBUFS;
1104 		}
1105 
1106 		data = &sc->tx_data[sc->tx_cur];
1107 		desc = (struct ural_tx_desc *)data->buf;
1108 
1109 		/* avoid multiple free() of the same node for each fragment */
1110 		data->ni = ieee80211_ref_node(ni);
1111 
1112 		m_copydata(mprot, 0, mprot->m_pkthdr.len,
1113 		    data->buf + RAL_TX_DESC_SIZE);
1114 		ural_setup_tx_desc(sc, desc,
1115 		    (needrts ? RAL_TX_NEED_ACK : 0) | RAL_TX_RETRY(7),
1116 		    mprot->m_pkthdr.len, protrate);
1117 
1118 		/* no roundup necessary here */
1119 		xferlen = RAL_TX_DESC_SIZE + mprot->m_pkthdr.len;
1120 
1121 		/* XXX may want to pass the protection frame to BPF */
1122 
1123 		/* mbuf is no longer needed */
1124 		m_freem(mprot);
1125 
1126 		usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf,
1127 		    xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY,
1128 		    RAL_TX_TIMEOUT, ural_txeof);
1129 		error = usbd_transfer(data->xfer);
1130 		if (error != 0 && error != USBD_IN_PROGRESS) {
1131 			m_freem(m0);
1132 			return error;
1133 		}
1134 
1135 		sc->tx_queued++;
1136 		sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;
1137 
1138 		flags |= RAL_TX_IFS_SIFS;
1139 	}
1140 
1141 	data = &sc->tx_data[sc->tx_cur];
1142 	desc = (struct ural_tx_desc *)data->buf;
1143 
1144 	data->ni = ni;
1145 
1146 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1147 		flags |= RAL_TX_NEED_ACK;
1148 		flags |= RAL_TX_RETRY(7);
1149 
1150 		dur = ural_txtime(RAL_ACK_SIZE, ural_ack_rate(ic, rate),
1151 		    ic->ic_flags) + RAL_SIFS;
1152 		*(uint16_t *)wh->i_dur = htole16(dur);
1153 
1154 #ifndef IEEE80211_STA_ONLY
1155 		/* tell hardware to set timestamp in probe responses */
1156 		if ((wh->i_fc[0] &
1157 		    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
1158 		    (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
1159 			flags |= RAL_TX_TIMESTAMP;
1160 #endif
1161 	}
1162 
1163 #if NBPFILTER > 0
1164 	if (sc->sc_drvbpf != NULL) {
1165 		struct mbuf mb;
1166 		struct ural_tx_radiotap_header *tap = &sc->sc_txtap;
1167 
1168 		tap->wt_flags = 0;
1169 		tap->wt_rate = rate;
1170 		tap->wt_chan_freq = htole16(ic->ic_bss->ni_chan->ic_freq);
1171 		tap->wt_chan_flags = htole16(ic->ic_bss->ni_chan->ic_flags);
1172 		tap->wt_antenna = sc->tx_ant;
1173 
1174 		mb.m_data = (caddr_t)tap;
1175 		mb.m_len = sc->sc_txtap_len;
1176 		mb.m_next = m0;
1177 		mb.m_nextpkt = NULL;
1178 		mb.m_type = 0;
1179 		mb.m_flags = 0;
1180 		bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
1181 	}
1182 #endif
1183 
1184 	m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE);
1185 	ural_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate);
1186 
1187 	/* align end on a 2-bytes boundary */
1188 	xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1;
1189 
1190 	/*
1191 	 * When the length is an exact multiple of 64 bytes, no space is left in
1192 	 * the last URB to store the extra 2 bytes; force sending of another URB.
1193 	 */
1194 	if ((xferlen % 64) == 0)
1195 		xferlen += 2;
1196 
1197 	DPRINTFN(10, ("sending frame len=%u rate=%u xfer len=%u\n",
1198 	    m0->m_pkthdr.len, rate, xferlen));
1199 
1200 	/* mbuf is no longer needed */
1201 	m_freem(m0);
1202 
1203 	usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen,
1204 	    USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof);
1205 	error = usbd_transfer(data->xfer);
1206 	if (error != 0 && error != USBD_IN_PROGRESS)
1207 		return error;
1208 
1209 	sc->tx_queued++;
1210 	sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT;
1211 
1212 	return 0;
1213 }
1214 
1215 void
1216 ural_start(struct ifnet *ifp)
1217 {
1218 	struct ural_softc *sc = ifp->if_softc;
1219 	struct ieee80211com *ic = &sc->sc_ic;
1220 	struct ieee80211_node *ni;
1221 	struct mbuf *m0;
1222 
1223 	/*
1224 	 * net80211 may still try to send management frames even if the
1225 	 * IFF_RUNNING flag is not set...
1226 	 */
1227 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1228 		return;
1229 
1230 	for (;;) {
1231 		if (sc->tx_queued >= RAL_TX_LIST_COUNT - 1) {
1232 			ifq_set_oactive(&ifp->if_snd);
1233 			break;
1234 		}
1235 
1236 		m0 = mq_dequeue(&ic->ic_mgtq);
1237 		if (m0 != NULL) {
1238 			ni = m0->m_pkthdr.ph_cookie;
1239 #if NBPFILTER > 0
1240 			if (ic->ic_rawbpf != NULL)
1241 				bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
1242 #endif
1243 			if (ural_tx_data(sc, m0, ni) != 0)
1244 				break;
1245 
1246 		} else {
1247 			if (ic->ic_state != IEEE80211_S_RUN)
1248 				break;
1249 
1250 			IFQ_DEQUEUE(&ifp->if_snd, m0);
1251 			if (m0 == NULL)
1252 				break;
1253 #if NBPFILTER > 0
1254 			if (ifp->if_bpf != NULL)
1255 				bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
1256 #endif
1257 			m0 = ieee80211_encap(ifp, m0, &ni);
1258 			if (m0 == NULL)
1259 				continue;
1260 #if NBPFILTER > 0
1261 			if (ic->ic_rawbpf != NULL)
1262 				bpf_mtap(ic->ic_rawbpf, m0, BPF_DIRECTION_OUT);
1263 #endif
1264 			if (ural_tx_data(sc, m0, ni) != 0) {
1265 				if (ni != NULL)
1266 					ieee80211_release_node(ic, ni);
1267 				ifp->if_oerrors++;
1268 				break;
1269 			}
1270 		}
1271 
1272 		sc->sc_tx_timer = 5;
1273 		ifp->if_timer = 1;
1274 	}
1275 }
1276 
1277 void
1278 ural_watchdog(struct ifnet *ifp)
1279 {
1280 	struct ural_softc *sc = ifp->if_softc;
1281 
1282 	ifp->if_timer = 0;
1283 
1284 	if (sc->sc_tx_timer > 0) {
1285 		if (--sc->sc_tx_timer == 0) {
1286 			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1287 			/*ural_init(ifp); XXX needs a process context! */
1288 			ifp->if_oerrors++;
1289 			return;
1290 		}
1291 		ifp->if_timer = 1;
1292 	}
1293 
1294 	ieee80211_watchdog(ifp);
1295 }
1296 
1297 int
1298 ural_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1299 {
1300 	struct ural_softc *sc = ifp->if_softc;
1301 	struct ieee80211com *ic = &sc->sc_ic;
1302 	struct ifreq *ifr;
1303 	int s, error = 0;
1304 
1305 	if (usbd_is_dying(sc->sc_udev))
1306 		return ENXIO;
1307 
1308 	usbd_ref_incr(sc->sc_udev);
1309 
1310 	s = splnet();
1311 
1312 	switch (cmd) {
1313 	case SIOCSIFADDR:
1314 		ifp->if_flags |= IFF_UP;
1315 		/* FALLTHROUGH */
1316 	case SIOCSIFFLAGS:
1317 		if (ifp->if_flags & IFF_UP) {
1318 			if (ifp->if_flags & IFF_RUNNING)
1319 				ural_update_promisc(sc);
1320 			else
1321 				ural_init(ifp);
1322 		} else {
1323 			if (ifp->if_flags & IFF_RUNNING)
1324 				ural_stop(ifp, 1);
1325 		}
1326 		break;
1327 
1328 	case SIOCADDMULTI:
1329 	case SIOCDELMULTI:
1330 		ifr = (struct ifreq *)data;
1331 		error = (cmd == SIOCADDMULTI) ?
1332 		    ether_addmulti(ifr, &ic->ic_ac) :
1333 		    ether_delmulti(ifr, &ic->ic_ac);
1334 
1335 		if (error == ENETRESET)
1336 			error = 0;
1337 		break;
1338 
1339 	case SIOCS80211CHANNEL:
1340 		/*
1341 		 * This allows for fast channel switching in monitor mode
1342 		 * (used by kismet). In IBSS mode, we must explicitly reset
1343 		 * the interface to generate a new beacon frame.
1344 		 */
1345 		error = ieee80211_ioctl(ifp, cmd, data);
1346 		if (error == ENETRESET &&
1347 		    ic->ic_opmode == IEEE80211_M_MONITOR) {
1348 			if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1349 			    (IFF_UP | IFF_RUNNING))
1350 				ural_set_chan(sc, ic->ic_ibss_chan);
1351 			error = 0;
1352 		}
1353 		break;
1354 
1355 	default:
1356 		error = ieee80211_ioctl(ifp, cmd, data);
1357 	}
1358 
1359 	if (error == ENETRESET) {
1360 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1361 		    (IFF_UP | IFF_RUNNING))
1362 			ural_init(ifp);
1363 		error = 0;
1364 	}
1365 
1366 	splx(s);
1367 
1368 	usbd_ref_decr(sc->sc_udev);
1369 
1370 	return error;
1371 }
1372 
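/*
 * All chip accesses below are USB vendor requests on the default control
 * pipe: wIndex selects the register or EEPROM offset, wValue carries the
 * value for single-register writes, and multi-byte accesses go through the
 * data stage.
 */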
1373 void
1374 ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len)
1375 {
1376 	usb_device_request_t req;
1377 	usbd_status error;
1378 
1379 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1380 	req.bRequest = RAL_READ_EEPROM;
1381 	USETW(req.wValue, 0);
1382 	USETW(req.wIndex, addr);
1383 	USETW(req.wLength, len);
1384 
1385 	error = usbd_do_request(sc->sc_udev, &req, buf);
1386 	if (error != 0) {
1387 		printf("%s: could not read EEPROM: %s\n",
1388 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1389 	}
1390 }
1391 
1392 uint16_t
1393 ural_read(struct ural_softc *sc, uint16_t reg)
1394 {
1395 	usb_device_request_t req;
1396 	usbd_status error;
1397 	uint16_t val;
1398 
1399 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1400 	req.bRequest = RAL_READ_MAC;
1401 	USETW(req.wValue, 0);
1402 	USETW(req.wIndex, reg);
1403 	USETW(req.wLength, sizeof (uint16_t));
1404 
1405 	error = usbd_do_request(sc->sc_udev, &req, &val);
1406 	if (error != 0) {
1407 		printf("%s: could not read MAC register: %s\n",
1408 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1409 		return 0;
1410 	}
1411 	return letoh16(val);
1412 }
1413 
1414 void
1415 ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
1416 {
1417 	usb_device_request_t req;
1418 	usbd_status error;
1419 
1420 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
1421 	req.bRequest = RAL_READ_MULTI_MAC;
1422 	USETW(req.wValue, 0);
1423 	USETW(req.wIndex, reg);
1424 	USETW(req.wLength, len);
1425 
1426 	error = usbd_do_request(sc->sc_udev, &req, buf);
1427 	if (error != 0) {
1428 		printf("%s: could not read MAC register: %s\n",
1429 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1430 	}
1431 }
1432 
1433 void
1434 ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val)
1435 {
1436 	usb_device_request_t req;
1437 	usbd_status error;
1438 
1439 	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1440 	req.bRequest = RAL_WRITE_MAC;
1441 	USETW(req.wValue, val);
1442 	USETW(req.wIndex, reg);
1443 	USETW(req.wLength, 0);
1444 
1445 	error = usbd_do_request(sc->sc_udev, &req, NULL);
1446 	if (error != 0) {
1447 		printf("%s: could not write MAC register: %s\n",
1448 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1449 	}
1450 }
1451 
1452 void
1453 ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
1454 {
1455 	usb_device_request_t req;
1456 	usbd_status error;
1457 
1458 	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
1459 	req.bRequest = RAL_WRITE_MULTI_MAC;
1460 	USETW(req.wValue, 0);
1461 	USETW(req.wIndex, reg);
1462 	USETW(req.wLength, len);
1463 
1464 	error = usbd_do_request(sc->sc_udev, &req, buf);
1465 	if (error != 0) {
1466 		printf("%s: could not write MAC register: %s\n",
1467 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1468 	}
1469 }
1470 
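/*
 * BBP registers are reached indirectly through PHY_CSR7/PHY_CSR8: wait for
 * the BBP busy bit in PHY_CSR8 to clear, then write the register index in
 * the high byte of PHY_CSR7 and the value in the low byte.
 */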
1471 void
1472 ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val)
1473 {
1474 	uint16_t tmp;
1475 	int ntries;
1476 
1477 	for (ntries = 0; ntries < 5; ntries++) {
1478 		if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
1479 			break;
1480 	}
1481 	if (ntries == 5) {
1482 		printf("%s: could not write to BBP\n", sc->sc_dev.dv_xname);
1483 		return;
1484 	}
1485 
1486 	tmp = reg << 8 | val;
1487 	ural_write(sc, RAL_PHY_CSR7, tmp);
1488 }
1489 
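/*
 * Reads use the same indirect interface: post the register index (together
 * with RAL_BBP_WRITE) to PHY_CSR7, wait for the busy bit to clear and take
 * the result from the low byte of PHY_CSR7.
 */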
1490 uint8_t
1491 ural_bbp_read(struct ural_softc *sc, uint8_t reg)
1492 {
1493 	uint16_t val;
1494 	int ntries;
1495 
1496 	val = RAL_BBP_WRITE | reg << 8;
1497 	ural_write(sc, RAL_PHY_CSR7, val);
1498 
1499 	for (ntries = 0; ntries < 5; ntries++) {
1500 		if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
1501 			break;
1502 	}
1503 	if (ntries == 5) {
1504 		printf("%s: could not read BBP\n", sc->sc_dev.dv_xname);
1505 		return 0;
1506 	}
1507 	return ural_read(sc, RAL_PHY_CSR7) & 0xff;
1508 }
1509 
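/*
 * RF registers are 20 bits wide.  The word built below packs the 2-bit
 * register index in bits 0-1 and the value in bits 2-21, together with the
 * busy and 20-bit-width flags, and is written as two 16-bit halves to
 * PHY_CSR9 (low) and PHY_CSR10 (high).
 */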
1510 void
1511 ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val)
1512 {
1513 	uint32_t tmp;
1514 	int ntries;
1515 
1516 	for (ntries = 0; ntries < 5; ntries++) {
1517 		if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY))
1518 			break;
1519 	}
1520 	if (ntries == 5) {
1521 		printf("%s: could not write to RF\n", sc->sc_dev.dv_xname);
1522 		return;
1523 	}
1524 
1525 	tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3);
1526 	ural_write(sc, RAL_PHY_CSR9,  tmp & 0xffff);
1527 	ural_write(sc, RAL_PHY_CSR10, tmp >> 16);
1528 
1529 	/* remember last written value in sc */
1530 	sc->rf_regs[reg] = val;
1531 
1532 	DPRINTFN(15, ("RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff));
1533 }
1534 
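/*
 * Channel tuning: each RF revision has its own per-channel synthesizer table
 * (indexed by chan - 1) and the Tx power (0-31) ends up in bits 7-11 of RF3.
 */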
1535 void
1536 ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c)
1537 {
1538 	struct ieee80211com *ic = &sc->sc_ic;
1539 	uint8_t power, tmp;
1540 	u_int chan;
1541 
1542 	chan = ieee80211_chan2ieee(ic, c);
1543 	if (chan == 0 || chan == IEEE80211_CHAN_ANY)
1544 		return;
1545 
1546 	power = min(sc->txpow[chan - 1], 31);
1547 
1548 	DPRINTFN(2, ("setting channel to %u, txpower to %u\n", chan, power));
1549 
1550 	switch (sc->rf_rev) {
1551 	case RAL_RF_2522:
1552 		ural_rf_write(sc, RAL_RF1, 0x00814);
1553 		ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]);
1554 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
1555 		break;
1556 
1557 	case RAL_RF_2523:
1558 		ural_rf_write(sc, RAL_RF1, 0x08804);
1559 		ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]);
1560 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044);
1561 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1562 		break;
1563 
1564 	case RAL_RF_2524:
1565 		ural_rf_write(sc, RAL_RF1, 0x0c808);
1566 		ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]);
1567 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
1568 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1569 		break;
1570 
1571 	case RAL_RF_2525:
1572 		ural_rf_write(sc, RAL_RF1, 0x08808);
1573 		ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]);
1574 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1575 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1576 
1577 		ural_rf_write(sc, RAL_RF1, 0x08808);
1578 		ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]);
1579 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1580 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
1581 		break;
1582 
1583 	case RAL_RF_2525E:
1584 		ural_rf_write(sc, RAL_RF1, 0x08808);
1585 		ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]);
1586 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1587 		ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282);
1588 		break;
1589 
1590 	case RAL_RF_2526:
1591 		ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]);
1592 		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
1593 		ural_rf_write(sc, RAL_RF1, 0x08804);
1594 
1595 		ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]);
1596 		ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
1597 		ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
1598 		break;
1599 	}
1600 
1601 	if (ic->ic_opmode != IEEE80211_M_MONITOR &&
1602 	    ic->ic_state != IEEE80211_S_SCAN) {
1603 		/* set Japan filter bit for channel 14 */
1604 		tmp = ural_bbp_read(sc, 70);
1605 
1606 		tmp &= ~RAL_JAPAN_FILTER;
1607 		if (chan == 14)
1608 			tmp |= RAL_JAPAN_FILTER;
1609 
1610 		ural_bbp_write(sc, 70, tmp);
1611 
1612 		/* clear CRC errors */
1613 		ural_read(sc, RAL_STA_CSR0);
1614 
1615 		DELAY(1000); /* RF needs a 1ms delay here */
1616 		ural_disable_rf_tune(sc);
1617 	}
1618 }
1619 
1620 /*
1621  * Disable RF auto-tuning.
1622  */
1623 void
1624 ural_disable_rf_tune(struct ural_softc *sc)
1625 {
1626 	uint32_t tmp;
1627 
1628 	if (sc->rf_rev != RAL_RF_2523) {
1629 		tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE;
1630 		ural_rf_write(sc, RAL_RF1, tmp);
1631 	}
1632 
1633 	tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE;
1634 	ural_rf_write(sc, RAL_RF3, tmp);
1635 
1636 	DPRINTFN(2, ("disabling RF autotune\n"));
1637 }
1638 
1639 /*
1640  * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF
1641  * synchronization.
1642  */
1643 void
1644 ural_enable_tsf_sync(struct ural_softc *sc)
1645 {
1646 	struct ieee80211com *ic = &sc->sc_ic;
1647 	uint16_t logcwmin, preload, tmp;
1648 
1649 	/* first, disable TSF synchronization */
1650 	ural_write(sc, RAL_TXRX_CSR19, 0);
1651 
1652 	tmp = (16 * ic->ic_bss->ni_intval) << 4;
1653 	ural_write(sc, RAL_TXRX_CSR18, tmp);
1654 
1655 #ifndef IEEE80211_STA_ONLY
1656 	if (ic->ic_opmode == IEEE80211_M_IBSS) {
1657 		logcwmin = 2;
1658 		preload = 320;
1659 	} else
1660 #endif
1661 	{
1662 		logcwmin = 0;
1663 		preload = 6;
1664 	}
1665 	tmp = logcwmin << 12 | preload;
1666 	ural_write(sc, RAL_TXRX_CSR20, tmp);
1667 
1668 	/* finally, enable TSF synchronization */
1669 	tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN;
1670 	if (ic->ic_opmode == IEEE80211_M_STA)
1671 		tmp |= RAL_ENABLE_TSF_SYNC(1);
1672 #ifndef IEEE80211_STA_ONLY
1673 	else
1674 		tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR;
1675 #endif
1676 	ural_write(sc, RAL_TXRX_CSR19, tmp);
1677 
1678 	DPRINTF(("enabling TSF synchronization\n"));
1679 }
1680 
1681 void
1682 ural_update_slot(struct ural_softc *sc)
1683 {
1684 	struct ieee80211com *ic = &sc->sc_ic;
1685 	uint16_t slottime, sifs, eifs;
1686 
1687 	slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ?
1688 	    IEEE80211_DUR_DS_SHSLOT : IEEE80211_DUR_DS_SLOT;
1689 
1690 	/*
1691 	 * These settings may sound a bit inconsistent but this is what the
1692 	 * reference driver does.
1693 	 */
1694 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
1695 		sifs = 16 - RAL_RXTX_TURNAROUND;
1696 		eifs = 364;
1697 	} else {
1698 		sifs = 10 - RAL_RXTX_TURNAROUND;
1699 		eifs = 64;
1700 	}
1701 
1702 	ural_write(sc, RAL_MAC_CSR10, slottime);
1703 	ural_write(sc, RAL_MAC_CSR11, sifs);
1704 	ural_write(sc, RAL_MAC_CSR12, eifs);
1705 }
1706 
1707 void
1708 ural_set_txpreamble(struct ural_softc *sc)
1709 {
1710 	uint16_t tmp;
1711 
1712 	tmp = ural_read(sc, RAL_TXRX_CSR10);
1713 
1714 	tmp &= ~RAL_SHORT_PREAMBLE;
1715 	if (sc->sc_ic.ic_flags & IEEE80211_F_SHPREAMBLE)
1716 		tmp |= RAL_SHORT_PREAMBLE;
1717 
1718 	ural_write(sc, RAL_TXRX_CSR10, tmp);
1719 }
1720 
1721 void
1722 ural_set_basicrates(struct ural_softc *sc)
1723 {
1724 	struct ieee80211com *ic = &sc->sc_ic;
1725 
1726 	/* update basic rate set */
1727 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
1728 		/* 11b basic rates: 1, 2Mbps */
1729 		ural_write(sc, RAL_TXRX_CSR11, 0x3);
1730 	} else {
1731 		/* 11b/g basic rates: 1, 2, 5.5, 11Mbps */
1732 		ural_write(sc, RAL_TXRX_CSR11, 0xf);
1733 	}
1734 }
1735 
1736 void
1737 ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid)
1738 {
1739 	uint16_t tmp;
1740 
1741 	tmp = bssid[0] | bssid[1] << 8;
1742 	ural_write(sc, RAL_MAC_CSR5, tmp);
1743 
1744 	tmp = bssid[2] | bssid[3] << 8;
1745 	ural_write(sc, RAL_MAC_CSR6, tmp);
1746 
1747 	tmp = bssid[4] | bssid[5] << 8;
1748 	ural_write(sc, RAL_MAC_CSR7, tmp);
1749 
1750 	DPRINTF(("setting BSSID to %s\n", ether_sprintf((uint8_t *)bssid)));
1751 }
1752 
1753 void
1754 ural_set_macaddr(struct ural_softc *sc, const uint8_t *addr)
1755 {
1756 	uint16_t tmp;
1757 
1758 	tmp = addr[0] | addr[1] << 8;
1759 	ural_write(sc, RAL_MAC_CSR2, tmp);
1760 
1761 	tmp = addr[2] | addr[3] << 8;
1762 	ural_write(sc, RAL_MAC_CSR3, tmp);
1763 
1764 	tmp = addr[4] | addr[5] << 8;
1765 	ural_write(sc, RAL_MAC_CSR4, tmp);
1766 
1767 	DPRINTF(("setting MAC address to %s\n",
1768 	    ether_sprintf((uint8_t *)addr)));
1769 }
1770 
1771 void
1772 ural_update_promisc(struct ural_softc *sc)
1773 {
1774 	struct ifnet *ifp = &sc->sc_ic.ic_if;
1775 	uint16_t tmp;
1776 
1777 	tmp = ural_read(sc, RAL_TXRX_CSR2);
1778 
1779 	tmp &= ~RAL_DROP_NOT_TO_ME;
1780 	if (!(ifp->if_flags & IFF_PROMISC))
1781 		tmp |= RAL_DROP_NOT_TO_ME;
1782 
1783 	ural_write(sc, RAL_TXRX_CSR2, tmp);
1784 
1785 	DPRINTF(("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ?
1786 	    "entering" : "leaving"));
1787 }
1788 
1789 const char *
1790 ural_get_rf(int rev)
1791 {
1792 	switch (rev) {
1793 	case RAL_RF_2522:	return "RT2522";
1794 	case RAL_RF_2523:	return "RT2523";
1795 	case RAL_RF_2524:	return "RT2524";
1796 	case RAL_RF_2525:	return "RT2525";
1797 	case RAL_RF_2525E:	return "RT2525e";
1798 	case RAL_RF_2526:	return "RT2526";
1799 	case RAL_RF_5222:	return "RT5222";
1800 	default:		return "unknown";
1801 	}
1802 }
1803 
1804 void
1805 ural_read_eeprom(struct ural_softc *sc)
1806 {
1807 	struct ieee80211com *ic = &sc->sc_ic;
1808 	uint16_t val;
1809 
1810 	/* retrieve MAC/BBP type */
1811 	ural_eeprom_read(sc, RAL_EEPROM_MACBBP, &val, 2);
1812 	sc->macbbp_rev = letoh16(val);
1813 
1814 	ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2);
1815 	val = letoh16(val);
1816 	sc->rf_rev =   (val >> 11) & 0x7;
1817 	sc->hw_radio = (val >> 10) & 0x1;
1818 	sc->led_mode = (val >> 6)  & 0x7;
1819 	sc->rx_ant =   (val >> 4)  & 0x3;
1820 	sc->tx_ant =   (val >> 2)  & 0x3;
1821 	sc->nb_ant =   val & 0x3;
1822 
1823 	/* read MAC address */
1824 	ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_myaddr, 6);
1825 
1826 	/* read default values for BBP registers */
1827 	ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16);
1828 
1829 	/* read Tx power for all b/g channels */
1830 	ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14);
1831 }
1832 
1833 int
1834 ural_bbp_init(struct ural_softc *sc)
1835 {
1836 	int i, ntries;
1837 
1838 	/* wait for BBP to be ready */
1839 	for (ntries = 0; ntries < 100; ntries++) {
1840 		if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0)
1841 			break;
1842 		DELAY(1000);
1843 	}
1844 	if (ntries == 100) {
1845 		printf("%s: timeout waiting for BBP\n", sc->sc_dev.dv_xname);
1846 		return EIO;
1847 	}
1848 
1849 	/* initialize BBP registers to default values */
1850 	for (i = 0; i < nitems(ural_def_bbp); i++)
1851 		ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val);
1852 
1853 #if 0
1854 	/* initialize BBP registers to values stored in EEPROM */
1855 	for (i = 0; i < 16; i++) {
1856 		if (sc->bbp_prom[i].reg == 0xff)
1857 			continue;
1858 		ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
1859 	}
1860 #endif
1861 
1862 	return 0;
1863 }
1864 
1865 void
1866 ural_set_txantenna(struct ural_softc *sc, int antenna)
1867 {
1868 	uint16_t tmp;
1869 	uint8_t tx;
1870 
1871 	tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK;
1872 	if (antenna == 1)
1873 		tx |= RAL_BBP_ANTA;
1874 	else if (antenna == 2)
1875 		tx |= RAL_BBP_ANTB;
1876 	else
1877 		tx |= RAL_BBP_DIVERSITY;
1878 
1879 	/* need to force I/Q flip for RF 2525e, 2526 and 5222 */
1880 	if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 ||
1881 	    sc->rf_rev == RAL_RF_5222)
1882 		tx |= RAL_BBP_FLIPIQ;
1883 
1884 	ural_bbp_write(sc, RAL_BBP_TX, tx);
1885 
1886 	/* update flags in PHY_CSR5 and PHY_CSR6 too */
1887 	tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7;
1888 	ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7));
1889 
1890 	tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7;
1891 	ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7));
1892 }
1893 
1894 void
1895 ural_set_rxantenna(struct ural_softc *sc, int antenna)
1896 {
1897 	uint8_t rx;
1898 
1899 	rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK;
1900 	if (antenna == 1)
1901 		rx |= RAL_BBP_ANTA;
1902 	else if (antenna == 2)
1903 		rx |= RAL_BBP_ANTB;
1904 	else
1905 		rx |= RAL_BBP_DIVERSITY;
1906 
1907 	/* need to force no I/Q flip for RF 2525e and 2526 */
1908 	if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526)
1909 		rx &= ~RAL_BBP_FLIPIQ;
1910 
1911 	ural_bbp_write(sc, RAL_BBP_RX, rx);
1912 }
1913 
1914 int
1915 ural_init(struct ifnet *ifp)
1916 {
1917 	struct ural_softc *sc = ifp->if_softc;
1918 	struct ieee80211com *ic = &sc->sc_ic;
1919 	uint16_t tmp;
1920 	usbd_status error;
1921 	int i, ntries;
1922 
1923 	ural_stop(ifp, 0);
1924 
1925 	/* initialize MAC registers to default values */
1926 	for (i = 0; i < nitems(ural_def_mac); i++)
1927 		ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val);
1928 
1929 	/* wait for BBP and RF to wake up (this can take a long time!) */
1930 	for (ntries = 0; ntries < 100; ntries++) {
1931 		tmp = ural_read(sc, RAL_MAC_CSR17);
1932 		if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) ==
1933 		    (RAL_BBP_AWAKE | RAL_RF_AWAKE))
1934 			break;
1935 		DELAY(1000);
1936 	}
1937 	if (ntries == 100) {
1938 		printf("%s: timeout waiting for BBP/RF to wake up\n",
1939 		    sc->sc_dev.dv_xname);
1940 		error = EIO;
1941 		goto fail;
1942 	}
1943 
1944 	/* we're ready! */
1945 	ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY);
1946 
1947 	/* set basic rate set (will be updated later) */
1948 	ural_write(sc, RAL_TXRX_CSR11, 0x153);
1949 
1950 	error = ural_bbp_init(sc);
1951 	if (error != 0)
1952 		goto fail;
1953 
1954 	/* set default BSS channel */
1955 	ic->ic_bss->ni_chan = ic->ic_ibss_chan;
1956 	ural_set_chan(sc, ic->ic_bss->ni_chan);
1957 
1958 	/* clear statistics registers (STA_CSR0 to STA_CSR10) */
1959 	ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
1960 
1961 	/* set default sensitivity */
1962 	ural_bbp_write(sc, 17, 0x48);
1963 
1964 	ural_set_txantenna(sc, 1);
1965 	ural_set_rxantenna(sc, 1);
1966 
1967 	IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
1968 	ural_set_macaddr(sc, ic->ic_myaddr);
1969 
1970 	/*
1971 	 * Copy WEP keys into adapter's memory (SEC_CSR0 to SEC_CSR31).
1972 	 */
1973 	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1974 		struct ieee80211_key *k = &ic->ic_nw_keys[i];
1975 		ural_write_multi(sc, RAL_SEC_CSR0 + i * IEEE80211_KEYBUF_SIZE,
1976 		    k->k_key, IEEE80211_KEYBUF_SIZE);
1977 	}
1978 
1979 	/*
1980 	 * Allocate xfer for AMRR statistics requests.
1981 	 */
1982 	sc->amrr_xfer = usbd_alloc_xfer(sc->sc_udev);
1983 	if (sc->amrr_xfer == NULL) {
1984 		printf("%s: could not allocate AMRR xfer\n",
1985 		    sc->sc_dev.dv_xname);
		error = ENOMEM;	/* don't return success on allocation failure */
1986 		goto fail;
1987 	}
1988 
1989 	/*
1990 	 * Open Tx and Rx USB bulk pipes.
1991 	 */
1992 	error = usbd_open_pipe(sc->sc_iface, sc->sc_tx_no, USBD_EXCLUSIVE_USE,
1993 	    &sc->sc_tx_pipeh);
1994 	if (error != 0) {
1995 		printf("%s: could not open Tx pipe: %s\n",
1996 		    sc->sc_dev.dv_xname, usbd_errstr(error));
1997 		goto fail;
1998 	}
1999 	error = usbd_open_pipe(sc->sc_iface, sc->sc_rx_no, USBD_EXCLUSIVE_USE,
2000 	    &sc->sc_rx_pipeh);
2001 	if (error != 0) {
2002 		printf("%s: could not open Rx pipe: %s\n",
2003 		    sc->sc_dev.dv_xname, usbd_errstr(error));
2004 		goto fail;
2005 	}
2006 
2007 	/*
2008 	 * Allocate Tx and Rx xfer queues.
2009 	 */
2010 	error = ural_alloc_tx_list(sc);
2011 	if (error != 0) {
2012 		printf("%s: could not allocate Tx list\n",
2013 		    sc->sc_dev.dv_xname);
2014 		goto fail;
2015 	}
2016 	error = ural_alloc_rx_list(sc);
2017 	if (error != 0) {
2018 		printf("%s: could not allocate Rx list\n",
2019 		    sc->sc_dev.dv_xname);
2020 		goto fail;
2021 	}
2022 
2023 	/*
2024 	 * Start up the receive pipe.
2025 	 */
2026 	for (i = 0; i < RAL_RX_LIST_COUNT; i++) {
2027 		struct ural_rx_data *data = &sc->rx_data[i];
2028 
2029 		usbd_setup_xfer(data->xfer, sc->sc_rx_pipeh, data, data->buf,
2030 		    MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof);
2031 		error = usbd_transfer(data->xfer);
2032 		if (error != 0 && error != USBD_IN_PROGRESS) {
2033 			printf("%s: could not queue Rx transfer\n",
2034 			    sc->sc_dev.dv_xname);
2035 			goto fail;
2036 		}
2037 	}
2038 
2039 	/* kick Rx: set up the frame-dropping filters and enable reception */
2040 	tmp = RAL_DROP_PHY_ERROR | RAL_DROP_CRC_ERROR;
2041 	if (ic->ic_opmode != IEEE80211_M_MONITOR) {
2042 		tmp |= RAL_DROP_CTL | RAL_DROP_VERSION_ERROR;
2043 #ifndef IEEE80211_STA_ONLY
2044 		if (ic->ic_opmode != IEEE80211_M_HOSTAP)
2045 #endif
2046 			tmp |= RAL_DROP_TODS;
2047 		if (!(ifp->if_flags & IFF_PROMISC))
2048 			tmp |= RAL_DROP_NOT_TO_ME;
2049 	}
2050 	ural_write(sc, RAL_TXRX_CSR2, tmp);
2051 
2052 	ifq_clr_oactive(&ifp->if_snd);
2053 	ifp->if_flags |= IFF_RUNNING;
2054 
2055 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
2056 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2057 	else
2058 		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2059 
2060 	return 0;
2061 
2062 fail:	ural_stop(ifp, 1);
2063 	return error;
2064 }
2065 
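/*
 * Interface shutdown: disable Rx, reset the ASIC and BBP, and release the
 * USB transfers and pipes.
 */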
2066 void
2067 ural_stop(struct ifnet *ifp, int disable)
2068 {
2069 	struct ural_softc *sc = ifp->if_softc;
2070 	struct ieee80211com *ic = &sc->sc_ic;
2071 
2072 	sc->sc_tx_timer = 0;
2073 	ifp->if_timer = 0;
2074 	ifp->if_flags &= ~IFF_RUNNING;
2075 	ifq_clr_oactive(&ifp->if_snd);
2076 
2077 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);	/* free all nodes */
2078 
2079 	/* disable Rx */
2080 	ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX);
2081 
2082 	/* reset ASIC and BBP (but won't reset MAC registers!) */
2083 	ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP);
2084 	ural_write(sc, RAL_MAC_CSR1, 0);
2085 
2086 	if (sc->amrr_xfer != NULL) {
2087 		usbd_free_xfer(sc->amrr_xfer);
2088 		sc->amrr_xfer = NULL;
2089 	}
2090 	if (sc->sc_rx_pipeh != NULL) {
2091 		usbd_abort_pipe(sc->sc_rx_pipeh);
2092 		usbd_close_pipe(sc->sc_rx_pipeh);
2093 		sc->sc_rx_pipeh = NULL;
2094 	}
2095 	if (sc->sc_tx_pipeh != NULL) {
2096 		usbd_abort_pipe(sc->sc_tx_pipeh);
2097 		usbd_close_pipe(sc->sc_tx_pipeh);
2098 		sc->sc_tx_pipeh = NULL;
2099 	}
2100 
2101 	ural_free_rx_list(sc);
2102 	ural_free_tx_list(sc);
2103 }
2104 
2105 void
2106 ural_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni, int isnew)
2107 {
2108 	/* start with lowest Tx rate */
2109 	ni->ni_txrate = 0;
2110 }
2111 
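/*
 * Initialize AMRR rate control for a node, pick an initial Tx rate and arm
 * the one-second statistics timeout.
 */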
2112 void
2113 ural_amrr_start(struct ural_softc *sc, struct ieee80211_node *ni)
2114 {
2115 	int i;
2116 
2117 	/* clear statistics registers (STA_CSR0 to STA_CSR10) */
2118 	ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
2119 
2120 	ieee80211_amrr_node_init(&sc->amrr, &sc->amn);
2121 
2122 	/* set Tx rate to the highest rate not above 36Mb/s (72 * 500Kb/s) */
2123 	for (i = ni->ni_rates.rs_nrates - 1;
2124 	     i > 0 && (ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL) > 72;
2125 	     i--);
2126 	ni->ni_txrate = i;
2127 
2128 	if (!usbd_is_dying(sc->sc_udev))
2129 		timeout_add_sec(&sc->amrr_to, 1);
2130 }
2131 
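/*
 * AMRR timeout: start an asynchronous read of the statistics registers;
 * rate selection is done in the ural_amrr_update() completion handler.
 */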
2132 void
2133 ural_amrr_timeout(void *arg)
2134 {
2135 	struct ural_softc *sc = arg;
2136 	usb_device_request_t req;
2137 	int s;
2138 
2139 	if (usbd_is_dying(sc->sc_udev))
2140 		return;
2141 
2142 	usbd_ref_incr(sc->sc_udev);
2143 
2144 	s = splusb();
2145 
2146 	/*
2147 	 * Asynchronously read the statistics registers (cleared on read).
2148 	 */
2149 	req.bmRequestType = UT_READ_VENDOR_DEVICE;
2150 	req.bRequest = RAL_READ_MULTI_MAC;
2151 	USETW(req.wValue, 0);
2152 	USETW(req.wIndex, RAL_STA_CSR0);
2153 	USETW(req.wLength, sizeof sc->sta);
2154 
2155 	usbd_setup_default_xfer(sc->amrr_xfer, sc->sc_udev, sc,
2156 	    USBD_DEFAULT_TIMEOUT, &req, sc->sta, sizeof sc->sta, 0,
2157 	    ural_amrr_update);
2158 	(void)usbd_transfer(sc->amrr_xfer);
2159 
2160 	splx(s);
2161 
2162 	usbd_ref_decr(sc->sc_udev);
2163 }
2164 
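/*
 * Completion handler for the statistics transfer: update the AMRR counters,
 * choose a new Tx rate and re-arm the timeout.
 */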
2165 void
2166 ural_amrr_update(struct usbd_xfer *xfer, void *priv,
2167     usbd_status status)
2168 {
2169 	struct ural_softc *sc = (struct ural_softc *)priv;
2170 	struct ifnet *ifp = &sc->sc_ic.ic_if;
2171 
2172 	if (status != USBD_NORMAL_COMPLETION) {
2173 		printf("%s: could not retrieve Tx statistics - cancelling "
2174 		    "automatic rate control\n", sc->sc_dev.dv_xname);
2175 		return;
2176 	}
2177 
2178 	/* count TX retry-fail as Tx errors */
2179 	ifp->if_oerrors += letoh16(sc->sta[9]);
2180 
2181 	sc->amn.amn_retrycnt =
2182 	    letoh16(sc->sta[7]) +	/* TX one-retry ok count */
2183 	    letoh16(sc->sta[8]) +	/* TX more-retry ok count */
2184 	    letoh16(sc->sta[9]);	/* TX retry-fail count */
2185 
2186 	sc->amn.amn_txcnt =
2187 	    sc->amn.amn_retrycnt +
2188 	    letoh16(sc->sta[6]);	/* TX no-retry ok count */
2189 
2190 	ieee80211_amrr_choose(&sc->amrr, sc->sc_ic.ic_bss, &sc->amn);
2191 
2192 	if (!usbd_is_dying(sc->sc_udev))
2193 		timeout_add_sec(&sc->amrr_to, 1);
2194 }
2195