xref: /openbsd/sys/dev/ic/pgt.c (revision 404b540a)
1 /*	$OpenBSD: pgt.c,v 1.53 2009/01/26 19:09:41 damien Exp $  */
2 
3 /*
4  * Copyright (c) 2006 Claudio Jeker <claudio@openbsd.org>
5  * Copyright (c) 2006 Marcus Glocker <mglocker@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * Copyright (c) 2004 Fujitsu Laboratories of America, Inc.
22  * Copyright (c) 2004 Brian Fundakowski Feldman
23  * All rights reserved.
24  *
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions
27  * are met:
28  * 1. Redistributions of source code must retain the above copyright
29  *    notice, this list of conditions and the following disclaimer.
30  * 2. Redistributions in binary form must reproduce the above copyright
31  *    notice, this list of conditions and the following disclaimer in the
32  *    documentation and/or other materials provided with the distribution.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
35  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
38  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44  * SUCH DAMAGE.
45  */
46 
47 #include <sys/cdefs.h>
48 #include "bpfilter.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/kernel.h>
53 #include <sys/malloc.h>
54 #include <sys/socket.h>
55 #include <sys/mbuf.h>
56 #include <sys/endian.h>
57 #include <sys/sockio.h>
58 #include <sys/sysctl.h>
59 #include <sys/kthread.h>
60 #include <sys/time.h>
61 #include <sys/ioctl.h>
62 #include <sys/device.h>
63 
64 #include <machine/bus.h>
65 #include <machine/endian.h>
66 #include <machine/intr.h>
67 
68 #include <net/if.h>
69 #include <net/if_arp.h>
70 #include <net/if_dl.h>
71 #include <net/if_llc.h>
72 #include <net/if_media.h>
73 #include <net/if_types.h>
74 
75 #if NBPFILTER > 0
76 #include <net/bpf.h>
77 #endif
78 
79 #ifdef INET
80 #include <netinet/in.h>
81 #include <netinet/in_systm.h>
82 #include <netinet/in_var.h>
83 #include <netinet/if_ether.h>
84 #include <netinet/ip.h>
85 #endif
86 
87 #include <net80211/ieee80211_var.h>
88 #include <net80211/ieee80211_radiotap.h>
89 
90 #include <dev/ic/pgtreg.h>
91 #include <dev/ic/pgtvar.h>
92 
93 #include <dev/ic/if_wireg.h>
94 #include <dev/ic/if_wi_ieee.h>
95 #include <dev/ic/if_wivar.h>
96 
97 #ifdef PGT_DEBUG
98 #define DPRINTF(x)	do { printf x; } while (0)
99 #else
100 #define DPRINTF(x)
101 #endif
102 
/*
 * Set an OID on the device and bail out of the caller's enclosing
 * switch/loop if that fails.  Deliberately NOT wrapped in
 * do { } while (0): the embedded "break" must escape the statement
 * enclosing the macro invocation, not a macro-local loop.
 */
#define	SETOID(oid, var, size) {					\
	if (pgt_oid_set(sc, oid, var, size) != 0)			\
		break;							\
}
107 
108 /*
109  * This is a driver for the Intersil Prism family of 802.11g network cards,
110  * based upon version 1.2 of the Linux driver and firmware found at
111  * http://www.prism54.org/.
112  */
113 
114 #define SCAN_TIMEOUT			5	/* 5 seconds */
115 
116 struct cfdriver pgt_cd = {
117         NULL, "pgt", DV_IFNET
118 };
119 
120 void	 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr);
121 int	 pgt_media_change(struct ifnet *ifp);
122 void	 pgt_write_memory_barrier(struct pgt_softc *);
123 uint32_t pgt_read_4(struct pgt_softc *, uint16_t);
124 void	 pgt_write_4(struct pgt_softc *, uint16_t, uint32_t);
125 void	 pgt_write_4_flush(struct pgt_softc *, uint16_t, uint32_t);
126 void	 pgt_debug_events(struct pgt_softc *, const char *);
127 uint32_t pgt_queue_frags_pending(struct pgt_softc *, enum pgt_queue);
128 void	 pgt_reinit_rx_desc_frag(struct pgt_softc *, struct pgt_desc *);
129 int	 pgt_load_tx_desc_frag(struct pgt_softc *, enum pgt_queue,
130 	     struct pgt_desc *);
131 void	 pgt_unload_tx_desc_frag(struct pgt_softc *, struct pgt_desc *);
132 int	 pgt_load_firmware(struct pgt_softc *);
133 void	 pgt_cleanup_queue(struct pgt_softc *, enum pgt_queue,
134 	     struct pgt_frag []);
135 int	 pgt_reset(struct pgt_softc *);
136 void	 pgt_stop(struct pgt_softc *, unsigned int);
137 void	 pgt_reboot(struct pgt_softc *);
138 void	 pgt_init_intr(struct pgt_softc *);
139 void	 pgt_update_intr(struct pgt_softc *, int);
140 struct mbuf
141 	*pgt_ieee80211_encap(struct pgt_softc *, struct ether_header *,
142 	     struct mbuf *, struct ieee80211_node **);
143 void	 pgt_input_frames(struct pgt_softc *, struct mbuf *);
144 void	 pgt_wakeup_intr(struct pgt_softc *);
145 void	 pgt_sleep_intr(struct pgt_softc *);
146 void	 pgt_empty_traps(struct pgt_softc_kthread *);
147 void	 pgt_per_device_kthread(void *);
148 void	 pgt_async_reset(struct pgt_softc *);
149 void	 pgt_async_update(struct pgt_softc *);
150 void	 pgt_txdone(struct pgt_softc *, enum pgt_queue);
151 void	 pgt_rxdone(struct pgt_softc *, enum pgt_queue);
152 void	 pgt_trap_received(struct pgt_softc *, uint32_t, void *, size_t);
153 void	 pgt_mgmtrx_completion(struct pgt_softc *, struct pgt_mgmt_desc *);
154 struct mbuf
155 	*pgt_datarx_completion(struct pgt_softc *, enum pgt_queue);
156 int	 pgt_oid_get(struct pgt_softc *, enum pgt_oid, void *, size_t);
157 int	 pgt_oid_retrieve(struct pgt_softc *, enum pgt_oid, void *, size_t);
158 int	 pgt_oid_set(struct pgt_softc *, enum pgt_oid, const void *, size_t);
159 void	 pgt_state_dump(struct pgt_softc *);
160 int	 pgt_mgmt_request(struct pgt_softc *, struct pgt_mgmt_desc *);
161 void	 pgt_desc_transmit(struct pgt_softc *, enum pgt_queue,
162 	     struct pgt_desc *, uint16_t, int);
163 void	 pgt_maybe_trigger(struct pgt_softc *, enum pgt_queue);
164 struct ieee80211_node
165 	*pgt_ieee80211_node_alloc(struct ieee80211com *);
166 void	 pgt_ieee80211_newassoc(struct ieee80211com *,
167 	     struct ieee80211_node *, int);
168 void	 pgt_ieee80211_node_free(struct ieee80211com *,
169 	    struct ieee80211_node *);
170 void	 pgt_ieee80211_node_copy(struct ieee80211com *,
171 	     struct ieee80211_node *,
172 	     const struct ieee80211_node *);
173 int	 pgt_ieee80211_send_mgmt(struct ieee80211com *,
174 	     struct ieee80211_node *, int, int, int);
175 int	 pgt_net_attach(struct pgt_softc *);
176 void	 pgt_start(struct ifnet *);
177 int	 pgt_ioctl(struct ifnet *, u_long, caddr_t);
178 void	 pgt_obj_bss2scanres(struct pgt_softc *,
179 	     struct pgt_obj_bss *, struct wi_scan_res *, uint32_t);
180 void	 node_mark_active_ap(void *, struct ieee80211_node *);
181 void	 node_mark_active_adhoc(void *, struct ieee80211_node *);
182 void	 pgt_watchdog(struct ifnet *);
183 int	 pgt_init(struct ifnet *);
184 void	 pgt_update_hw_from_sw(struct pgt_softc *, int, int);
185 void	 pgt_hostap_handle_mlme(struct pgt_softc *, uint32_t,
186 	     struct pgt_obj_mlme *);
187 void	 pgt_update_sw_from_hw(struct pgt_softc *,
188 	     struct pgt_async_trap *, struct mbuf *);
189 int	 pgt_newstate(struct ieee80211com *, enum ieee80211_state, int);
190 int	 pgt_drain_tx_queue(struct pgt_softc *, enum pgt_queue);
191 int	 pgt_dma_alloc(struct pgt_softc *);
192 int	 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq);
193 void	 pgt_dma_free(struct pgt_softc *);
194 void	 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq);
195 void	 pgt_shutdown(void *);
196 void	 pgt_power(int, void *);
197 
198 void
199 pgt_write_memory_barrier(struct pgt_softc *sc)
200 {
201 	bus_space_barrier(sc->sc_iotag, sc->sc_iohandle, 0, 0,
202 	    BUS_SPACE_BARRIER_WRITE);
203 }
204 
205 u_int32_t
206 pgt_read_4(struct pgt_softc *sc, uint16_t offset)
207 {
208 	return (bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, offset));
209 }
210 
211 void
212 pgt_write_4(struct pgt_softc *sc, uint16_t offset, uint32_t value)
213 {
214 	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
215 }
216 
217 /*
218  * Write out 4 bytes and cause a PCI flush by reading back in on a
219  * harmless register.
220  */
221 void
222 pgt_write_4_flush(struct pgt_softc *sc, uint16_t offset, uint32_t value)
223 {
224 	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
225 	(void)bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, PGT_REG_INT_EN);
226 }
227 
/*
 * Print the state of events in the queues from an interrupt or a trigger.
 */
void
pgt_debug_events(struct pgt_softc *sc, const char *when)
{
/* Outstanding frags on queue i: driver-produced minus device-consumed. */
#define	COUNT(i)							\
	letoh32(sc->sc_cb->pcb_driver_curfrag[i]) -			\
	letoh32(sc->sc_cb->pcb_device_curfrag[i])
	/*
	 * COUNT() only expands inside DPRINTF's argument list, so the
	 * control-block reads happen solely in PGT_DEBUG builds.
	 */
	if (sc->sc_debug & SC_DEBUG_EVENTS)
		DPRINTF(("%s: ev%s: %u %u %u %u %u %u\n",
		    sc->sc_dev.dv_xname, when, COUNT(0), COUNT(1), COUNT(2),
		    COUNT(3), COUNT(4), COUNT(5)));
#undef COUNT
}
243 
244 uint32_t
245 pgt_queue_frags_pending(struct pgt_softc *sc, enum pgt_queue pq)
246 {
247 	return (letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) -
248 	    letoh32(sc->sc_cb->pcb_device_curfrag[pq]));
249 }
250 
/*
 * Re-arm an rx descriptor: point its fragment entry back at the
 * descriptor's DMA buffer (device-endian) and offer the full
 * fragment size for the device to fill.
 */
void
pgt_reinit_rx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = 0;

	/*
	 * NOTE(review): POSTWRITE after updating the fragment looks
	 * unusual (PREWRITE is the conventional flush-before-device
	 * flag); kept as is -- confirm against bus_dmamap_sync(9)
	 * before changing.
	 */
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
}
261 
/*
 * Map a tx descriptor's buffer for DMA and fill in its device-visible
 * fragment entry (little-endian address/size/flags).
 * Returns 0 on success or the bus_dmamap_load(9) error code.
 */
int
pgt_load_tx_desc_frag(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_desc *pd)
{
	int error;

	error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam, pd->pd_mem,
	    PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		DPRINTF(("%s: unable to load %s tx DMA: %d\n",
		    sc->sc_dev.dv_xname,
		    pgt_queue_is_data(pq) ? "data" : "mgmt", error));
		return (error);
	}
	/* single segment: the buffer is one contiguous PGT_FRAG_SIZE chunk */
	pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = htole16(0);

	/*
	 * NOTE(review): POSTWRITE here mirrors pgt_reinit_rx_desc_frag();
	 * PREWRITE would be the conventional flag -- confirm against
	 * bus_dmamap_sync(9) before changing.
	 */
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	return (0);
}
286 
287 void
288 pgt_unload_tx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
289 {
290         bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
291 	pd->pd_dmaaddr = 0;
292 }
293 
294 int
295 pgt_load_firmware(struct pgt_softc *sc)
296 {
297 	int error, reg, dirreg, fwoff, ucodeoff, fwlen;
298 	uint8_t *ucode;
299 	uint32_t *uc;
300 	size_t size;
301 	char *name;
302 
303 	if (sc->sc_flags & SC_ISL3877)
304 		name = "pgt-isl3877";
305 	else
306 		name = "pgt-isl3890";	/* includes isl3880 */
307 
308 	error = loadfirmware(name, &ucode, &size);
309 
310 	if (error != 0) {
311 		DPRINTF(("%s: error %d, could not read firmware %s\n",
312 		    sc->sc_dev.dv_xname, error, name));
313 		return (EIO);
314 	}
315 
316 	if (size & 3) {
317 		DPRINTF(("%s: bad firmware size %u\n",
318 		    sc->sc_dev.dv_xname, size));
319 		free(ucode, M_DEVBUF);
320 		return (EINVAL);
321 	}
322 
323 	pgt_reboot(sc);
324 
325 	fwoff = 0;
326 	ucodeoff = 0;
327 	uc = (uint32_t *)ucode;
328 	reg = PGT_FIRMWARE_INTERNAL_OFFSET;
329 	while (fwoff < size) {
330 		pgt_write_4_flush(sc, PGT_REG_DIR_MEM_BASE, reg);
331 
332 		if ((size - fwoff) >= PGT_DIRECT_MEMORY_SIZE)
333 			fwlen = PGT_DIRECT_MEMORY_SIZE;
334 		else
335 			fwlen = size - fwoff;
336 
337 		dirreg = PGT_DIRECT_MEMORY_OFFSET;
338 		while (fwlen > 4) {
339 			pgt_write_4(sc, dirreg, uc[ucodeoff]);
340 			fwoff += 4;
341 			dirreg += 4;
342 			reg += 4;
343 			fwlen -= 4;
344 			ucodeoff++;
345 		}
346 		pgt_write_4_flush(sc, dirreg, uc[ucodeoff]);
347 		fwoff += 4;
348 		dirreg += 4;
349 		reg += 4;
350 		fwlen -= 4;
351 		ucodeoff++;
352 	}
353 	DPRINTF(("%s: %d bytes microcode loaded from %s\n",
354 	    sc->sc_dev.dv_xname, fwoff, name));
355 
356 	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
357 	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_CLOCKRUN);
358 	reg |= PGT_CTRL_STAT_RAMBOOT;
359 	pgt_write_4_flush(sc, PGT_REG_CTRL_STAT, reg);
360 	pgt_write_memory_barrier(sc);
361 	DELAY(PGT_WRITEIO_DELAY);
362 
363 	reg |= PGT_CTRL_STAT_RESET;
364 	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
365 	pgt_write_memory_barrier(sc);
366 	DELAY(PGT_WRITEIO_DELAY);
367 
368 	reg &= ~PGT_CTRL_STAT_RESET;
369 	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
370 	pgt_write_memory_barrier(sc);
371 	DELAY(PGT_WRITEIO_DELAY);
372 
373 	free(ucode, M_DEVBUF);
374 
375 	return (0);
376 }
377 
/*
 * Reset a queue's ring state: renumber the free descriptors, rebind
 * each to its fragment slot in the control block (re-arming rx
 * buffers as we go), and reset the driver/device progress counters.
 * Caller is responsible for syncing the control-block DMA map.
 */
void
pgt_cleanup_queue(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_frag pqfrags[])
{
	struct pgt_desc *pd;
	unsigned int i;

	sc->sc_cb->pcb_device_curfrag[pq] = 0;
	i = 0;
	/* XXX why only freeq ??? */
	TAILQ_FOREACH(pd, &sc->sc_freeq[pq], pd_link) {
		pd->pd_fragnum = i;
		pd->pd_fragp = &pqfrags[i];
		if (pgt_queue_is_rx(pq))
			pgt_reinit_rx_desc_frag(sc, pd);
		i++;
	}
	sc->sc_freeq_count[pq] = i;
	/*
	 * The ring buffer describes how many free buffers are available from
	 * the host (for receive queues) or how many are pending (for
	 * transmit queues).
	 */
	if (pgt_queue_is_rx(pq))
		sc->sc_cb->pcb_driver_curfrag[pq] = htole32(i);
	else
		sc->sc_cb->pcb_driver_curfrag[pq] = 0;
}
406 
/*
 * Turn off interrupts, reset the device (possibly loading firmware),
 * and put everything in a known state.
 * Returns 0 on success or the pgt_load_firmware() error.  Completion
 * is asynchronous: the init interrupt later clears SC_UNINITIALIZED
 * (see pgt_init_intr()); callers tsleep() on sc->sc_flags for that.
 */
int
pgt_reset(struct pgt_softc *sc)
{
	int error;

	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/*
	 * Set up the management receive queue, assuming there are no
	 * requests in progress.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_RX,
	    &sc->sc_cb->pcb_data_low_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_TX,
	    &sc->sc_cb->pcb_data_low_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_RX,
	    &sc->sc_cb->pcb_data_high_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_TX,
	    &sc->sc_cb->pcb_data_high_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_RX,
	    &sc->sc_cb->pcb_mgmt_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_TX,
	    &sc->sc_cb->pcb_mgmt_tx[0]);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);

	/* load firmware */
	if (sc->sc_flags & SC_NEEDS_FIRMWARE) {
		error = pgt_load_firmware(sc);
		if (error) {
			printf("%s: firmware load failed\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
		sc->sc_flags &= ~SC_NEEDS_FIRMWARE;
		DPRINTF(("%s: firmware loaded\n", sc->sc_dev.dv_xname));
	}

	/* upload the control block's DMA address */
	pgt_write_4_flush(sc, PGT_REG_CTRL_BLK_BASE,
	    htole32((uint32_t)sc->sc_cbdmam->dm_segs[0].ds_addr));
	DELAY(PGT_WRITEIO_DELAY);

	/* send a reset event */
	pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_RESET);
	DELAY(PGT_WRITEIO_DELAY);

	/* await only the initialization interrupt */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_INIT);
	DELAY(PGT_WRITEIO_DELAY);

	return (0);
}
470 
/*
 * If we're trying to reset and the device has seemingly not been detached,
 * we'll spend a minute seeing if we can't do the reset.
 *
 * Marks the interface down, drains all tx queues and in-progress mgmt
 * requests, reboots the card and -- when flag is SC_NEEDS_RESET --
 * attempts a full pgt_reset(), retrying once before panicking.
 * "flag" is the SC_* state bit being set (e.g. SC_NEEDS_RESET,
 * SC_DYING); it is cleared again on the reset path.
 */
void
pgt_stop(struct pgt_softc *sc, unsigned int flag)
{
	struct ieee80211com *ic;
	unsigned int wokeup;
	int tryagain = 0;

	ic = &sc->sc_ic;

	/* mark the interface down and the device as (re)uninitialized */
	ic->ic_if.if_flags &= ~IFF_RUNNING;
	sc->sc_flags |= SC_UNINITIALIZED;
	sc->sc_flags |= flag;

	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_MGMT_TX);

trying_again:
	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/* reboot card */
	pgt_reboot(sc);

	do {
		wokeup = 0;
		/*
		 * We don't expect to be woken up, just to drop the lock
		 * and time out.  Only tx queues can have anything valid
		 * on them outside of an interrupt.
		 */
		while (!TAILQ_EMPTY(&sc->sc_mgmtinprog)) {
			struct pgt_mgmt_desc *pmd;

			/* fail every in-progress mgmt request */
			pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
			pmd->pmd_error = ENETRESET;
			wakeup_one(pmd);
			if (sc->sc_debug & SC_DEBUG_MGMT)
				DPRINTF(("%s: queue: mgmt %p <- %#x "
				    "(drained)\n", sc->sc_dev.dv_xname,
				    pmd, pmd->pmd_oid));
			wokeup++;
		}
		if (wokeup > 0) {
			/* device is going away: don't bother resetting */
			if (flag == SC_NEEDS_RESET && sc->sc_flags & SC_DYING) {
				sc->sc_flags &= ~flag;
				return;
			}
		}
	} while (wokeup > 0);

	if (flag == SC_NEEDS_RESET) {
		int error;

		DPRINTF(("%s: resetting\n", sc->sc_dev.dv_xname));
		sc->sc_flags &= ~SC_POWERSAVE;
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		error = pgt_reset(sc);
		if (error == 0) {
			/*
			 * Wait up to 1s for the init interrupt to clear
			 * SC_UNINITIALIZED (see pgt_init_intr()).
			 */
			tsleep(&sc->sc_flags, 0, "pgtres", hz);
			if (sc->sc_flags & SC_UNINITIALIZED) {
				printf("%s: not responding\n",
				    sc->sc_dev.dv_xname);
				/* Thud.  It was probably removed. */
				if (tryagain)
					panic("pgt went for lunch"); /* XXX */
				tryagain = 1;
			} else {
				/* await all interrupts */
				pgt_write_4_flush(sc, PGT_REG_INT_EN,
				    PGT_INT_STAT_SOURCES);
				DELAY(PGT_WRITEIO_DELAY);
				ic->ic_if.if_flags |= IFF_RUNNING;
			}
		}

		if (tryagain)
			goto trying_again;

		sc->sc_flags &= ~flag;
		if (ic->ic_if.if_flags & IFF_RUNNING)
			pgt_update_hw_from_sw(sc,
			    ic->ic_state != IEEE80211_S_INIT,
			    ic->ic_opmode != IEEE80211_M_MONITOR);
	}

	ic->ic_if.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}
566 
/*
 * Late attach: allocate DMA resources, reset the card (loading
 * firmware), wait for it to come alive, then attach the network
 * interface and start the per-device kernel thread.
 * NOTE(review): every failure path returns silently without undoing
 * the earlier steps; pgt_detach() performs the cleanup.
 */
void
pgt_attach(void *xsc)
{
	struct pgt_softc *sc = xsc;
	int error;

	/* debug flags */
	//sc->sc_debug |= SC_DEBUG_QUEUES;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_MGMT;
	sc->sc_debug |= SC_DEBUG_UNEXPECTED;
	//sc->sc_debug |= SC_DEBUG_TRIGGER;	/* verbose */
	//sc->sc_debug |= SC_DEBUG_EVENTS;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_POWER;
	sc->sc_debug |= SC_DEBUG_TRAP;
	sc->sc_debug |= SC_DEBUG_LINK;
	//sc->sc_debug |= SC_DEBUG_RXANNEX;
	//sc->sc_debug |= SC_DEBUG_RXFRAG;
	//sc->sc_debug |= SC_DEBUG_RXETHER;

	/* enable card if possible */
	if (sc->sc_enable != NULL)
		(*sc->sc_enable)(sc);

	error = pgt_dma_alloc(sc);
	if (error)
		return;

	sc->sc_ic.ic_if.if_softc = sc;
	TAILQ_INIT(&sc->sc_mgmtinprog);
	TAILQ_INIT(&sc->sc_kthread.sck_traps);
	sc->sc_flags |= SC_NEEDS_FIRMWARE | SC_UNINITIALIZED;
	sc->sc_80211_ioc_auth = IEEE80211_AUTH_OPEN;

	error = pgt_reset(sc);
	if (error)
		return;

	/*
	 * Wait up to 1s for the init interrupt to clear SC_UNINITIALIZED
	 * (see pgt_init_intr()).
	 */
	tsleep(&sc->sc_flags, 0, "pgtres", hz);
	if (sc->sc_flags & SC_UNINITIALIZED) {
		printf("%s: not responding\n", sc->sc_dev.dv_xname);
		return;
	} else {
		/* await all interrupts */
		pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_SOURCES);
		DELAY(PGT_WRITEIO_DELAY);
	}

	error = pgt_net_attach(sc);
	if (error)
		return;

	if (kthread_create(pgt_per_device_kthread, sc, NULL,
	    sc->sc_dev.dv_xname) != 0)
		return;

	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}
624 
/*
 * Detach the device: stop and reboot the card, remove the shutdown
 * and power hooks and the network interface, then release the DMA
 * resources.  Always returns 0.
 */
int
pgt_detach(struct pgt_softc *sc)
{
	if (sc->sc_flags & SC_NEEDS_FIRMWARE || sc->sc_flags & SC_UNINITIALIZED)
		/* device was not initialized correctly, so leave early */
		goto out;

	/* stop card */
	pgt_stop(sc, SC_DYING);
	pgt_reboot(sc);

	/*
	 * Disable shutdown and power hooks
	 */
        if (sc->sc_shutdown_hook != NULL)
                shutdownhook_disestablish(sc->sc_shutdown_hook);
        if (sc->sc_power_hook != NULL)
                powerhook_disestablish(sc->sc_power_hook);

	ieee80211_ifdetach(&sc->sc_ic.ic_if);
	if_detach(&sc->sc_ic.ic_if);

out:
	/* disable card if possible */
	if (sc->sc_disable != NULL)
		(*sc->sc_disable)(sc);

	pgt_dma_free(sc);

	return (0);
}
656 
/*
 * Hard-reset the card: drop out of RAM boot, pulse the RESET bit, and
 * give the card PGT_RESET_DELAY to come back up.  The write/barrier/
 * delay sequencing appears deliberate -- do not reorder.
 */
void
pgt_reboot(struct pgt_softc *sc)
{
	uint32_t reg;

	/* clear RESET and RAMBOOT so the card will boot from ROM */
	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_RAMBOOT);
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	/* assert reset ... */
	reg |= PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	/* ... then release it and wait for the card to reboot */
	reg &= ~PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_RESET_DELAY);
}
678 
679 void
680 pgt_init_intr(struct pgt_softc *sc)
681 {
682 	if ((sc->sc_flags & SC_UNINITIALIZED) == 0) {
683 		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
684 			DPRINTF(("%s: spurious initialization\n",
685 			    sc->sc_dev.dv_xname));
686 	} else {
687 		sc->sc_flags &= ~SC_UNINITIALIZED;
688 		wakeup(&sc->sc_flags);
689 	}
690 }
691 
/*
 * Service a device interrupt (or polled trigger): reap completed tx
 * fragments and received frames on every queue in priority order,
 * then run the deferred mgmt/data rx completions and feed network
 * frames to the stack.  With PGT_BUGGY_INTERRUPT_RECOVERY, a nonzero
 * "hack" skips data-rx queue processing.
 * (The old comment mentioned a "last_nextpkt" parameter that no
 * longer exists.)
 */
void
pgt_update_intr(struct pgt_softc *sc, int hack)
{
	/* priority order */
	enum pgt_queue pqs[PGT_QUEUE_COUNT] = {
	    PGT_QUEUE_MGMT_TX, PGT_QUEUE_MGMT_RX,
	    PGT_QUEUE_DATA_HIGH_TX, PGT_QUEUE_DATA_HIGH_RX,
	    PGT_QUEUE_DATA_LOW_TX, PGT_QUEUE_DATA_LOW_RX
	};
	struct mbuf *m;
	uint32_t npend;
	unsigned int dirtycount;
	int i;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_debug_events(sc, "intr");
	/*
	 * Check for completion of tx in their dirty queues.
	 * Check completion of rx into their dirty queues.
	 */
	for (i = 0; i < PGT_QUEUE_COUNT; i++) {
		size_t qdirty, qfree, qtotal;

		qdirty = sc->sc_dirtyq_count[pqs[i]];
		qfree = sc->sc_freeq_count[pqs[i]];
		qtotal = qdirty + qfree;	/* NOTE(review): currently unused */
		/*
		 * We want the wrap-around here.
		 */
		if (pgt_queue_is_rx(pqs[i])) {
			int data;

			data = pgt_queue_is_data(pqs[i]);
#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
			if (hack && data)
				continue;
#endif
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			/*
			 * Receive queues clean up below, so qfree must
			 * always be qtotal (qdirty is 0).
			 */
			if (npend > qfree) {
				/* device consumed more than we offered */
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: rx queue [%u] "
					    "overflowed by %u\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qfree));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			while (qfree-- > npend)
				pgt_rxdone(sc, pqs[i]);
		} else {
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			if (npend > qdirty) {
				/* device claims more tx done than pending */
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: tx queue [%u] "
					    "underflowed by %u\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qdirty));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			/*
			 * If the free queue was empty, or the data transmit
			 * queue just became empty, wake up any waiters.
			 */
			if (qdirty > npend) {
				if (pgt_queue_is_data(pqs[i])) {
					sc->sc_ic.ic_if.if_timer = 0;
					sc->sc_ic.ic_if.if_flags &=
					    ~IFF_OACTIVE;
				}
				while (qdirty-- > npend)
					pgt_txdone(sc, pqs[i]);
			}
		}
	}

	/*
	 * This is the deferred completion for received management frames
	 * and where we queue network frames for stack input.
	 */
	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX])) {
		struct pgt_mgmt_desc *pmd;

		pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
		/*
		 * If there is no mgmt request in progress or the operation
		 * returned is explicitly a trap, this pmd will essentially
		 * be ignored.
		 */
		pgt_mgmtrx_completion(sc, pmd);
	}
	/* return the reaped buffers to the device */
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_HIGH_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_HIGH_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_HIGH_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_LOW_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_LOW_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX]));

	/*
	 * Write out what we've finished with.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
}
823 
/*
 * Prepend an 802.11 + LLC/SNAP header to a frame delivered by the
 * device as Ethernet (header fields taken from *eh) and, when ni is
 * non-NULL, resolve the transmitting node into *ni with a reference
 * held (presumably released by the caller -- see pgt_input_frames()).
 * In monitor mode the mbuf is returned untouched.  Returns NULL
 * (mbuf freed or consumed by m_pullup failure) on error.
 */
struct mbuf *
pgt_ieee80211_encap(struct pgt_softc *sc, struct ether_header *eh,
    struct mbuf *m, struct ieee80211_node **ni)
{
	struct ieee80211com *ic;
	struct ieee80211_frame *frame;
	struct llc *snap;

	ic = &sc->sc_ic;
	if (ni != NULL && ic->ic_opmode == IEEE80211_M_MONITOR) {
		*ni = ieee80211_ref_node(ic->ic_bss);
		(*ni)->ni_inact = 0;
		return (m);
	}

	/* make room for the 802.11 header and the SNAP header */
	M_PREPEND(m, sizeof(*frame) + sizeof(*snap), M_DONTWAIT);
	if (m == NULL)
		return (m);
	if (m->m_len < sizeof(*frame) + sizeof(*snap)) {
		m = m_pullup(m, sizeof(*frame) + sizeof(*snap));
		if (m == NULL)
			return (m);
	}
	frame = mtod(m, struct ieee80211_frame *);
	snap = (struct llc *)&frame[1];
	if (ni != NULL) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			*ni = ieee80211_ref_node(ic->ic_bss);
		}
#ifndef IEEE80211_STA_ONLY
		else {
			*ni = ieee80211_find_node(ic, eh->ether_shost);
			/*
			 * Make up associations for ad-hoc mode.  To support
			 * ad-hoc WPA, we'll need to maintain a bounded
			 * pool of ad-hoc stations.
			 */
			if (*ni == NULL &&
			    ic->ic_opmode != IEEE80211_M_HOSTAP) {
				*ni = ieee80211_dup_bss(ic, eh->ether_shost);
				if (*ni != NULL) {
					(*ni)->ni_associd = 1;
					ic->ic_newassoc(ic, *ni, 1);
				}
			}
			if (*ni == NULL) {
				m_freem(m);
				return (NULL);
			}
		}
#endif
		(*ni)->ni_inact = 0;
	}
	/* RFC 1042 LLC/SNAP encapsulation, OUI 00:00:00 */
	snap->llc_dsap = snap->llc_ssap = LLC_SNAP_LSAP;
	snap->llc_control = LLC_UI;
	snap->llc_snap.org_code[0] = 0;
	snap->llc_snap.org_code[1] = 0;
	snap->llc_snap.org_code[2] = 0;
	snap->llc_snap.ether_type = eh->ether_type;
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
	/* Doesn't look like much of the 802.11 header is available. */
	*(uint16_t *)frame->i_dur = *(uint16_t *)frame->i_seq = 0;
	/*
	 * Translate the addresses; WDS is not handled.
	 */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		frame->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_shost);
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, ic->ic_bss->ni_bssid);
		break;
	case IEEE80211_M_HOSTAP:
		/* HostAP forwarding defaults to being done on firmware. */
		frame->i_fc[1] = IEEE80211_FC1_DIR_TODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_dhost);
		break;
#endif
	default:
		break;
	}
	return (m);
}
917 
/*
 * Process a chain of received data frames (linked through m_nextpkt):
 * strip the firmware's receive annex (or the bare rx header in monitor
 * mode), re-encapsulate as 802.11 and hand the frame to net80211,
 * including any radiotap listeners.
 */
void
pgt_input_frames(struct pgt_softc *sc, struct mbuf *m)
{
	struct ether_header eh;
	struct ifnet *ifp;
	struct ieee80211_channel *chan;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_node *ni;
	struct ieee80211com *ic;
	struct pgt_rx_annex *pra;
	struct pgt_rx_header *pha;
	struct mbuf *next;
	unsigned int n;
	uint32_t rstamp;
	uint8_t rate, rssi;

	ic = &sc->sc_ic;
	ifp = &ic->ic_if;
	for (next = m; m != NULL; m = next) {
		/* Detach this packet from the chain before handling it. */
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
			/*
			 * Monitor mode frames carry only the rx header, not
			 * the full annex; make it contiguous and skip the
			 * ethernet-header reconstruction below.
			 */
			if (m->m_len < sizeof(*pha)) {
				m = m_pullup(m, sizeof(*pha));
				if (m == NULL) {
					if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
						DPRINTF(("%s: m_pullup "
						    "failure\n",
						    sc->sc_dev.dv_xname));
					ifp->if_ierrors++;
					continue;
				}
			}
			pha = mtod(m, struct pgt_rx_header *);
			pra = NULL;
			goto input;
		}

		/* Normal mode: the frame starts with the full rx annex. */
		if (m->m_len < sizeof(*pra)) {
			m = m_pullup(m, sizeof(*pra));
			if (m == NULL) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: m_pullup failure\n",
					    sc->sc_dev.dv_xname));
				ifp->if_ierrors++;
				continue;
			}
		}
		pra = mtod(m, struct pgt_rx_annex *);
		pha = &pra->pra_header;
		if (sc->sc_debug & SC_DEBUG_RXANNEX)
			DPRINTF(("%s: rx annex: ? %04x "
			    "len %u clock %u flags %02x ? %02x rate %u ? %02x "
			    "freq %u ? %04x rssi %u pad %02x%02x%02x\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pha->pra_unknown0),
			    letoh16(pha->pra_length),
			    letoh32(pha->pra_clock), pha->pra_flags,
			    pha->pra_unknown1, pha->pra_rate,
			    pha->pra_unknown2, letoh32(pha->pra_frequency),
			    pha->pra_unknown3, pha->pra_rssi,
			    pha->pra_pad[0], pha->pra_pad[1], pha->pra_pad[2]));
		if (sc->sc_debug & SC_DEBUG_RXETHER)
			DPRINTF(("%s: rx ether: %s < %s 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    ether_sprintf(pra->pra_ether_dhost),
			    ether_sprintf(pra->pra_ether_shost),
			    ntohs(pra->pra_ether_type)));

		/*
		 * Save the ethernet addresses/type before the annex is
		 * chopped off; pgt_ieee80211_encap() rebuilds the 802.11
		 * header from them.
		 */
		memcpy(eh.ether_dhost, pra->pra_ether_dhost, ETHER_ADDR_LEN);
		memcpy(eh.ether_shost, pra->pra_ether_shost, ETHER_ADDR_LEN);
		eh.ether_type = pra->pra_ether_type;

input:
		/*
		 * This flag is set if e.g. packet could not be decrypted.
		 */
		if (pha->pra_flags & PRA_FLAG_BAD) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		}

		/*
		 * After getting what we want, chop off the annex, then
		 * turn into something that looks like it really was
		 * 802.11.
		 */
		rssi = pha->pra_rssi;
		rstamp = letoh32(pha->pra_clock);
		rate = pha->pra_rate;
		/* Map the reported frequency to one of our channels. */
		n = ieee80211_mhz2ieee(letoh32(pha->pra_frequency), 0);
		if (n <= IEEE80211_CHAN_MAX)
			chan = &ic->ic_channels[n];
		else
			chan = ic->ic_bss->ni_chan;
		/* Strip the annex (or just the rx header in monitor mode). */
		if (pra) {
			m_adj(m, sizeof(*pra));
		} else
			m_adj(m, sizeof(*pha));

		m = pgt_ieee80211_encap(sc, &eh, m, &ni);
		if (m != NULL) {
#if NBPFILTER > 0
			if (sc->sc_drvbpf != NULL) {
				struct mbuf mb;
				struct pgt_rx_radiotap_hdr *tap = &sc->sc_rxtap;

				tap->wr_flags = 0;
				tap->wr_chan_freq = htole16(chan->ic_freq);
				tap->wr_chan_flags = htole16(chan->ic_flags);
				tap->wr_rssi = rssi;
				tap->wr_max_rssi = ic->ic_max_rssi;

				/* Fake mbuf prepending the radiotap header. */
				mb.m_data = (caddr_t)tap;
				mb.m_len = sc->sc_rxtap_len;
				mb.m_next = m;
				mb.m_nextpkt = NULL;
				mb.m_type = 0;
				mb.m_flags = 0;
				bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
			}
#endif
			rxi.rxi_flags = 0;
			ni->ni_rssi = rxi.rxi_rssi = rssi;
			ni->ni_rstamp = rxi.rxi_tstamp = rstamp;
			ieee80211_input(ifp, m, ni, &rxi);
			/*
			 * The frame may have caused the node to be marked for
			 * reclamation (e.g. in response to a DEAUTH message)
			 * so use free_node here instead of unref_node.
			 *
			 * NOTE(review): the comment above mentions free_node
			 * but the code below calls unref_node for ic_bss and
			 * release_node otherwise — confirm which is intended.
			 */
			if (ni == ic->ic_bss)
				ieee80211_unref_node(&ni);
			else
				ieee80211_release_node(&sc->sc_ic, ni);
		} else {
			ifp->if_ierrors++;
		}
	}
}
1061 
1062 void
1063 pgt_wakeup_intr(struct pgt_softc *sc)
1064 {
1065 	int shouldupdate;
1066 	int i;
1067 
1068 	shouldupdate = 0;
1069 	/* Check for any queues being empty before updating. */
1070 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1071 	    sc->sc_cbdmam->dm_mapsize,
1072 	    BUS_DMASYNC_POSTREAD);
1073 	for (i = 0; !shouldupdate && i < PGT_QUEUE_COUNT; i++) {
1074 		if (pgt_queue_is_tx(i))
1075 			shouldupdate = pgt_queue_frags_pending(sc, i);
1076 		else
1077 			shouldupdate = pgt_queue_frags_pending(sc, i) <
1078 			    sc->sc_freeq_count[i];
1079 	}
1080 	if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
1081 		shouldupdate = 1;
1082 	if (sc->sc_debug & SC_DEBUG_POWER)
1083 		DPRINTF(("%s: wakeup interrupt (update = %d)\n",
1084 		    sc->sc_dev.dv_xname, shouldupdate));
1085 	sc->sc_flags &= ~SC_POWERSAVE;
1086 	if (shouldupdate) {
1087 		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
1088 		DELAY(PGT_WRITEIO_DELAY);
1089 	}
1090 }
1091 
1092 void
1093 pgt_sleep_intr(struct pgt_softc *sc)
1094 {
1095 	int allowed;
1096 	int i;
1097 
1098 	allowed = 1;
1099 	/* Check for any queues not being empty before allowing. */
1100 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1101 	    sc->sc_cbdmam->dm_mapsize,
1102 	    BUS_DMASYNC_POSTREAD);
1103 	for (i = 0; allowed && i < PGT_QUEUE_COUNT; i++) {
1104 		if (pgt_queue_is_tx(i))
1105 			allowed = pgt_queue_frags_pending(sc, i) == 0;
1106 		else
1107 			allowed = pgt_queue_frags_pending(sc, i) >=
1108 			    sc->sc_freeq_count[i];
1109 	}
1110 	if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
1111 		allowed = 0;
1112 	if (sc->sc_debug & SC_DEBUG_POWER)
1113 		DPRINTF(("%s: sleep interrupt (allowed = %d)\n",
1114 		    sc->sc_dev.dv_xname, allowed));
1115 	if (allowed && sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) {
1116 		sc->sc_flags |= SC_POWERSAVE;
1117 		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_SLEEP);
1118 		DELAY(PGT_WRITEIO_DELAY);
1119 	}
1120 }
1121 
1122 void
1123 pgt_empty_traps(struct pgt_softc_kthread *sck)
1124 {
1125 	struct pgt_async_trap *pa;
1126 	struct mbuf *m;
1127 
1128 	while (!TAILQ_EMPTY(&sck->sck_traps)) {
1129 		pa = TAILQ_FIRST(&sck->sck_traps);
1130 		TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
1131 		m = pa->pa_mbuf;
1132 		m_freem(m);
1133 	}
1134 }
1135 
/*
 * Per-device worker thread: sleeps until woken, then services (in
 * priority order) async reset requests, queued firmware traps, and
 * software-state update requests, until sck_exit is set.
 */
void
pgt_per_device_kthread(void *argp)
{
	struct pgt_softc *sc;
	struct pgt_softc_kthread *sck;
	struct pgt_async_trap *pa;
	struct mbuf *m;
	int s;

	sc = argp;
	sck = &sc->sc_kthread;
	while (!sck->sck_exit) {
		/* Sleep only when there is truly nothing to do. */
		if (!sck->sck_update && !sck->sck_reset &&
		    TAILQ_EMPTY(&sck->sck_traps))
			tsleep(&sc->sc_kthread, 0, "pgtkth", 0);
		if (sck->sck_reset) {
			/* A reset supersedes pending traps and updates. */
			DPRINTF(("%s: [thread] async reset\n",
			    sc->sc_dev.dv_xname));
			sck->sck_reset = 0;
			sck->sck_update = 0;
			pgt_empty_traps(sck);
			s = splnet();
			pgt_stop(sc, SC_NEEDS_RESET);
			splx(s);
		} else if (!TAILQ_EMPTY(&sck->sck_traps)) {
			/* Handle one trap per loop iteration. */
			DPRINTF(("%s: [thread] got a trap\n",
			    sc->sc_dev.dv_xname));
			pa = TAILQ_FIRST(&sck->sck_traps);
			TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
			m = pa->pa_mbuf;
			/*
			 * Strip the trap record so the mbuf data points at
			 * the trap payload; pa itself still points into the
			 * mbuf storage (valid until the m_freem below).
			 */
			m_adj(m, sizeof(*pa));
			pgt_update_sw_from_hw(sc, pa, m);
			m_freem(m);
		} else if (sck->sck_update) {
			sck->sck_update = 0;
			pgt_update_sw_from_hw(sc, NULL, NULL);
		}
	}
	/* Drop anything queued after exit was requested. */
	pgt_empty_traps(sck);
	kthread_exit(0);
}
1177 
1178 void
1179 pgt_async_reset(struct pgt_softc *sc)
1180 {
1181 	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
1182 		return;
1183 	sc->sc_kthread.sck_reset = 1;
1184 	wakeup(&sc->sc_kthread);
1185 }
1186 
1187 void
1188 pgt_async_update(struct pgt_softc *sc)
1189 {
1190 	if (sc->sc_flags & SC_DYING)
1191 		return;
1192 	sc->sc_kthread.sck_update = 1;
1193 	wakeup(&sc->sc_kthread);
1194 }
1195 
/*
 * Interrupt handler.  Returns 0 when the interrupt was not ours (or we
 * are asleep and cannot service it), 1 otherwise.
 */
int
pgt_intr(void *arg)
{
	struct pgt_softc *sc;
	struct ifnet *ifp;
	u_int32_t reg;

	sc = arg;
	ifp = &sc->sc_ic.ic_if;

	/*
	 * Here the Linux driver ands in the value of the INT_EN register,
	 * and masks off everything but the documented interrupt bits.  Why?
	 *
	 * Unknown bit 0x4000 is set upon initialization, 0x8000000 some
	 * other times.
	 */
	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON &&
	    sc->sc_flags & SC_POWERSAVE) {
		/*
		 * Don't try handling the interrupt in sleep mode.
		 */
		reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
		if (reg & PGT_CTRL_STAT_SLEEPMODE)
			return (0);
	}
	reg = pgt_read_4(sc, PGT_REG_INT_STAT);
	if (reg == 0)
		return (0); /* This interrupt is not from us */

	/* Acknowledge everything we are about to handle. */
	pgt_write_4_flush(sc, PGT_REG_INT_ACK, reg);
	if (reg & PGT_INT_STAT_INIT)
		pgt_init_intr(sc);
	if (reg & PGT_INT_STAT_UPDATE) {
		pgt_update_intr(sc, 0);
		/*
		 * If we got an update, it's not really asleep.
		 */
		sc->sc_flags &= ~SC_POWERSAVE;
		/*
		 * Pretend I have any idea what the documentation
		 * would say, and just give it a shot sending an
		 * "update" after acknowledging the interrupt
		 * bits and writing out the new control block.
		 */
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
	/* Wakeup takes precedence when both sleep and wakeup are flagged. */
	if (reg & PGT_INT_STAT_SLEEP && !(reg & PGT_INT_STAT_WAKEUP))
		pgt_sleep_intr(sc);
	if (reg & PGT_INT_STAT_WAKEUP)
		pgt_wakeup_intr(sc);

	/* Defer any reset requested during servicing to the kthread. */
	if (sc->sc_flags & SC_INTR_RESET) {
		sc->sc_flags &= ~SC_INTR_RESET;
		pgt_async_reset(sc);
	}

	if (reg & ~PGT_INT_STAT_SOURCES && sc->sc_debug & SC_DEBUG_UNEXPECTED) {
		DPRINTF(("%s: unknown interrupt bits %#x (stat %#x)\n",
		    sc->sc_dev.dv_xname,
		    reg & ~PGT_INT_STAT_SOURCES,
		    pgt_read_4(sc, PGT_REG_CTRL_STAT)));
	}

	/* Kick transmission in case descriptors were just freed. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		pgt_start(ifp);

	return (1);
}
1266 
/*
 * A transmit descriptor on queue pq has completed: move it from the
 * dirty list back to the free list and unload its DMA mapping.
 *
 * NOTE(review): TAILQ_FIRST is not checked for NULL here — callers are
 * presumably expected to guarantee a non-empty dirty queue; confirm.
 */
void
pgt_txdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
	TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	/* Management frames want completion information. */
	if (sc->sc_debug & SC_DEBUG_QUEUES) {
		DPRINTF(("%s: queue: tx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
		if (sc->sc_debug & SC_DEBUG_MGMT && pgt_queue_is_mgmt(pq)) {
			struct pgt_mgmt_frame *pmf;

			pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
			DPRINTF(("%s: queue: txmgmt %p <- "
			    "(ver %u, op %u, flags %#x)\n",
			    sc->sc_dev.dv_xname,
			    pd, pmf->pmf_version, pmf->pmf_operation,
			    pmf->pmf_flags));
		}
	}
	pgt_unload_tx_desc_frag(sc, pd);
}
1297 
/*
 * A receive descriptor on queue pq has been filled by the device: move
 * it from the free list to the dirty list so the completion path can
 * harvest it.
 *
 * NOTE(review): as in pgt_txdone(), TAILQ_FIRST is assumed non-NULL.
 */
void
pgt_rxdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	if (sc->sc_debug & SC_DEBUG_QUEUES)
		DPRINTF(("%s: queue: rx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
	/* PF_FLAG_MF (more fragments) is the only flag we expect here. */
	if (sc->sc_debug & SC_DEBUG_UNEXPECTED &&
	    pd->pd_fragp->pf_flags & ~htole16(PF_FLAG_MF))
		DPRINTF(("%s: unknown flags on rx [%u]: %#x\n",
		    sc->sc_dev.dv_xname, pq, letoh16(pd->pd_fragp->pf_flags)));
}
1319 
1320 /*
1321  * Traps are generally used for the firmware to report changes in state
1322  * back to the host.  Mostly this processes changes in link state, but
1323  * it needs to also be used to initiate WPA and other authentication
1324  * schemes in terms of client (station) or server (access point).
1325  */
/*
 * Queue a firmware trap for the per-device kthread.  The trap is
 * copied into an mbuf laid out as:
 *   [struct pgt_async_trap][uint32_t oid][size bytes of trapdata]
 * and linked onto sck_traps; the kthread strips the leading record
 * with m_adj() before passing the payload to pgt_update_sw_from_hw().
 */
void
pgt_trap_received(struct pgt_softc *sc, uint32_t oid, void *trapdata,
    size_t size)
{
	struct pgt_async_trap *pa;
	struct mbuf *m;
	char *p;
	size_t total;

	if (sc->sc_flags & SC_DYING)
		return;

	total = sizeof(oid) + size + sizeof(struct pgt_async_trap);
	if (total > MLEN) {
		/*
		 * Too big for a plain mbuf; use a cluster.
		 * NOTE(review): there is no check that total fits in a
		 * cluster (MCLBYTES) — presumably trap payloads are
		 * bounded by the caller; confirm.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return;
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			m = NULL;
		}
	} else
		m = m_get(M_DONTWAIT, MT_DATA);

	/* Allocation failures silently drop the trap. */
	if (m == NULL)
		return;
	else
		m->m_len = total;

	/* Lay out the trap record, oid and payload back to back. */
	pa = mtod(m, struct pgt_async_trap *);
	p = mtod(m, char *) + sizeof(*pa);
	*(uint32_t *)p = oid;
	p += sizeof(uint32_t);
	memcpy(p, trapdata, size);
	pa->pa_mbuf = m;

	TAILQ_INSERT_TAIL(&sc->sc_kthread.sck_traps, pa, pa_link);
	wakeup(&sc->sc_kthread);
}
1366 
1367 /*
1368  * Process a completed management response (all requests should be
1369  * responded to, quickly) or an event (trap).
1370  */
/*
 * Process a completed management response (all requests should be
 * responded to, quickly) or an event (trap).
 *
 * pmd is the in-progress request this frame is matched against; it is
 * NULL when no request was outstanding.  Frames that do not correspond
 * to a request (traps, malformed frames) take the out_nopmd path and
 * leave pmd untouched; otherwise the waiter is removed from
 * sc_mgmtinprog and woken with pmd_error set.
 */
void
pgt_mgmtrx_completion(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	uint32_t oid, size;

	/* Recycle the descriptor back onto the free list immediately. */
	pd = TAILQ_FIRST(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX]);
	TAILQ_REMOVE(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX], pd, pd_link);
	sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[PGT_QUEUE_MGMT_RX],
	    pd, pd_link);
	sc->sc_freeq_count[PGT_QUEUE_MGMT_RX]++;
	/* Validate the frame before trusting any of its fields. */
	if (letoh16(pd->pd_fragp->pf_size) < sizeof(*pmf)) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt desc too small: %u\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pd->pd_fragp->pf_size)));
		goto out_nopmd;
	}
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	if (pmf->pmf_version != PMF_VER) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt version %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_version));
		goto out_nopmd;
	}
	if (pmf->pmf_device != PMF_DEV) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt dev %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_device));
		goto out;
	}
	if (pmf->pmf_flags & ~PMF_FLAG_VALID) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt flags %x\n",
			    sc->sc_dev.dv_xname,
			    pmf->pmf_flags & ~PMF_FLAG_VALID));
		goto out;
	}
	/* The frame declares its own byte order for oid/size. */
	if (pmf->pmf_flags & PMF_FLAG_LE) {
		oid = letoh32(pmf->pmf_oid);
		size = letoh32(pmf->pmf_size);
	} else {
		oid = betoh32(pmf->pmf_oid);
		size = betoh32(pmf->pmf_size);
	}
	if (pmf->pmf_operation == PMF_OP_TRAP) {
		pmd = NULL; /* ignored */
		DPRINTF(("%s: mgmt trap received (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmf->pmf_operation, oid, size));
		/* Hand the (bounded) trap payload to the kthread. */
		pgt_trap_received(sc, oid, (char *)pmf + sizeof(*pmf),
		    min(size, PGT_FRAG_SIZE - sizeof(*pmf)));
		goto out_nopmd;
	}
	if (pmd == NULL) {
		if (sc->sc_debug & (SC_DEBUG_UNEXPECTED | SC_DEBUG_MGMT))
			DPRINTF(("%s: spurious mgmt received "
			    "(op %u, oid %#x, len %u)\n", sc->sc_dev.dv_xname,
			    pmf->pmf_operation, oid, size));
		goto out_nopmd;
	}
	switch (pmf->pmf_operation) {
	case PMF_OP_RESPONSE:
		pmd->pmd_error = 0;
		break;
	case PMF_OP_ERROR:
		pmd->pmd_error = EPERM;
		goto out;
	default:
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt op %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_operation));
		pmd->pmd_error = EIO;
		goto out;
	}
	/* A changed oid is tolerated but recorded. */
	if (oid != pmd->pmd_oid) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt oid changed from %#x -> %#x\n",
			    sc->sc_dev.dv_xname, pmd->pmd_oid, oid));
		pmd->pmd_oid = oid;
	}
	/* Copy the response payload out to the waiter, if it wants one. */
	if (pmd->pmd_recvbuf != NULL) {
		if (size > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: mgmt oid %#x has bad size %u\n",
				    sc->sc_dev.dv_xname, oid, size));
			pmd->pmd_error = EIO;
			goto out;
		}
		if (size > pmd->pmd_len)
			pmd->pmd_error = ENOMEM;
		else
			memcpy(pmd->pmd_recvbuf, (char *)pmf + sizeof(*pmf),
			    size);
		pmd->pmd_len = size;
	}

out:
	/* Complete the request and wake the sleeper in pgt_mgmt_request(). */
	TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
	wakeup_one(pmd);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p <- (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname, pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
out_nopmd:
	/* Rearm the descriptor for further reception. */
	pgt_reinit_rx_desc_frag(sc, pd);
}
1480 
1481 /*
1482  * Queue packets for reception and defragmentation.  I don't know now
1483  * whether the rx queue being full enough to start, but not finish,
1484  * queueing a fragmented packet, can happen.
1485  */
struct mbuf *
pgt_datarx_completion(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct ifnet *ifp;
	struct pgt_desc *pd;
	struct mbuf *top, **mp, *m;
	size_t datalen;
	uint16_t morefrags, dataoff;
	int tlen = 0;

	ifp = &sc->sc_ic.ic_if;
	m = NULL;
	top = NULL;
	mp = &top;

	/*
	 * Walk the dirty descriptors, copying each fragment into its own
	 * mbuf and chaining them until a fragment without PF_FLAG_MF
	 * terminates the packet.
	 */
	while ((pd = TAILQ_FIRST(&sc->sc_dirtyq[pq])) != NULL) {
		TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
		sc->sc_dirtyq_count[pq]--;
		datalen = letoh16(pd->pd_fragp->pf_size);
		/* Offset of the payload within the descriptor's buffer. */
		dataoff = letoh32(pd->pd_fragp->pf_addr) - pd->pd_dmaaddr;
		morefrags = pd->pd_fragp->pf_flags & htole16(PF_FLAG_MF);

		if (sc->sc_debug & SC_DEBUG_RXFRAG)
			DPRINTF(("%s: rx frag: len %u memoff %u flags %x\n",
			    sc->sc_dev.dv_xname, datalen, dataoff,
			    pd->pd_fragp->pf_flags));

		/* Add the (two+?) bytes for the header. */
		if (datalen + dataoff > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s data rx too big: %u\n",
				    sc->sc_dev.dv_xname, datalen));
			goto fail;
		}

		/* First mbuf carries the packet header; the rest are plain. */
		if (m == NULL)
			MGETHDR(m, M_DONTWAIT, MT_DATA);
		else
			m = m_get(M_DONTWAIT, MT_DATA);

		if (m == NULL)
			goto fail;
		/* MHLEN is the tighter bound, so this is safe either way. */
		if (datalen > MHLEN) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_free(m);
				goto fail;
			}
		}
		bcopy(pd->pd_mem + dataoff, mtod(m, char *), datalen);
		m->m_len = datalen;
		tlen += datalen;

		*mp = m;
		mp = &m->m_next;

		/* Fragment copied out; rearm the descriptor. */
		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
		sc->sc_freeq_count[pq]++;
		pgt_reinit_rx_desc_frag(sc, pd);

		if (!morefrags)
			break;
	}

	if (top) {
		top->m_pkthdr.len = tlen;
		top->m_pkthdr.rcvif = ifp;
	}
	return (top);

fail:
	/* pd is still ours here: recycle it before dropping the packet. */
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	pgt_reinit_rx_desc_frag(sc, pd);

	ifp->if_ierrors++;
	if (top)
		m_freem(top);
	return (NULL);
}
1566 
1567 int
1568 pgt_oid_get(struct pgt_softc *sc, enum pgt_oid oid,
1569     void *arg, size_t arglen)
1570 {
1571 	struct pgt_mgmt_desc pmd;
1572 	int error;
1573 
1574 	bzero(&pmd, sizeof(pmd));
1575 	pmd.pmd_recvbuf = arg;
1576 	pmd.pmd_len = arglen;
1577 	pmd.pmd_oid = oid;
1578 
1579 	error = pgt_mgmt_request(sc, &pmd);
1580 	if (error == 0)
1581 		error = pmd.pmd_error;
1582 	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1583 		DPRINTF(("%s: failure getting oid %#x: %d\n",
1584 		    sc->sc_dev.dv_xname, oid, error));
1585 
1586 	return (error);
1587 }
1588 
1589 int
1590 pgt_oid_retrieve(struct pgt_softc *sc, enum pgt_oid oid,
1591     void *arg, size_t arglen)
1592 {
1593 	struct pgt_mgmt_desc pmd;
1594 	int error;
1595 
1596 	bzero(&pmd, sizeof(pmd));
1597 	pmd.pmd_sendbuf = arg;
1598 	pmd.pmd_recvbuf = arg;
1599 	pmd.pmd_len = arglen;
1600 	pmd.pmd_oid = oid;
1601 
1602 	error = pgt_mgmt_request(sc, &pmd);
1603 	if (error == 0)
1604 		error = pmd.pmd_error;
1605 	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1606 		DPRINTF(("%s: failure retrieving oid %#x: %d\n",
1607 		    sc->sc_dev.dv_xname, oid, error));
1608 
1609 	return (error);
1610 }
1611 
1612 int
1613 pgt_oid_set(struct pgt_softc *sc, enum pgt_oid oid,
1614     const void *arg, size_t arglen)
1615 {
1616 	struct pgt_mgmt_desc pmd;
1617 	int error;
1618 
1619 	bzero(&pmd, sizeof(pmd));
1620 	pmd.pmd_sendbuf = arg;
1621 	pmd.pmd_len = arglen;
1622 	pmd.pmd_oid = oid;
1623 
1624 	error = pgt_mgmt_request(sc, &pmd);
1625 	if (error == 0)
1626 		error = pmd.pmd_error;
1627 	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1628 		DPRINTF(("%s: failure setting oid %#x: %d\n",
1629 		    sc->sc_dev.dv_xname, oid, error));
1630 
1631 	return (error);
1632 }
1633 
/*
 * Debug aid: print the control/interrupt status registers and both
 * sides' current-fragment counters from the shared control block.
 */
void
pgt_state_dump(struct pgt_softc *sc)
{
	printf("%s: state dump: control 0x%08x interrupt 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    pgt_read_4(sc, PGT_REG_CTRL_STAT),
	    pgt_read_4(sc, PGT_REG_INT_STAT));

	printf("%s: state dump: driver curfrag[]\n",
	    sc->sc_dev.dv_xname);

	/* One counter per queue (PGT_QUEUE_COUNT == 6, by the look of it). */
	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    letoh32(sc->sc_cb->pcb_driver_curfrag[0]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[1]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[2]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[3]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[4]),
	    letoh32(sc->sc_cb->pcb_driver_curfrag[5]));

	printf("%s: state dump: device curfrag[]\n",
	    sc->sc_dev.dv_xname);

	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    sc->sc_dev.dv_xname,
	    letoh32(sc->sc_cb->pcb_device_curfrag[0]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[1]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[2]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[3]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[4]),
	    letoh32(sc->sc_cb->pcb_device_curfrag[5]));
}
1666 
/*
 * Send a management request described by pmd and sleep until
 * pgt_mgmtrx_completion() wakes us with the result, or a ~1 second
 * timeout expires.  Returns 0 (with the result in pmd->pmd_error),
 * ETIMEDOUT, or an errno from setup.
 */
int
pgt_mgmt_request(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	int error, i;

	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return (EIO);
	/* The whole request must fit in one fragment. */
	if (pmd->pmd_len > PGT_FRAG_SIZE - sizeof(*pmf))
		return (ENOMEM);
	pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_MGMT_TX]);
	if (pd == NULL)
		return (ENOMEM);
	error = pgt_load_tx_desc_frag(sc, PGT_QUEUE_MGMT_TX, pd);
	if (error)
		return (error);
	/* Build the management frame header in the descriptor's buffer. */
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	pmf->pmf_version = PMF_VER;
	/* "get" and "retrieve" operations look the same */
	if (pmd->pmd_recvbuf != NULL)
		pmf->pmf_operation = PMF_OP_GET;
	else
		pmf->pmf_operation = PMF_OP_SET;
	pmf->pmf_oid = htobe32(pmd->pmd_oid);
	pmf->pmf_device = PMF_DEV;
	pmf->pmf_flags = 0;
	pmf->pmf_size = htobe32(pmd->pmd_len);
	/* "set" and "retrieve" operations both send data */
	if (pmd->pmd_sendbuf != NULL)
		memcpy((char *)pmf + sizeof(*pmf), pmd->pmd_sendbuf,
		    pmd->pmd_len);
	else
		bzero((char *)pmf + sizeof(*pmf), pmd->pmd_len);
	/* EINPROGRESS marks the request as awaiting completion. */
	pmd->pmd_error = EINPROGRESS;
	TAILQ_INSERT_TAIL(&sc->sc_mgmtinprog, pmd, pmd_link);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p -> (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
	pgt_desc_transmit(sc, PGT_QUEUE_MGMT_TX, pd,
	    sizeof(*pmf) + pmd->pmd_len, 0);
	/*
	 * Try for one second, triggering 10 times.
	 *
	 * Do our best to work around seemingly buggy CardBus controllers
	 * on Soekris 4521 that fail to get interrupts with alarming
	 * regularity: run as if an interrupt occurred and service every
	 * queue except for mbuf reception.
	 */
	i = 0;
	do {
		/* Any wakeup other than the timeout means completion. */
		if (tsleep(pmd, 0, "pgtmgm", hz / 10) != EWOULDBLOCK)
			break;
		if (pmd->pmd_error != EINPROGRESS)
			break;
		if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) {
			pmd->pmd_error = EIO;
			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
			break;
		}
		if (i != 9)
			pgt_maybe_trigger(sc, PGT_QUEUE_MGMT_RX);
#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
		pgt_update_intr(sc, 0);
#endif
	} while (i++ < 10);

	if (pmd->pmd_error == EINPROGRESS) {
		/* Never answered: unhook ourselves and schedule a reset. */
		printf("%s: timeout waiting for management "
		    "packet response to %#x\n",
		    sc->sc_dev.dv_xname, pmd->pmd_oid);
		TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			pgt_state_dump(sc);
		pgt_async_reset(sc);
		error = ETIMEDOUT;
	} else
		error = 0;

	return (error);
}
1750 
/*
 * Hand a loaded descriptor to the device: move it to the dirty queue,
 * finalize its fragment entry, bump the driver's current-fragment
 * counter in the control block, and (unless more fragments of the same
 * packet follow) trigger the device.  The bus_dmamap_sync ordering
 * around the control-block update is deliberate.
 */
void
pgt_desc_transmit(struct pgt_softc *sc, enum pgt_queue pq, struct pgt_desc *pd,
    uint16_t len, int morecoming)
{
	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]++;
	if (sc->sc_debug & SC_DEBUG_QUEUES)
		DPRINTF(("%s: queue: tx %u -> [%u]\n", sc->sc_dev.dv_xname,
		    pd->pd_fragnum, pq));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	/* PF_FLAG_MF tells the device more fragments follow this one. */
	if (morecoming)
		pd->pd_fragp->pf_flags |= htole16(PF_FLAG_MF);
	pd->pd_fragp->pf_size = htole16(len);
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	/* Advance our producer index (kept little-endian in the cb). */
	sc->sc_cb->pcb_driver_curfrag[pq] =
	    htole32(letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) + 1);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
	if (!morecoming)
		pgt_maybe_trigger(sc, pq);
}
1779 
/*
 * Notify the device that queue pq has new work.  If the device is in
 * powersave it must first be woken (INT_STAT reading 0xabadface
 * apparently indicates it is asleep); otherwise a plain update
 * doorbell suffices.
 */
void
pgt_maybe_trigger(struct pgt_softc *sc, enum pgt_queue pq)
{
	unsigned int tries = 1000000 / PGT_WRITEIO_DELAY; /* one second */
	uint32_t reg;

	if (sc->sc_debug & SC_DEBUG_TRIGGER)
		DPRINTF(("%s: triggered by queue [%u]\n",
		    sc->sc_dev.dv_xname, pq));
	pgt_debug_events(sc, "trig");
	if (sc->sc_flags & SC_POWERSAVE) {
		/* Magic values ahoy? */
		if (pgt_read_4(sc, PGT_REG_INT_STAT) == 0xabadface) {
			/*
			 * NOTE(review): this loop always runs the full
			 * `tries` iterations — it delays while SLEEPMODE is
			 * clear but never breaks early once the bit is set;
			 * only the final read of reg is tested below.
			 * Confirm this matches the intended wait semantics.
			 */
			do {
				reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
				if (!(reg & PGT_CTRL_STAT_SLEEPMODE))
					DELAY(PGT_WRITEIO_DELAY);
			} while (tries-- != 0);
			if (!(reg & PGT_CTRL_STAT_SLEEPMODE)) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: timeout triggering from "
					    "sleep mode\n",
					    sc->sc_dev.dv_xname));
				pgt_async_reset(sc);
				return;
			}
		}
		/* Wake the device, then flush/settle the write. */
		pgt_write_4_flush(sc, PGT_REG_DEV_INT,
		    PGT_DEV_INT_WAKEUP);
		DELAY(PGT_WRITEIO_DELAY);
		/* read the status back in */
		(void)pgt_read_4(sc, PGT_REG_CTRL_STAT);
		DELAY(PGT_WRITEIO_DELAY);
	} else {
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
}
1818 
1819 struct ieee80211_node *
1820 pgt_ieee80211_node_alloc(struct ieee80211com *ic)
1821 {
1822 	struct pgt_ieee80211_node *pin;
1823 
1824 	pin = malloc(sizeof(*pin), M_DEVBUF, M_NOWAIT | M_ZERO);
1825 	if (pin != NULL) {
1826 		pin->pin_dot1x_auth = PIN_DOT1X_UNAUTHORIZED;
1827 	}
1828 	return (struct ieee80211_node *)pin;
1829 }
1830 
/*
 * net80211 new-association hook: take an extra reference on the node
 * (reallynew is unused here).
 */
void
pgt_ieee80211_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni,
    int reallynew)
{
	ieee80211_ref_node(ni);
}
1837 
1838 void
1839 pgt_ieee80211_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
1840 {
1841 	struct pgt_ieee80211_node *pin;
1842 
1843 	pin = (struct pgt_ieee80211_node *)ni;
1844 	free(pin, M_DEVBUF);
1845 }
1846 
1847 void
1848 pgt_ieee80211_node_copy(struct ieee80211com *ic, struct ieee80211_node *dst,
1849     const struct ieee80211_node *src)
1850 {
1851 	const struct pgt_ieee80211_node *psrc;
1852 	struct pgt_ieee80211_node *pdst;
1853 
1854 	psrc = (const struct pgt_ieee80211_node *)src;
1855 	pdst = (struct pgt_ieee80211_node *)dst;
1856 	bcopy(psrc, pdst, sizeof(*psrc));
1857 }
1858 
/*
 * net80211 send-management hook: the firmware handles management
 * frames itself, so the stack must not send its own.
 */
int
pgt_ieee80211_send_mgmt(struct ieee80211com *ic, struct ieee80211_node *ni,
    int type, int arg1, int arg2)
{
	return (EOPNOTSUPP);
}
1865 
1866 int
1867 pgt_net_attach(struct pgt_softc *sc)
1868 {
1869 	struct ieee80211com *ic = &sc->sc_ic;
1870 	struct ifnet *ifp = &ic->ic_if;
1871 	struct ieee80211_rateset *rs;
1872 	uint8_t rates[IEEE80211_RATE_MAXSIZE];
1873 	struct pgt_obj_buffer psbuffer;
1874 	struct pgt_obj_frequencies *freqs;
1875 	uint32_t phymode, country;
1876 	unsigned int chan, i, j, firstchan = -1;
1877 	int error;
1878 
1879 	psbuffer.pob_size = htole32(PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT);
1880 	psbuffer.pob_addr = htole32(sc->sc_psmdmam->dm_segs[0].ds_addr);
1881 	error = pgt_oid_set(sc, PGT_OID_PSM_BUFFER, &psbuffer, sizeof(country));
1882 	if (error)
1883 		return (error);
1884 	error = pgt_oid_get(sc, PGT_OID_PHY, &phymode, sizeof(phymode));
1885 	if (error)
1886 		return (error);
1887 	error = pgt_oid_get(sc, PGT_OID_MAC_ADDRESS, ic->ic_myaddr,
1888 	    sizeof(ic->ic_myaddr));
1889 	if (error)
1890 		return (error);
1891 	error = pgt_oid_get(sc, PGT_OID_COUNTRY, &country, sizeof(country));
1892 	if (error)
1893 		return (error);
1894 
1895 	ifp->if_softc = sc;
1896 	ifp->if_init = pgt_init;
1897 	ifp->if_ioctl = pgt_ioctl;
1898 	ifp->if_start = pgt_start;
1899 	ifp->if_watchdog = pgt_watchdog;
1900 	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
1901 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
1902 
1903 	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
1904 	IFQ_SET_READY(&ifp->if_snd);
1905 
1906 	/*
1907 	 * Set channels
1908 	 *
1909 	 * Prism hardware likes to report supported frequencies that are
1910 	 * not actually available for the country of origin.
1911 	 */
1912 	j = sizeof(*freqs) + (IEEE80211_CHAN_MAX + 1) * sizeof(uint16_t);
1913 	freqs = malloc(j, M_DEVBUF, M_WAITOK);
1914 	error = pgt_oid_get(sc, PGT_OID_SUPPORTED_FREQUENCIES, freqs, j);
1915 	if (error) {
1916 		free(freqs, M_DEVBUF);
1917 		return (error);
1918 	}
1919 
1920 	for (i = 0, j = letoh16(freqs->pof_count); i < j; i++) {
1921 		chan = ieee80211_mhz2ieee(letoh16(freqs->pof_freqlist_mhz[i]),
1922 		    0);
1923 
1924 		if (chan > IEEE80211_CHAN_MAX) {
1925 			printf("%s: reported bogus channel (%uMHz)\n",
1926 			    sc->sc_dev.dv_xname, chan);
1927 			free(freqs, M_DEVBUF);
1928 			return (EIO);
1929 		}
1930 
1931 		if (letoh16(freqs->pof_freqlist_mhz[i]) < 5000) {
1932 			if (!(phymode & htole32(PGT_OID_PHY_2400MHZ)))
1933 				continue;
1934 			if (country == letoh32(PGT_COUNTRY_USA)) {
1935 				if (chan >= 12 && chan <= 14)
1936 					continue;
1937 			}
1938 			if (chan <= 14)
1939 				ic->ic_channels[chan].ic_flags |=
1940 				    IEEE80211_CHAN_B;
1941 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_PUREG;
1942 		} else {
1943 			if (!(phymode & htole32(PGT_OID_PHY_5000MHZ)))
1944 				continue;
1945 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_A;
1946 		}
1947 
1948 		ic->ic_channels[chan].ic_freq =
1949 		    letoh16(freqs->pof_freqlist_mhz[i]);
1950 
1951 		if (firstchan == -1)
1952 			firstchan = chan;
1953 
1954 		DPRINTF(("%s: set channel %d to freq %uMHz\n",
1955 		    sc->sc_dev.dv_xname, chan,
1956 		    letoh16(freqs->pof_freqlist_mhz[i])));
1957 	}
1958 	free(freqs, M_DEVBUF);
1959 	if (firstchan == -1) {
1960 		printf("%s: no channels found\n", sc->sc_dev.dv_xname);
1961 		return (EIO);
1962 	}
1963 
1964 	/*
1965 	 * Set rates
1966 	 */
1967 	bzero(rates, sizeof(rates));
1968 	error = pgt_oid_get(sc, PGT_OID_SUPPORTED_RATES, rates, sizeof(rates));
1969 	if (error)
1970 		return (error);
1971 	for (i = 0; i < sizeof(rates) && rates[i] != 0; i++) {
1972 		switch (rates[i]) {
1973 		case 2:
1974 		case 4:
1975 		case 11:
1976 		case 22:
1977 		case 44: /* maybe */
1978 			if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1979 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11B];
1980 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1981 			}
1982 		default:
1983 			if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1984 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
1985 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1986 			}
1987 			if (phymode & htole32(PGT_OID_PHY_5000MHZ)) {
1988 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
1989 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1990 			}
1991 			rs = &ic->ic_sup_rates[IEEE80211_MODE_AUTO];
1992 			rs->rs_rates[rs->rs_nrates++] = rates[i];
1993 		}
1994 	}
1995 
1996 	ic->ic_caps = IEEE80211_C_WEP | IEEE80211_C_PMGT | IEEE80211_C_TXPMGT |
1997 	    IEEE80211_C_SHSLOT | IEEE80211_C_SHPREAMBLE | IEEE80211_C_MONITOR;
1998 #ifndef IEEE80211_STA_ONLY
1999 	ic->ic_caps |= IEEE80211_C_IBSS | IEEE80211_C_HOSTAP;
2000 #endif
2001 	ic->ic_opmode = IEEE80211_M_STA;
2002 	ic->ic_state = IEEE80211_S_INIT;
2003 
2004 	if_attach(ifp);
2005 	ieee80211_ifattach(ifp);
2006 
2007 	/* setup post-attach/pre-lateattach vector functions */
2008 	sc->sc_newstate = ic->ic_newstate;
2009 	ic->ic_newstate = pgt_newstate;
2010 	ic->ic_node_alloc = pgt_ieee80211_node_alloc;
2011 	ic->ic_newassoc = pgt_ieee80211_newassoc;
2012 	ic->ic_node_free = pgt_ieee80211_node_free;
2013 	ic->ic_node_copy = pgt_ieee80211_node_copy;
2014 	ic->ic_send_mgmt = pgt_ieee80211_send_mgmt;
2015 	ic->ic_max_rssi = 255;	/* rssi is a u_int8_t */
2016 
2017 	/* let net80211 handle switching around the media + resetting */
2018 	ieee80211_media_init(ifp, pgt_media_change, pgt_media_status);
2019 
2020 #if NBPFILTER > 0
2021 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
2022 	    sizeof(struct ieee80211_frame) + 64);
2023 
2024 	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
2025 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
2026 	sc->sc_rxtap.wr_ihdr.it_present = htole32(PGT_RX_RADIOTAP_PRESENT);
2027 
2028 	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
2029 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
2030 	sc->sc_txtap.wt_ihdr.it_present = htole32(PGT_TX_RADIOTAP_PRESENT);
2031 #endif
2032 
2033 	/*
2034          * Enable shutdown and power hooks
2035          */
2036         sc->sc_shutdown_hook = shutdownhook_establish(pgt_shutdown, sc);
2037         if (sc->sc_shutdown_hook == NULL)
2038                 printf("%s: WARNING: unable to establish shutdown hook\n",
2039                     sc->sc_dev.dv_xname);
2040         sc->sc_power_hook = powerhook_establish(pgt_power, sc);
2041         if (sc->sc_power_hook == NULL)
2042                 printf("%s: WARNING: unable to establish power hook\n",
2043                     sc->sc_dev.dv_xname);
2044 
2045 	return (0);
2046 }
2047 
2048 int
2049 pgt_media_change(struct ifnet *ifp)
2050 {
2051 	struct pgt_softc *sc = ifp->if_softc;
2052 	int error;
2053 
2054         error = ieee80211_media_change(ifp);
2055         if (error == ENETRESET) {
2056                 pgt_update_hw_from_sw(sc, 0, 0);
2057                 error = 0;
2058         }
2059 
2060         return (error);
2061 }
2062 
2063 void
2064 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr)
2065 {
2066 	struct pgt_softc *sc = ifp->if_softc;
2067 	struct ieee80211com *ic = &sc->sc_ic;
2068 	uint32_t rate;
2069 	int s;
2070 
2071 	imr->ifm_status = 0;
2072 	imr->ifm_active = IFM_IEEE80211 | IFM_NONE;
2073 
2074 	if (!(ifp->if_flags & IFF_UP))
2075 		return;
2076 
2077 	s = splnet();
2078 
2079 	if (ic->ic_fixed_rate != -1) {
2080 		rate = ic->ic_sup_rates[ic->ic_curmode].
2081 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
2082 	} else {
2083 		if (pgt_oid_get(sc, PGT_OID_LINK_STATE, &rate, sizeof(rate)))
2084 			return;
2085 		rate = letoh32(rate);
2086 		if (sc->sc_debug & SC_DEBUG_LINK) {
2087 			DPRINTF(("%s: %s: link rate %u\n",
2088 			    sc->sc_dev.dv_xname, __func__, rate));
2089 		}
2090 		if (rate == 0)
2091 			return;
2092 	}
2093 
2094 	imr->ifm_status = IFM_AVALID;
2095 	imr->ifm_active = IFM_IEEE80211;
2096 	if (ic->ic_state == IEEE80211_S_RUN)
2097 		imr->ifm_status |= IFM_ACTIVE;
2098 
2099 	imr->ifm_active |= ieee80211_rate2media(ic, rate, ic->ic_curmode);
2100 
2101 	switch (ic->ic_opmode) {
2102 	case IEEE80211_M_STA:
2103 		break;
2104 #ifndef IEEE80211_STA_ONLY
2105 	case IEEE80211_M_IBSS:
2106 		imr->ifm_active |= IFM_IEEE80211_ADHOC;
2107 		break;
2108 	case IEEE80211_M_AHDEMO:
2109 		imr->ifm_active |= IFM_IEEE80211_ADHOC | IFM_FLAG0;
2110 		break;
2111 	case IEEE80211_M_HOSTAP:
2112 		imr->ifm_active |= IFM_IEEE80211_HOSTAP;
2113 		break;
2114 #endif
2115 	case IEEE80211_M_MONITOR:
2116 		imr->ifm_active |= IFM_IEEE80211_MONITOR;
2117 		break;
2118 	default:
2119 		break;
2120 	}
2121 
2122 	splx(s);
2123 }
2124 
2125 /*
2126  * Start data frames.  Critical sections surround the boundary of
2127  * management frame transmission / transmission acknowledgement / response
2128  * and data frame transmission / transmission acknowledgement.
2129  */
void
pgt_start(struct ifnet *ifp)
{
	struct pgt_softc *sc;
	struct ieee80211com *ic;
	struct pgt_desc *pd;
	struct mbuf *m;
	int error;

	sc = ifp->if_softc;
	ic = &sc->sc_ic;

	/* Only transmit while the device is alive, running and associated. */
	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET) ||
	    !(ifp->if_flags & IFF_RUNNING) ||
	    ic->ic_state != IEEE80211_S_RUN) {
		return;
	}

	/*
	 * Management packets should probably be MLME frames
	 * (i.e. hostap "managed" mode); we don't touch the
	 * net80211 management queue.
	 */
	for (; sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] <
	    PGT_QUEUE_FULL_THRESHOLD && !IFQ_IS_EMPTY(&ifp->if_snd);) {
		pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_DATA_LOW_TX]);
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if (m->m_pkthdr.len <= PGT_FRAG_SIZE) {
			/* Frame fits into a single TX fragment. */
			error = pgt_load_tx_desc_frag(sc,
			    PGT_QUEUE_DATA_LOW_TX, pd);
			if (error)
				break;
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_copydata(m, 0, m->m_pkthdr.len, pd->pd_mem);
			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
			    pd, m->m_pkthdr.len, 0);
		} else if (m->m_pkthdr.len <= PGT_FRAG_SIZE * 2) {
			struct pgt_desc *pd2;

			/*
			 * Transmit a fragmented frame if there is
			 * not enough room in one fragment; limit
			 * to two fragments (802.11 itself couldn't
			 * even support a full two.)
			 */
			if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] + 2 >
			    PGT_QUEUE_FULL_THRESHOLD)
				break;
			pd2 = TAILQ_NEXT(pd, pd_link);
			error = pgt_load_tx_desc_frag(sc,
			    PGT_QUEUE_DATA_LOW_TX, pd);
			if (error == 0) {
				error = pgt_load_tx_desc_frag(sc,
				    PGT_QUEUE_DATA_LOW_TX, pd2);
				if (error) {
					/* Second fragment failed: undo the
					 * first and put it back on the free
					 * list. */
					pgt_unload_tx_desc_frag(sc, pd);
					TAILQ_INSERT_HEAD(&sc->sc_freeq[
					    PGT_QUEUE_DATA_LOW_TX], pd,
					    pd_link);
				}
			}
			if (error)
				break;
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_copydata(m, 0, PGT_FRAG_SIZE, pd->pd_mem);
			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
			    pd, PGT_FRAG_SIZE, 1);
			m_copydata(m, PGT_FRAG_SIZE,
			    m->m_pkthdr.len - PGT_FRAG_SIZE, pd2->pd_mem);
			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
			    pd2, m->m_pkthdr.len - PGT_FRAG_SIZE, 0);
		} else {
			/* Too large even for two fragments; count and drop. */
			IFQ_DEQUEUE(&ifp->if_snd, m);
			ifp->if_oerrors++;
			m_freem(m);
			m = NULL;
		}
		if (m != NULL) {
			struct ieee80211_node *ni;
#if NBPFILTER > 0
			if (ifp->if_bpf != NULL)
				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
			ifp->if_opackets++;
			/* Arm the watchdog; sc_txtimer is decremented there. */
			ifp->if_timer = 1;
			sc->sc_txtimer = 5;
			/* Keep the destination node from idling out. */
			ni = ieee80211_find_txnode(&sc->sc_ic,
			    mtod(m, struct ether_header *)->ether_dhost);
			if (ni != NULL) {
				ni->ni_inact = 0;
				if (ni != ic->ic_bss)
					ieee80211_release_node(&sc->sc_ic, ni);
			}
#if NBPFILTER > 0
			if (sc->sc_drvbpf != NULL) {
				struct mbuf mb;
				struct ether_header eh;
				struct pgt_tx_radiotap_hdr *tap = &sc->sc_txtap;

				/*
				 * For the radiotap listener, strip the
				 * Ethernet header and re-encapsulate the
				 * frame as 802.11.
				 */
				bcopy(mtod(m, struct ether_header *), &eh,
				    sizeof(eh));
				m_adj(m, sizeof(eh));
				m = pgt_ieee80211_encap(sc, &eh, m, NULL);

				tap->wt_flags = 0;
				//tap->wt_rate = rate;
				tap->wt_rate = 0;
				tap->wt_chan_freq =
				    htole16(ic->ic_bss->ni_chan->ic_freq);
				tap->wt_chan_flags =
				    htole16(ic->ic_bss->ni_chan->ic_flags);

				if (m != NULL) {
					mb.m_data = (caddr_t)tap;
					mb.m_len = sc->sc_txtap_len;
					mb.m_next = m;
					mb.m_nextpkt = NULL;
					mb.m_type = 0;
					mb.m_flags = 0;

					bpf_mtap(sc->sc_drvbpf, &mb,
					    BPF_DIRECTION_OUT);
				}
			}
#endif
			if (m != NULL)
				m_freem(m);
		}
	}
}
2262 
/*
 * Interface ioctl handler.  Besides the standard interface requests,
 * SIOCG80211ALLNODES is answered by converting the firmware BSS list
 * into wi(4)-style scan results and then into ieee80211_nodereq
 * entries copied out to userland.
 */
int
pgt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t req)
{
	struct pgt_softc *sc = ifp->if_softc;
	struct ifaddr *ifa;
	struct ifreq *ifr;
	struct wi_req *wreq;
	struct ieee80211_nodereq_all *na;
	struct ieee80211com *ic;
        struct pgt_obj_bsslist *pob;
        struct wi_scan_p2_hdr *p2hdr;
        struct wi_scan_res *res;
        uint32_t noise;
	int maxscan, i, j, s, error = 0;

	ic = &sc->sc_ic;
	ifr = (struct ifreq *)req;

	s = splnet();
	switch (cmd) {
	case SIOCS80211SCAN:
		/*
		 * This chip scans always as soon as it gets initialized.
		 */

		/*
		 * Give us a bit time to scan in case we were not
		 * initialized before and let the userland process wait.
		 */
		tsleep(&sc->sc_flags, 0, "pgtsca", hz * SCAN_TIMEOUT);

		break;
	case SIOCG80211ALLNODES: {
		struct ieee80211_nodereq *nr = NULL;
		na = (struct ieee80211_nodereq_all *)req;
		wreq = malloc(sizeof(*wreq), M_DEVBUF, M_WAITOK | M_ZERO);

		/* Fetch noise floor and the firmware's BSS list. */
		maxscan = PGT_OBJ_BSSLIST_NBSS;
		pob = malloc(sizeof(*pob) +
		    sizeof(struct pgt_obj_bss) * maxscan, M_DEVBUF, M_WAITOK);
		error = pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise,
		    sizeof(noise));

		if (error == 0) {
			noise = letoh32(noise);
			error = pgt_oid_get(sc, PGT_OID_BSS_LIST, pob,
			    sizeof(*pob) +
			    sizeof(struct pgt_obj_bss) * maxscan);
		}

		if (error == 0) {
			/* Clamp to what both the firmware reported and the
			 * wi_req buffer can hold. */
			maxscan = min(PGT_OBJ_BSSLIST_NBSS,
			    letoh32(pob->pob_count));
			maxscan = min(maxscan,
			    (sizeof(wreq->wi_val) - sizeof(*p2hdr)) /
			    WI_PRISM2_RES_SIZE);
			p2hdr = (struct wi_scan_p2_hdr *)&wreq->wi_val;
			p2hdr->wi_rsvd = 0;
			p2hdr->wi_reason = 1;
			wreq->wi_len = (maxscan * WI_PRISM2_RES_SIZE) / 2 +
			    sizeof(*p2hdr) / 2;
			wreq->wi_type = WI_RID_SCAN_RES;
		}

		for (na->na_nodes = j = i = 0; i < maxscan &&
		    (na->na_size >= j + sizeof(struct ieee80211_nodereq));
		    i++) {
			/* allocate node space */
			if (nr == NULL)
				nr = malloc(sizeof(*nr), M_DEVBUF, M_WAITOK);

			/* get next BSS scan result */
			res = (struct wi_scan_res *)
			    ((char *)&wreq->wi_val + sizeof(*p2hdr) +
			    i * WI_PRISM2_RES_SIZE);
			pgt_obj_bss2scanres(sc, &pob->pob_bsslist[i],
			    res, noise);

			/* copy it to node structure for ifconfig to read */
			bzero(nr, sizeof(*nr));
			IEEE80211_ADDR_COPY(nr->nr_macaddr, res->wi_bssid);
			IEEE80211_ADDR_COPY(nr->nr_bssid, res->wi_bssid);
			nr->nr_channel = letoh16(res->wi_chan);
			nr->nr_chan_flags = IEEE80211_CHAN_B;
			nr->nr_rssi = letoh16(res->wi_signal);
			nr->nr_max_rssi = 0; /* XXX */
			nr->nr_nwid_len = letoh16(res->wi_ssid_len);
			/* NOTE(review): nr_nwid_len comes from the scan
			 * result; presumed <= sizeof(nr->nr_nwid) because
			 * pgt_obj_bss2scanres() clamps it — confirm. */
			bcopy(res->wi_ssid, nr->nr_nwid, nr->nr_nwid_len);
			nr->nr_intval = letoh16(res->wi_interval);
			nr->nr_capinfo = letoh16(res->wi_capinfo);
			nr->nr_txrate = res->wi_rate == WI_WAVELAN_RES_1M ? 2 :
			    (res->wi_rate == WI_WAVELAN_RES_2M ? 4 :
			    (res->wi_rate == WI_WAVELAN_RES_5M ? 11 :
			    (res->wi_rate == WI_WAVELAN_RES_11M ? 22 : 0)));
			nr->nr_nrates = 0;
			/* NOTE(review): relies on wi_srates containing a zero
			 * terminator within both arrays' bounds — verify the
			 * sizes of wi_srates and nr_rates agree. */
			while (res->wi_srates[nr->nr_nrates] != 0) {
				nr->nr_rates[nr->nr_nrates] =
				    res->wi_srates[nr->nr_nrates] &
				    WI_VAR_SRATES_MASK;
				nr->nr_nrates++;
			}
			nr->nr_flags = 0;
			if (bcmp(nr->nr_macaddr, nr->nr_bssid,
			    IEEE80211_ADDR_LEN) == 0)
				nr->nr_flags |= IEEE80211_NODEREQ_AP;
			error = copyout(nr, (caddr_t)na->na_node + j,
			    sizeof(struct ieee80211_nodereq));
			if (error)
				break;

			/* point to next node entry */
			j += sizeof(struct ieee80211_nodereq);
			na->na_nodes++;
		}
		if (nr)
			free(nr, M_DEVBUF);
		free(pob, M_DEVBUF);
		break;
	}
	case SIOCSIFADDR:
		ifa = (struct ifaddr *)req;
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			 arp_ifinit(&sc->sc_ic.ic_ac, ifa);
#endif
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0) {
				pgt_init(ifp);
				error = ENETRESET;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				pgt_stop(sc, SC_NEEDS_RESET);
				error = ENETRESET;
			}
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &ic->ic_ac) :
		    ether_delmulti(ifr, &ic->ic_ac);

		if (error == ENETRESET)
			error = 0;
		break;
	case SIOCSIFMTU:
		/* The MTU may not exceed a single TX fragment. */
		if (ifr->ifr_mtu > PGT_FRAG_SIZE) {
			error = EINVAL;
			break;
		}
		/* FALLTHROUGH */
	default:
		error = ieee80211_ioctl(ifp, cmd, req);
		break;
	}

	/* ENETRESET from any of the above means "reprogram the hardware". */
	if (error == ENETRESET) {
		pgt_update_hw_from_sw(sc, 0, 0);
		error = 0;
	}
	splx(s);

	return (error);
}
2431 
2432 void
2433 pgt_obj_bss2scanres(struct pgt_softc *sc, struct pgt_obj_bss *pob,
2434     struct wi_scan_res *scanres, uint32_t noise)
2435 {
2436 	struct ieee80211_rateset *rs;
2437 	struct wi_scan_res ap;
2438 	unsigned int i, n;
2439 
2440 	rs = &sc->sc_ic.ic_sup_rates[IEEE80211_MODE_AUTO];
2441 	bzero(&ap, sizeof(ap));
2442 	ap.wi_chan = ieee80211_mhz2ieee(letoh16(pob->pob_channel), 0);
2443 	ap.wi_noise = noise;
2444 	ap.wi_signal = letoh16(pob->pob_rssi);
2445 	IEEE80211_ADDR_COPY(ap.wi_bssid, pob->pob_address);
2446 	ap.wi_interval = letoh16(pob->pob_beacon_period);
2447 	ap.wi_capinfo = letoh16(pob->pob_capinfo);
2448 	ap.wi_ssid_len = min(sizeof(ap.wi_ssid), pob->pob_ssid.pos_length);
2449 	memcpy(ap.wi_ssid, pob->pob_ssid.pos_ssid, ap.wi_ssid_len);
2450 	n = 0;
2451 	for (i = 0; i < 16; i++) {
2452 		if (letoh16(pob->pob_rates) & (1 << i)) {
2453 			if (i > rs->rs_nrates)
2454 				break;
2455 			ap.wi_srates[n++] = ap.wi_rate = rs->rs_rates[i];
2456 			if (n >= sizeof(ap.wi_srates) / sizeof(ap.wi_srates[0]))
2457 				break;
2458 		}
2459 	}
2460 	memcpy(scanres, &ap, WI_PRISM2_RES_SIZE);
2461 }
2462 
2463 void
2464 node_mark_active_ap(void *arg, struct ieee80211_node *ni)
2465 {
2466 	/*
2467 	 * HostAP mode lets all nodes stick around unless
2468 	 * the firmware AP kicks them off.
2469 	 */
2470 	ni->ni_inact = 0;
2471 }
2472 
2473 void
2474 node_mark_active_adhoc(void *arg, struct ieee80211_node *ni)
2475 {
2476 	struct pgt_ieee80211_node *pin;
2477 
2478 	/*
2479 	 * As there is no association in ad-hoc, we let links just
2480 	 * time out naturally as long they are not holding any private
2481 	 * configuration, such as 802.1x authorization.
2482 	 */
2483 	pin = (struct pgt_ieee80211_node *)ni;
2484 	if (pin->pin_dot1x_auth == PIN_DOT1X_AUTHORIZED)
2485 		pin->pin_node.ni_inact = 0;
2486 }
2487 
/*
 * Interface watchdog: drain timed-out transmissions, periodically
 * refresh our view of the hardware state, and keep HostAP/ad-hoc
 * nodes from being timed out a second time by the driver.
 */
void
pgt_watchdog(struct ifnet *ifp)
{
	struct pgt_softc *sc;

	sc = ifp->if_softc;
	/*
	 * Check for timed out transmissions (and make sure to set
	 * this watchdog to fire again if there is still data in the
	 * output device queue).
	 */
	if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] != 0) {
		int count;

		ifp->if_timer = 1;
		if (sc->sc_txtimer && --sc->sc_txtimer == 0) {
			/* sc_txtimer (armed to 5 in pgt_start) expired:
			 * the TX queue is stuck, so drain it. */
			count = pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: timeout %d data transmissions\n",
				    sc->sc_dev.dv_xname, count));
		}
	}
	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return;
	/*
	 * If we're going to kick the device out of power-save mode
	 * just to update the BSSID and such, we should not do it
	 * very often; need to determine in what way to do that.
	 */
	if (ifp->if_flags & IFF_RUNNING &&
	    sc->sc_ic.ic_state != IEEE80211_S_INIT &&
	    sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
		pgt_async_update(sc);

#ifndef IEEE80211_STA_ONLY
	/*
	 * As a firmware-based HostAP, we should not time out
	 * nodes inside the driver additionally to the timeout
	 * that exists in the firmware.  The only things we
	 * should have to deal with timing out when doing HostAP
	 * are the privacy-related.
	 */
	switch (sc->sc_ic.ic_opmode) {
	case IEEE80211_M_HOSTAP:
		ieee80211_iterate_nodes(&sc->sc_ic,
		    node_mark_active_ap, NULL);
		break;
	case IEEE80211_M_IBSS:
		ieee80211_iterate_nodes(&sc->sc_ic,
		    node_mark_active_adhoc, NULL);
		break;
	default:
		break;
	}
#endif
	ieee80211_watchdog(ifp);
	ifp->if_timer = 1;
}
2546 
2547 int
2548 pgt_init(struct ifnet *ifp)
2549 {
2550 	struct pgt_softc *sc = ifp->if_softc;
2551 	struct ieee80211com *ic = &sc->sc_ic;
2552 
2553 	/* set default channel */
2554 	ic->ic_bss->ni_chan = ic->ic_ibss_chan;
2555 
2556 	if (!(sc->sc_flags & (SC_DYING | SC_UNINITIALIZED)))
2557 		pgt_update_hw_from_sw(sc,
2558 		    ic->ic_state != IEEE80211_S_INIT,
2559 		    ic->ic_opmode != IEEE80211_M_MONITOR);
2560 
2561 	ifp->if_flags |= IFF_RUNNING;
2562 	ifp->if_flags &= ~IFF_OACTIVE;
2563 
2564 	/* Begin background scanning */
2565 	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_SCAN, -1);
2566 
2567 	return (0);
2568 }
2569 
2570 /*
2571  * After most every configuration change, everything needs to be fully
2572  * reinitialized.  For some operations (currently, WEP settings
2573  * in ad-hoc+802.1x mode), the change is "soft" and doesn't remove
2574  * "associations," and allows EAP authorization to occur again.
2575  * If keepassoc is specified, the reset operation should try to go
2576  * back to the BSS had before.
2577  */
void
pgt_update_hw_from_sw(struct pgt_softc *sc, int keepassoc, int keepnodes)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct arpcom *ac = &ic->ic_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct pgt_obj_key keyobj;
	struct pgt_obj_ssid essid;
	uint8_t availrates[IEEE80211_RATE_MAXSIZE + 1];
	uint32_t mode, bsstype, config, profile, channel, slot, preamble;
	uint32_t wep, exunencrypted, wepkey, dot1x, auth, mlme;
	unsigned int i;
	int success, shouldbeup, s;

	config = PGT_CONFIG_MANUAL_RUN | PGT_CONFIG_RX_ANNEX;

	/*
	 * Promiscuous mode is currently a no-op since packets transmitted,
	 * while in promiscuous mode, don't ever seem to go anywhere.
	 */
	shouldbeup = ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_UP;

	/* Map net80211 operating mode onto firmware mode/BSS type/802.1x. */
	if (shouldbeup) {
		switch (ic->ic_opmode) {
		case IEEE80211_M_STA:
			if (ifp->if_flags & IFF_PROMISC)
				mode = PGT_MODE_CLIENT;	/* what to do? */
			else
				mode = PGT_MODE_CLIENT;
			bsstype = PGT_BSS_TYPE_STA;
			dot1x = PGT_DOT1X_AUTH_ENABLED;
			break;
#ifndef IEEE80211_STA_ONLY
		case IEEE80211_M_IBSS:
			if (ifp->if_flags & IFF_PROMISC)
				mode = PGT_MODE_CLIENT;	/* what to do? */
			else
				mode = PGT_MODE_CLIENT;
			bsstype = PGT_BSS_TYPE_IBSS;
			dot1x = PGT_DOT1X_AUTH_ENABLED;
			break;
		case IEEE80211_M_HOSTAP:
			mode = PGT_MODE_AP;
			bsstype = PGT_BSS_TYPE_STA;
			/*
			 * For IEEE 802.1x, we need to authenticate and
			 * authorize hosts from here on or they remain
			 * associated but without the ability to send or
			 * receive normal traffic to us (courtesy the
			 * firmware AP implementation).
			 */
			dot1x = PGT_DOT1X_AUTH_ENABLED;
			/*
			 * WDS mode needs several things to work:
			 * discovery of exactly how creating the WDS
			 * links is meant to function, an interface
			 * for this, and ability to encode or decode
			 * the WDS frames.
			 */
			if (sc->sc_wds)
				config |= PGT_CONFIG_WDS;
			break;
#endif
		case IEEE80211_M_MONITOR:
			mode = PGT_MODE_PROMISCUOUS;
			bsstype = PGT_BSS_TYPE_ANY;
			dot1x = PGT_DOT1X_AUTH_NONE;
			break;
		default:
			goto badopmode;
		}
	} else {
badopmode:
		/* Interface down (or unknown mode): park the firmware. */
		mode = PGT_MODE_CLIENT;
		bsstype = PGT_BSS_TYPE_NONE;
	}

	/* Pick the firmware PHY profile and preamble from the 802.11 mode. */
	DPRINTF(("%s: current mode is ", sc->sc_dev.dv_xname));
	switch (ic->ic_curmode) {
	case IEEE80211_MODE_11A:
		profile = PGT_PROFILE_A_ONLY;
		preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
		DPRINTF(("IEEE80211_MODE_11A\n"));
		break;
	case IEEE80211_MODE_11B:
		profile = PGT_PROFILE_B_ONLY;
		preamble = PGT_OID_PREAMBLE_MODE_LONG;
		DPRINTF(("IEEE80211_MODE_11B\n"));
		break;
	case IEEE80211_MODE_11G:
		profile = PGT_PROFILE_G_ONLY;
		preamble = PGT_OID_PREAMBLE_MODE_SHORT;
		DPRINTF(("IEEE80211_MODE_11G\n"));
		break;
	case IEEE80211_MODE_TURBO: /* not handled */
		/* FALLTHROUGH */
	case IEEE80211_MODE_AUTO:
		profile = PGT_PROFILE_MIXED_G_WIFI;
		preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
		DPRINTF(("IEEE80211_MODE_AUTO\n"));
		break;
	default:
		panic("unknown mode %d\n", ic->ic_curmode);
	}

	switch (sc->sc_80211_ioc_auth) {
	case IEEE80211_AUTH_NONE:
		auth = PGT_AUTH_MODE_NONE;
		break;
	case IEEE80211_AUTH_OPEN:
		auth = PGT_AUTH_MODE_OPEN;
		break;
	default:
		auth = PGT_AUTH_MODE_SHARED;
		break;
	}

	if (sc->sc_ic.ic_flags & IEEE80211_F_WEPON) {
		wep = 1;
		exunencrypted = 1;
	} else {
		wep = 0;
		exunencrypted = 0;
	}

	/* The firmware expects all OID values in little-endian order. */
	mlme = htole32(PGT_MLME_AUTO_LEVEL_AUTO);
	wep = htole32(wep);
	exunencrypted = htole32(exunencrypted);
	profile = htole32(profile);
	preamble = htole32(preamble);
	bsstype = htole32(bsstype);
	config = htole32(config);
	mode = htole32(mode);

	if (!wep || !sc->sc_dot1x)
		dot1x = PGT_DOT1X_AUTH_NONE;
	dot1x = htole32(dot1x);
	auth = htole32(auth);

	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		slot = htole32(PGT_OID_SLOT_MODE_SHORT);
	else
		slot = htole32(PGT_OID_SLOT_MODE_DYNAMIC);

	/* Channel 0 tells the firmware to keep the current channel. */
	if (ic->ic_des_chan == IEEE80211_CHAN_ANYC) {
		if (keepassoc)
			channel = 0;
		else
			channel = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
	} else
		channel = ieee80211_chan2ieee(ic, ic->ic_des_chan);

	/* Build a zero-terminated rate array for the current mode. */
	DPRINTF(("%s: set rates", sc->sc_dev.dv_xname));
	for (i = 0; i < ic->ic_sup_rates[ic->ic_curmode].rs_nrates; i++) {
		availrates[i] = ic->ic_sup_rates[ic->ic_curmode].rs_rates[i];
		DPRINTF((" %d", availrates[i]));
	}
	DPRINTF(("\n"));
	availrates[i++] = 0;

	essid.pos_length = min(ic->ic_des_esslen, sizeof(essid.pos_ssid));
	memcpy(&essid.pos_ssid, ic->ic_des_essid, essid.pos_length);

	/*
	 * Push every setting down to the firmware.  NOTE(review): the
	 * one-pass "for (success...)" idiom presumes the SETOID macro
	 * breaks out of this loop on failure, leaving success == 0 —
	 * confirm against the macro definition.
	 */
	s = splnet();
	for (success = 0; success == 0; success = 1) {
		SETOID(PGT_OID_PROFILE, &profile, sizeof(profile));
		SETOID(PGT_OID_CONFIG, &config, sizeof(config));
		SETOID(PGT_OID_MLME_AUTO_LEVEL, &mlme, sizeof(mlme));

		if (!IEEE80211_ADDR_EQ(ic->ic_myaddr, ac->ac_enaddr)) {
			SETOID(PGT_OID_MAC_ADDRESS, ac->ac_enaddr,
			    sizeof(ac->ac_enaddr));
			IEEE80211_ADDR_COPY(ic->ic_myaddr, ac->ac_enaddr);
		}

		SETOID(PGT_OID_MODE, &mode, sizeof(mode));
		SETOID(PGT_OID_BSS_TYPE, &bsstype, sizeof(bsstype));

		if (channel != 0 && channel != IEEE80211_CHAN_ANY)
			SETOID(PGT_OID_CHANNEL, &channel, sizeof(channel));

		if (ic->ic_flags & IEEE80211_F_DESBSSID) {
			SETOID(PGT_OID_BSSID, ic->ic_des_bssid,
			    sizeof(ic->ic_des_bssid));
		} else if (keepassoc) {
			SETOID(PGT_OID_BSSID, ic->ic_bss->ni_bssid,
			    sizeof(ic->ic_bss->ni_bssid));
		}

		SETOID(PGT_OID_SSID, &essid, sizeof(essid));

		if (ic->ic_des_esslen > 0)
			SETOID(PGT_OID_SSID_OVERRIDE, &essid, sizeof(essid));

		SETOID(PGT_OID_RATES, &availrates, i);
		SETOID(PGT_OID_EXTENDED_RATES, &availrates, i);
		SETOID(PGT_OID_PREAMBLE_MODE, &preamble, sizeof(preamble));
		SETOID(PGT_OID_SLOT_MODE, &slot, sizeof(slot));
		SETOID(PGT_OID_AUTH_MODE, &auth, sizeof(auth));
		SETOID(PGT_OID_EXCLUDE_UNENCRYPTED, &exunencrypted,
		    sizeof(exunencrypted));
		SETOID(PGT_OID_DOT1X, &dot1x, sizeof(dot1x));
		SETOID(PGT_OID_PRIVACY_INVOKED, &wep, sizeof(wep));
		/*
		 * Setting WEP key(s)
		 */
		if (letoh32(wep) != 0) {
			keyobj.pok_type = PGT_OBJ_KEY_TYPE_WEP;
			/* key 1 */
			keyobj.pok_length = min(sizeof(keyobj.pok_key),
			    IEEE80211_KEYBUF_SIZE);
			keyobj.pok_length = min(keyobj.pok_length,
			    ic->ic_nw_keys[0].k_len);
			bcopy(ic->ic_nw_keys[0].k_key, keyobj.pok_key,
			    keyobj.pok_length);
			SETOID(PGT_OID_DEFAULT_KEY0, &keyobj, sizeof(keyobj));
			/* key 2 */
			keyobj.pok_length = min(sizeof(keyobj.pok_key),
			    IEEE80211_KEYBUF_SIZE);
			keyobj.pok_length = min(keyobj.pok_length,
			    ic->ic_nw_keys[1].k_len);
			bcopy(ic->ic_nw_keys[1].k_key, keyobj.pok_key,
			    keyobj.pok_length);
			SETOID(PGT_OID_DEFAULT_KEY1, &keyobj, sizeof(keyobj));
			/* key 3 */
			keyobj.pok_length = min(sizeof(keyobj.pok_key),
			    IEEE80211_KEYBUF_SIZE);
			keyobj.pok_length = min(keyobj.pok_length,
			    ic->ic_nw_keys[2].k_len);
			bcopy(ic->ic_nw_keys[2].k_key, keyobj.pok_key,
			    keyobj.pok_length);
			SETOID(PGT_OID_DEFAULT_KEY2, &keyobj, sizeof(keyobj));
			/* key 4 */
			keyobj.pok_length = min(sizeof(keyobj.pok_key),
			    IEEE80211_KEYBUF_SIZE);
			keyobj.pok_length = min(keyobj.pok_length,
			    ic->ic_nw_keys[3].k_len);
			bcopy(ic->ic_nw_keys[3].k_key, keyobj.pok_key,
			    keyobj.pok_length);
			SETOID(PGT_OID_DEFAULT_KEY3, &keyobj, sizeof(keyobj));

			wepkey = htole32(ic->ic_wep_txkey);
			SETOID(PGT_OID_DEFAULT_KEYNUM, &wepkey, sizeof(wepkey));
		}
		/* set mode again to commit */
		SETOID(PGT_OID_MODE, &mode, sizeof(mode));
	}
	splx(s);

	if (success) {
		/* Optionally preserve the node table across the reset. */
		if (shouldbeup && keepnodes)
			sc->sc_flags |= SC_NOFREE_ALLNODES;
		if (shouldbeup)
			ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		else
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	} else {
		printf("%s: problem setting modes\n", sc->sc_dev.dv_xname);
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	}
}
2839 
/*
 * Apply a firmware MLME trap to the net80211 node table while running
 * as a (firmware-driven) HostAP: create/update nodes on association
 * and release them on disassociation.
 */
void
pgt_hostap_handle_mlme(struct pgt_softc *sc, uint32_t oid,
    struct pgt_obj_mlme *mlme)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct pgt_ieee80211_node *pin;
	struct ieee80211_node *ni;

	ni = ieee80211_find_node(ic, mlme->pom_address);
	pin = (struct pgt_ieee80211_node *)ni;
	switch (oid) {
	case PGT_OID_DISASSOCIATE:
		if (ni != NULL)
			ieee80211_release_node(&sc->sc_ic, ni);
		break;
	case PGT_OID_ASSOCIATE:
		if (ni == NULL) {
			/* First time we hear of this station. */
			ni = ieee80211_dup_bss(ic, mlme->pom_address);
			if (ni == NULL)
				break;
			ic->ic_newassoc(ic, ni, 1);
			pin = (struct pgt_ieee80211_node *)ni;
		}
		ni->ni_associd = letoh16(mlme->pom_id);
		pin->pin_mlme_state = letoh16(mlme->pom_state);
		break;
	default:
		/* Other MLME traps just refresh the cached MLME state. */
		if (pin != NULL)
			pin->pin_mlme_state = letoh16(mlme->pom_state);
		break;
	}
}
2872 
2873 /*
2874  * Either in response to an event or after a certain amount of time,
2875  * synchronize our idea of the network we're part of from the hardware.
2876  */
2877 void
2878 pgt_update_sw_from_hw(struct pgt_softc *sc, struct pgt_async_trap *pa,
2879 	    struct mbuf *args)
2880 {
2881 	struct ieee80211com *ic = &sc->sc_ic;
2882 	struct pgt_obj_ssid ssid;
2883 	struct pgt_obj_bss bss;
2884 	uint32_t channel, noise, ls;
2885 	int error, s;
2886 
2887 	if (pa != NULL) {
2888 		struct pgt_obj_mlme *mlme;
2889 		uint32_t oid;
2890 
2891 		oid = *mtod(args, uint32_t *);
2892 		m_adj(args, sizeof(uint32_t));
2893 		if (sc->sc_debug & SC_DEBUG_TRAP)
2894 			DPRINTF(("%s: trap: oid %#x len %u\n",
2895 			    sc->sc_dev.dv_xname, oid, args->m_len));
2896 		switch (oid) {
2897 		case PGT_OID_LINK_STATE:
2898 			if (args->m_len < sizeof(uint32_t))
2899 				break;
2900 			ls = letoh32(*mtod(args, uint32_t *));
2901 			if (sc->sc_debug & (SC_DEBUG_TRAP | SC_DEBUG_LINK))
2902 				DPRINTF(("%s: %s: link rate %u\n",
2903 				    sc->sc_dev.dv_xname, __func__, ls));
2904 			if (ls)
2905 				ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2906 			else
2907 				ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2908 			goto gotlinkstate;
2909 		case PGT_OID_DEAUTHENTICATE:
2910 		case PGT_OID_AUTHENTICATE:
2911 		case PGT_OID_DISASSOCIATE:
2912 		case PGT_OID_ASSOCIATE:
2913 			if (args->m_len < sizeof(struct pgt_obj_mlme))
2914 				break;
2915 			mlme = mtod(args, struct pgt_obj_mlme *);
2916 			if (sc->sc_debug & SC_DEBUG_TRAP)
2917 				DPRINTF(("%s: mlme: address "
2918 				    "%s id 0x%02x state 0x%02x code 0x%02x\n",
2919 				    sc->sc_dev.dv_xname,
2920 				    ether_sprintf(mlme->pom_address),
2921 				    letoh16(mlme->pom_id),
2922 				    letoh16(mlme->pom_state),
2923 				    letoh16(mlme->pom_code)));
2924 #ifndef IEEE80211_STA_ONLY
2925 			if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2926 				pgt_hostap_handle_mlme(sc, oid, mlme);
2927 #endif
2928 			break;
2929 		}
2930 		return;
2931 	}
2932 	if (ic->ic_state == IEEE80211_S_SCAN) {
2933 		s = splnet();
2934 		error = pgt_oid_get(sc, PGT_OID_LINK_STATE, &ls, sizeof(ls));
2935 		splx(s);
2936 		if (error)
2937 			return;
2938 		DPRINTF(("%s: up_sw_from_hw: link %u\n", sc->sc_dev.dv_xname,
2939 		    htole32(ls)));
2940 		if (ls != 0)
2941 			ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2942 	}
2943 
2944 gotlinkstate:
2945 	s = splnet();
2946 	if (pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise, sizeof(noise)) != 0)
2947 		goto out;
2948 	sc->sc_noise = letoh32(noise);
2949 	if (ic->ic_state == IEEE80211_S_RUN) {
2950 		if (pgt_oid_get(sc, PGT_OID_CHANNEL, &channel,
2951 		    sizeof(channel)) != 0)
2952 			goto out;
2953 		channel = min(letoh32(channel), IEEE80211_CHAN_MAX);
2954 		ic->ic_bss->ni_chan = &ic->ic_channels[channel];
2955 		if (pgt_oid_get(sc, PGT_OID_BSSID, ic->ic_bss->ni_bssid,
2956 		    sizeof(ic->ic_bss->ni_bssid)) != 0)
2957 			goto out;
2958 		IEEE80211_ADDR_COPY(&bss.pob_address, ic->ic_bss->ni_bssid);
2959 		error = pgt_oid_retrieve(sc, PGT_OID_BSS_FIND, &bss,
2960 		    sizeof(bss));
2961 		if (error == 0)
2962 			ic->ic_bss->ni_rssi = bss.pob_rssi;
2963 		else if (error != EPERM)
2964 			goto out;
2965 		error = pgt_oid_get(sc, PGT_OID_SSID, &ssid, sizeof(ssid));
2966 		if (error)
2967 			goto out;
2968 		ic->ic_bss->ni_esslen = min(ssid.pos_length,
2969 		    sizeof(ic->ic_bss->ni_essid));
2970 		memcpy(ic->ic_bss->ni_essid, ssid.pos_ssid,
2971 		    ssid.pos_length);
2972 	}
2973 
2974 out:
2975 	splx(s);
2976 }
2977 
2978 int
2979 pgt_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
2980 {
2981 	struct pgt_softc *sc = ic->ic_if.if_softc;
2982 	enum ieee80211_state ostate;
2983 
2984 	ostate = ic->ic_state;
2985 
2986 	DPRINTF(("%s: newstate %s -> %s\n", sc->sc_dev.dv_xname,
2987 	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]));
2988 
2989 	switch (nstate) {
2990 	case IEEE80211_S_INIT:
2991 		if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] == 0)
2992 			ic->ic_if.if_timer = 0;
2993 		ic->ic_mgt_timer = 0;
2994 		ic->ic_flags &= ~IEEE80211_F_SIBSS;
2995 		ieee80211_free_allnodes(ic);
2996 		break;
2997 	case IEEE80211_S_SCAN:
2998 		ic->ic_if.if_timer = 1;
2999 		ic->ic_mgt_timer = 0;
3000 		if (sc->sc_flags & SC_NOFREE_ALLNODES)
3001 			sc->sc_flags &= ~SC_NOFREE_ALLNODES;
3002 		else
3003 			ieee80211_free_allnodes(ic);
3004 
3005 #ifndef IEEE80211_STA_ONLY
3006 		/* Just use any old channel; we override it anyway. */
3007 		if (ic->ic_opmode == IEEE80211_M_HOSTAP)
3008 			ieee80211_create_ibss(ic, ic->ic_ibss_chan);
3009 #endif
3010 		break;
3011 	case IEEE80211_S_RUN:
3012 		ic->ic_if.if_timer = 1;
3013 		break;
3014 	default:
3015 		break;
3016 	}
3017 
3018 	return (sc->sc_newstate(ic, nstate, arg));
3019 }
3020 
3021 int
3022 pgt_drain_tx_queue(struct pgt_softc *sc, enum pgt_queue pq)
3023 {
3024 	int wokeup = 0;
3025 
3026 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
3027 	    sc->sc_cbdmam->dm_mapsize,
3028 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
3029 	sc->sc_cb->pcb_device_curfrag[pq] =
3030 	    sc->sc_cb->pcb_driver_curfrag[pq];
3031 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
3032 	    sc->sc_cbdmam->dm_mapsize,
3033 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
3034 	while (!TAILQ_EMPTY(&sc->sc_dirtyq[pq])) {
3035 		struct pgt_desc *pd;
3036 
3037 		pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
3038 		TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
3039 		sc->sc_dirtyq_count[pq]--;
3040 		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
3041 		sc->sc_freeq_count[pq]++;
3042 		pgt_unload_tx_desc_frag(sc, pd);
3043 		if (sc->sc_debug & SC_DEBUG_QUEUES)
3044 			DPRINTF(("%s: queue: tx %u <- [%u] (drained)\n",
3045 			    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
3046 		wokeup++;
3047 		if (pgt_queue_is_data(pq))
3048 			sc->sc_ic.ic_if.if_oerrors++;
3049 	}
3050 
3051 	return (wokeup);
3052 }
3053 
/*
 * Allocate all DMA resources needed by the device: the shared
 * control block, the powersave frame buffer and the fragment
 * descriptors of every rx/tx queue.  Returns 0 on success or an
 * errno-style error; on any failure everything allocated so far is
 * released again via pgt_dma_free().
 */
int
pgt_dma_alloc(struct pgt_softc *sc)
{
	size_t size;
	int i, error, nsegs;

	/* Start every queue with empty free and dirty descriptor lists. */
	for (i = 0; i < PGT_QUEUE_COUNT; i++) {
		TAILQ_INIT(&sc->sc_freeq[i]);
		TAILQ_INIT(&sc->sc_dirtyq[i]);
	}

	/*
	 * control block
	 */
	size = sizeof(struct pgt_control_block);

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_cbdmam);
	if (error != 0) {
		printf("%s: can not create DMA tag for control block\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
	    0, &sc->sc_cbdmas, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can not allocate DMA memory for control block\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cbdmas, nsegs,
	    size, (caddr_t *)&sc->sc_cb, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can not map DMA memory for control block\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}
	/* The device reads indices out of the control block; start clean. */
	bzero(sc->sc_cb, size);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cbdmam,
	    sc->sc_cb, size, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can not load DMA map for control block\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	/*
	 * powersave
	 */
	size = PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT;

	/*
	 * NOTE(review): this map uses BUS_DMA_ALLOCNOW while the control
	 * block map above uses BUS_DMA_NOWAIT -- presumably intentional
	 * (the psm map is loaded with WAITOK below); confirm.
	 */
	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_ALLOCNOW, &sc->sc_psmdmam);
	if (error != 0) {
		printf("%s: can not create DMA tag for powersave\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
	   0, &sc->sc_psmdmas, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can not allocate DMA memory for powersave\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_psmdmas, nsegs,
	    size, (caddr_t *)&sc->sc_psmbuf, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can not map DMA memory for powersave\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}
	bzero(sc->sc_psmbuf, size);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_psmdmam,
	    sc->sc_psmbuf, size, NULL, BUS_DMA_WAITOK);
	if (error != 0) {
		printf("%s: can not load DMA map for powersave\n",
		    sc->sc_dev.dv_xname);
		goto out;
	}

	/*
	 * fragments
	 */
	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_RX);
	if (error != 0)
		goto out;

	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_TX);
	if (error != 0)
		goto out;

	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
	if (error != 0)
		goto out;

	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
	if (error != 0)
		goto out;

	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_RX);
	if (error != 0)
		goto out;

	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_TX);
	if (error != 0)
		goto out;

out:
	/* Single cleanup point: release partial allocations on failure. */
	if (error) {
		printf("%s: error in DMA allocation\n", sc->sc_dev.dv_xname);
		pgt_dma_free(sc);
	}

	return (error);
}
3176 
3177 int
3178 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq)
3179 {
3180 	struct pgt_desc *pd;
3181 	struct pgt_frag *pcbqueue;
3182 	size_t i, qsize;
3183 	int error, nsegs;
3184 
3185 	switch (pq) {
3186 		case PGT_QUEUE_DATA_LOW_RX:
3187 			pcbqueue = sc->sc_cb->pcb_data_low_rx;
3188 			qsize = PGT_QUEUE_DATA_RX_SIZE;
3189 			break;
3190 		case PGT_QUEUE_DATA_LOW_TX:
3191 			pcbqueue = sc->sc_cb->pcb_data_low_tx;
3192 			qsize = PGT_QUEUE_DATA_TX_SIZE;
3193 			break;
3194 		case PGT_QUEUE_DATA_HIGH_RX:
3195 			pcbqueue = sc->sc_cb->pcb_data_high_rx;
3196 			qsize = PGT_QUEUE_DATA_RX_SIZE;
3197 			break;
3198 		case PGT_QUEUE_DATA_HIGH_TX:
3199 			pcbqueue = sc->sc_cb->pcb_data_high_tx;
3200 			qsize = PGT_QUEUE_DATA_TX_SIZE;
3201 			break;
3202 		case PGT_QUEUE_MGMT_RX:
3203 			pcbqueue = sc->sc_cb->pcb_mgmt_rx;
3204 			qsize = PGT_QUEUE_MGMT_SIZE;
3205 			break;
3206 		case PGT_QUEUE_MGMT_TX:
3207 			pcbqueue = sc->sc_cb->pcb_mgmt_tx;
3208 			qsize = PGT_QUEUE_MGMT_SIZE;
3209 			break;
3210 	}
3211 
3212 	for (i = 0; i < qsize; i++) {
3213 		pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3214 
3215 		error = bus_dmamap_create(sc->sc_dmat, PGT_FRAG_SIZE, 1,
3216 		    PGT_FRAG_SIZE, 0, BUS_DMA_ALLOCNOW, &pd->pd_dmam);
3217 		if (error != 0) {
3218 			printf("%s: can not create DMA tag for fragment\n",
3219 			    sc->sc_dev.dv_xname);
3220 			free(pd, M_DEVBUF);
3221 			break;
3222 		}
3223 
3224 		error = bus_dmamem_alloc(sc->sc_dmat, PGT_FRAG_SIZE, PAGE_SIZE,
3225 		    0, &pd->pd_dmas, 1, &nsegs, BUS_DMA_WAITOK);
3226 		if (error != 0) {
3227 			printf("%s: error alloc frag %u on queue %u\n",
3228 			    sc->sc_dev.dv_xname, i, pq);
3229 			free(pd, M_DEVBUF);
3230 			break;
3231 		}
3232 
3233 		error = bus_dmamem_map(sc->sc_dmat, &pd->pd_dmas, nsegs,
3234 		    PGT_FRAG_SIZE, (caddr_t *)&pd->pd_mem, BUS_DMA_WAITOK);
3235 		if (error != 0) {
3236 			printf("%s: error map frag %u on queue %u\n",
3237 			    sc->sc_dev.dv_xname, i, pq);
3238 			free(pd, M_DEVBUF);
3239 			break;
3240 		}
3241 
3242 		if (pgt_queue_is_rx(pq)) {
3243 			error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam,
3244 			    pd->pd_mem, PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
3245 			if (error != 0) {
3246 				printf("%s: error load frag %u on queue %u\n",
3247 				    sc->sc_dev.dv_xname, i, pq);
3248 				bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas,
3249 				    nsegs);
3250 				free(pd, M_DEVBUF);
3251 				break;
3252 			}
3253 			pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
3254 		}
3255 		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
3256 	}
3257 
3258 	return (error);
3259 }
3260 
3261 void
3262 pgt_dma_free(struct pgt_softc *sc)
3263 {
3264 	/*
3265 	 * fragments
3266 	 */
3267 	if (sc->sc_dmat != NULL) {
3268 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3269 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3270 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3271 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3272 		pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_RX);
3273 		pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_TX);
3274 	}
3275 
3276 	/*
3277 	 * powersave
3278 	 */
3279 	if (sc->sc_psmbuf != NULL) {
3280 		bus_dmamap_unload(sc->sc_dmat, sc->sc_psmdmam);
3281 		bus_dmamem_free(sc->sc_dmat, &sc->sc_psmdmas, 1);
3282 		sc->sc_psmbuf = NULL;
3283 		sc->sc_psmdmam = NULL;
3284 	}
3285 
3286 	/*
3287 	 * control block
3288 	 */
3289 	if (sc->sc_cb != NULL) {
3290 		bus_dmamap_unload(sc->sc_dmat, sc->sc_cbdmam);
3291 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cbdmas, 1);
3292 		sc->sc_cb = NULL;
3293 		sc->sc_cbdmam = NULL;
3294 	}
3295 }
3296 
3297 void
3298 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq)
3299 {
3300 	struct pgt_desc	*pd;
3301 
3302 	while (!TAILQ_EMPTY(&sc->sc_freeq[pq])) {
3303 		pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
3304 		TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
3305 		if (pd->pd_dmam != NULL) {
3306 			bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
3307 			pd->pd_dmam = NULL;
3308 		}
3309 		bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas, 1);
3310 		free(pd, M_DEVBUF);
3311 	}
3312 }
3313 
3314 void
3315 pgt_shutdown(void *arg)
3316 {
3317 	struct pgt_softc *sc = arg;
3318 
3319 	DPRINTF(("%s: %s\n", sc->sc_dev.dv_xname, __func__));
3320 
3321 	pgt_stop(sc, SC_DYING);
3322 }
3323 
3324 void
3325 pgt_power(int why, void *arg)
3326 {
3327 	struct pgt_softc *sc = arg;
3328 	struct ifnet *ifp = &sc->sc_ic.ic_if;
3329 	int s;
3330 
3331 	DPRINTF(("%s: %s(%d)\n", sc->sc_dev.dv_xname, __func__, why));
3332 
3333 	s = splnet();
3334 
3335 	switch (why) {
3336 	case PWR_STANDBY:
3337 	case PWR_SUSPEND:
3338 		pgt_stop(sc, SC_NEEDS_RESET);
3339 		pgt_update_hw_from_sw(sc, 0, 0);
3340 
3341 		if (sc->sc_power != NULL)
3342 			(*sc->sc_power)(sc, why);
3343 		break;
3344 	case PWR_RESUME:
3345 		if (sc->sc_power != NULL)
3346 			(*sc->sc_power)(sc, why);
3347 
3348 		pgt_stop(sc, SC_NEEDS_RESET);
3349 		pgt_update_hw_from_sw(sc, 0, 0);
3350 
3351 		if ((ifp->if_flags & IFF_UP) &&
3352 		    !(ifp->if_flags & IFF_RUNNING)) {
3353 			pgt_init(ifp);
3354 			pgt_update_hw_from_sw(sc, 0, 0);
3355 		}
3356 		break;
3357 	}
3358 
3359 	splx(s);
3360 }
3361