/*	$OpenBSD: pgt.c,v 1.104 2023/11/10 15:51:20 bluhm Exp $  */

/*
 * Copyright (c) 2006 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2006 Marcus Glocker <mglocker@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2004 Fujitsu Laboratories of America, Inc.
 * Copyright (c) 2004 Brian Fundakowski Feldman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/kthread.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/device.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_llc.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/ic/pgtreg.h>
#include <dev/ic/pgtvar.h>

#include <dev/ic/if_wireg.h>
#include <dev/ic/if_wi_ieee.h>
#include <dev/ic/if_wivar.h>

#ifdef PGT_DEBUG
#define DPRINTF(x)	do { printf x; } while (0)
#else
#define DPRINTF(x)
#endif

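/*
 * Note that SETOID aborts an OID-setting sequence on the first failure
 * with `break', so callers are expected to invoke it from inside a loop
 * or switch that the break can leave.
 */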
#define	SETOID(oid, var, size) {					\
	if (pgt_oid_set(sc, oid, var, size) != 0)			\
		break;							\
}

/*
 * This is a driver for the Intersil Prism family of 802.11g network cards,
 * based upon version 1.2 of the Linux driver.
 */

#define SCAN_TIMEOUT			5	/* 5 seconds */

struct cfdriver pgt_cd = {
        NULL, "pgt", DV_IFNET
};

void	 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr);
int	 pgt_media_change(struct ifnet *ifp);
void	 pgt_write_memory_barrier(struct pgt_softc *);
uint32_t pgt_read_4(struct pgt_softc *, uint16_t);
void	 pgt_write_4(struct pgt_softc *, uint16_t, uint32_t);
void	 pgt_write_4_flush(struct pgt_softc *, uint16_t, uint32_t);
void	 pgt_debug_events(struct pgt_softc *, const char *);
uint32_t pgt_queue_frags_pending(struct pgt_softc *, enum pgt_queue);
void	 pgt_reinit_rx_desc_frag(struct pgt_softc *, struct pgt_desc *);
int	 pgt_load_tx_desc_frag(struct pgt_softc *, enum pgt_queue,
	     struct pgt_desc *);
void	 pgt_unload_tx_desc_frag(struct pgt_softc *, struct pgt_desc *);
int	 pgt_load_firmware(struct pgt_softc *);
void	 pgt_cleanup_queue(struct pgt_softc *, enum pgt_queue,
	     struct pgt_frag *);
int	 pgt_reset(struct pgt_softc *);
void	 pgt_stop(struct pgt_softc *, unsigned int);
void	 pgt_reboot(struct pgt_softc *);
void	 pgt_init_intr(struct pgt_softc *);
void	 pgt_update_intr(struct pgt_softc *, int);
struct mbuf
	*pgt_ieee80211_encap(struct pgt_softc *, struct ether_header *,
	     struct mbuf *, struct ieee80211_node **);
void	 pgt_input_frames(struct pgt_softc *, struct mbuf *);
void	 pgt_wakeup_intr(struct pgt_softc *);
void	 pgt_sleep_intr(struct pgt_softc *);
void	 pgt_empty_traps(struct pgt_softc_kthread *);
void	 pgt_per_device_kthread(void *);
void	 pgt_async_reset(struct pgt_softc *);
void	 pgt_async_update(struct pgt_softc *);
void	 pgt_txdone(struct pgt_softc *, enum pgt_queue);
void	 pgt_rxdone(struct pgt_softc *, enum pgt_queue);
void	 pgt_trap_received(struct pgt_softc *, uint32_t, void *, size_t);
void	 pgt_mgmtrx_completion(struct pgt_softc *, struct pgt_mgmt_desc *);
struct mbuf
	*pgt_datarx_completion(struct pgt_softc *, enum pgt_queue);
int	 pgt_oid_get(struct pgt_softc *, enum pgt_oid, void *, size_t);
int	 pgt_oid_retrieve(struct pgt_softc *, enum pgt_oid, void *, size_t);
int	 pgt_oid_set(struct pgt_softc *, enum pgt_oid, const void *, size_t);
void	 pgt_state_dump(struct pgt_softc *);
int	 pgt_mgmt_request(struct pgt_softc *, struct pgt_mgmt_desc *);
void	 pgt_desc_transmit(struct pgt_softc *, enum pgt_queue,
	     struct pgt_desc *, uint16_t, int);
void	 pgt_maybe_trigger(struct pgt_softc *, enum pgt_queue);
struct ieee80211_node
	*pgt_ieee80211_node_alloc(struct ieee80211com *);
void	 pgt_ieee80211_newassoc(struct ieee80211com *,
	     struct ieee80211_node *, int);
void	 pgt_ieee80211_node_free(struct ieee80211com *,
	    struct ieee80211_node *);
void	 pgt_ieee80211_node_copy(struct ieee80211com *,
	     struct ieee80211_node *,
	     const struct ieee80211_node *);
int	 pgt_ieee80211_send_mgmt(struct ieee80211com *,
	     struct ieee80211_node *, int, int, int);
int	 pgt_net_attach(struct pgt_softc *);
void	 pgt_start(struct ifnet *);
int	 pgt_ioctl(struct ifnet *, u_long, caddr_t);
void	 pgt_obj_bss2scanres(struct pgt_softc *,
	     struct pgt_obj_bss *, struct wi_scan_res *, uint32_t);
void	 node_mark_active_ap(void *, struct ieee80211_node *);
void	 node_mark_active_adhoc(void *, struct ieee80211_node *);
void	 pgt_watchdog(struct ifnet *);
int	 pgt_init(struct ifnet *);
void	 pgt_update_hw_from_sw(struct pgt_softc *, int);
void	 pgt_hostap_handle_mlme(struct pgt_softc *, uint32_t,
	     struct pgt_obj_mlme *);
void	 pgt_update_sw_from_hw(struct pgt_softc *,
	     struct pgt_async_trap *, struct mbuf *);
int	 pgt_newstate(struct ieee80211com *, enum ieee80211_state, int);
int	 pgt_drain_tx_queue(struct pgt_softc *, enum pgt_queue);
int	 pgt_dma_alloc(struct pgt_softc *);
int	 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq);
void	 pgt_dma_free(struct pgt_softc *);
void	 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq);
void	 pgt_wakeup(struct pgt_softc *);

void
pgt_write_memory_barrier(struct pgt_softc *sc)
{
	bus_space_barrier(sc->sc_iotag, sc->sc_iohandle, 0, 0,
	    BUS_SPACE_BARRIER_WRITE);
}

u_int32_t
pgt_read_4(struct pgt_softc *sc, uint16_t offset)
{
	return (bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, offset));
}

void
pgt_write_4(struct pgt_softc *sc, uint16_t offset, uint32_t value)
{
	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
}

/*
 * Write out 4 bytes and cause a PCI flush by reading back in on a
 * harmless register.
 */
void
pgt_write_4_flush(struct pgt_softc *sc, uint16_t offset, uint32_t value)
{
	bus_space_write_4(sc->sc_iotag, sc->sc_iohandle, offset, value);
	(void)bus_space_read_4(sc->sc_iotag, sc->sc_iohandle, PGT_REG_INT_EN);
}

/*
 * Print the state of events in the queues from an interrupt or a trigger.
 */
void
pgt_debug_events(struct pgt_softc *sc, const char *when)
{
#define	COUNT(i)							\
	letoh32(sc->sc_cb->pcb_driver_curfrag[i]) -			\
	letoh32(sc->sc_cb->pcb_device_curfrag[i])
	if (sc->sc_debug & SC_DEBUG_EVENTS)
		DPRINTF(("%s: ev%s: %u %u %u %u %u %u\n",
		    sc->sc_dev.dv_xname, when, COUNT(0), COUNT(1), COUNT(2),
		    COUNT(3), COUNT(4), COUNT(5)));
#undef COUNT
}

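/*
 * The per-queue progress counters in the control block are free-running
 * uint32_t values: pcb_driver_curfrag counts fragments the driver has
 * handed to the device, pcb_device_curfrag counts fragments the device
 * has consumed.  Their unsigned difference is the number of fragments
 * still pending, and stays correct across counter wrap-around.
 */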
uint32_t
pgt_queue_frags_pending(struct pgt_softc *sc, enum pgt_queue pq)
{
	return (letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) -
	    letoh32(sc->sc_cb->pcb_device_curfrag[pq]));
}

void
pgt_reinit_rx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = 0;

	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
}

int
pgt_load_tx_desc_frag(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_desc *pd)
{
	int error;

	error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam, pd->pd_mem,
	    PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
	if (error) {
		DPRINTF(("%s: unable to load %s tx DMA: %d\n",
		    sc->sc_dev.dv_xname,
		    pgt_queue_is_data(pq) ? "data" : "mgmt", error));
		return (error);
	}
	pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
	pd->pd_fragp->pf_addr = htole32((uint32_t)pd->pd_dmaaddr);
	pd->pd_fragp->pf_size = htole16(PGT_FRAG_SIZE);
	pd->pd_fragp->pf_flags = htole16(0);

	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0, pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	return (0);
}

void
pgt_unload_tx_desc_frag(struct pgt_softc *sc, struct pgt_desc *pd)
{
	bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
	pd->pd_dmaaddr = 0;
}

int
pgt_load_firmware(struct pgt_softc *sc)
{
	int error, reg, dirreg, fwoff, ucodeoff, fwlen;
	uint8_t *ucode;
	uint32_t *uc;
	size_t size;
	char *name;

	if (sc->sc_flags & SC_ISL3877)
		name = "pgt-isl3877";
	else
		name = "pgt-isl3890";	/* includes isl3880 */

	error = loadfirmware(name, &ucode, &size);

	if (error != 0) {
		DPRINTF(("%s: error %d, could not read firmware %s\n",
		    sc->sc_dev.dv_xname, error, name));
		return (EIO);
	}

	if (size & 3) {
		DPRINTF(("%s: bad firmware size %zu\n",
		    sc->sc_dev.dv_xname, size));
		free(ucode, M_DEVBUF, 0);
		return (EINVAL);
	}

	pgt_reboot(sc);

	fwoff = 0;
	ucodeoff = 0;
	uc = (uint32_t *)ucode;
	reg = PGT_FIRMWARE_INTERNAL_OFFSET;
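	/*
	 * The device exposes only a small direct-memory window; copy the
	 * image in PGT_DIRECT_MEMORY_SIZE chunks.  For each chunk, point
	 * PGT_REG_DIR_MEM_BASE at the next internal destination, stream
	 * the words through the window, and flush on the final word to
	 * push the chunk out before the window moves.
	 */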
	while (fwoff < size) {
		pgt_write_4_flush(sc, PGT_REG_DIR_MEM_BASE, reg);

		if ((size - fwoff) >= PGT_DIRECT_MEMORY_SIZE)
			fwlen = PGT_DIRECT_MEMORY_SIZE;
		else
			fwlen = size - fwoff;

		dirreg = PGT_DIRECT_MEMORY_OFFSET;
		while (fwlen > 4) {
			pgt_write_4(sc, dirreg, uc[ucodeoff]);
			fwoff += 4;
			dirreg += 4;
			reg += 4;
			fwlen -= 4;
			ucodeoff++;
		}
		pgt_write_4_flush(sc, dirreg, uc[ucodeoff]);
		fwoff += 4;
		dirreg += 4;
		reg += 4;
		fwlen -= 4;
		ucodeoff++;
	}
	DPRINTF(("%s: %d bytes microcode loaded from %s\n",
	    sc->sc_dev.dv_xname, fwoff, name));

	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_CLOCKRUN);
	reg |= PGT_CTRL_STAT_RAMBOOT;
	pgt_write_4_flush(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg |= PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg &= ~PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	free(ucode, M_DEVBUF, 0);

	return (0);
}

void
pgt_cleanup_queue(struct pgt_softc *sc, enum pgt_queue pq,
    struct pgt_frag *pqfrags)
{
	struct pgt_desc *pd;
	unsigned int i;

	sc->sc_cb->pcb_device_curfrag[pq] = 0;
	i = 0;
	/* XXX why only freeq ??? */
	TAILQ_FOREACH(pd, &sc->sc_freeq[pq], pd_link) {
		pd->pd_fragnum = i;
		pd->pd_fragp = &pqfrags[i];
		if (pgt_queue_is_rx(pq))
			pgt_reinit_rx_desc_frag(sc, pd);
		i++;
	}
	sc->sc_freeq_count[pq] = i;
	/*
	 * The ring buffer describes how many free buffers are available from
	 * the host (for receive queues) or how many are pending (for
	 * transmit queues).
	 */
	if (pgt_queue_is_rx(pq))
		sc->sc_cb->pcb_driver_curfrag[pq] = htole32(i);
	else
		sc->sc_cb->pcb_driver_curfrag[pq] = 0;
}

/*
 * Turn off interrupts, reset the device (possibly loading firmware),
 * and put everything in a known state.
 */
int
pgt_reset(struct pgt_softc *sc)
{
	int error;

	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/*
	 * Set up the management receive queue, assuming there are no
	 * requests in progress.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_RX,
	    &sc->sc_cb->pcb_data_low_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_LOW_TX,
	    &sc->sc_cb->pcb_data_low_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_RX,
	    &sc->sc_cb->pcb_data_high_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_DATA_HIGH_TX,
	    &sc->sc_cb->pcb_data_high_tx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_RX,
	    &sc->sc_cb->pcb_mgmt_rx[0]);
	pgt_cleanup_queue(sc, PGT_QUEUE_MGMT_TX,
	    &sc->sc_cb->pcb_mgmt_tx[0]);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);

	/* load firmware */
	if (sc->sc_flags & SC_NEEDS_FIRMWARE) {
		error = pgt_load_firmware(sc);
		if (error) {
			printf("%s: firmware load failed\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
		sc->sc_flags &= ~SC_NEEDS_FIRMWARE;
		DPRINTF(("%s: firmware loaded\n", sc->sc_dev.dv_xname));
	}

	/* upload the control block's DMA address */
	pgt_write_4_flush(sc, PGT_REG_CTRL_BLK_BASE,
	    htole32((uint32_t)sc->sc_cbdmam->dm_segs[0].ds_addr));
	DELAY(PGT_WRITEIO_DELAY);

	/* send a reset event */
	pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_RESET);
	DELAY(PGT_WRITEIO_DELAY);

	/* await only the initialization interrupt */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_INIT);
	DELAY(PGT_WRITEIO_DELAY);

	return (0);
}

/*
 * If we're trying to reset and the device has seemingly not been detached,
 * we'll spend a minute seeing if we can't do the reset.
 */
void
pgt_stop(struct pgt_softc *sc, unsigned int flag)
{
	struct ieee80211com *ic;
	unsigned int wokeup;
	int tryagain = 0;

	ic = &sc->sc_ic;

	ic->ic_if.if_flags &= ~IFF_RUNNING;
	sc->sc_flags |= SC_UNINITIALIZED;
	sc->sc_flags |= flag;

	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
	pgt_drain_tx_queue(sc, PGT_QUEUE_MGMT_TX);

trying_again:
	/* disable all interrupts */
	pgt_write_4_flush(sc, PGT_REG_INT_EN, 0);
	DELAY(PGT_WRITEIO_DELAY);

	/* reboot card */
	pgt_reboot(sc);

	do {
		wokeup = 0;
		/*
		 * We don't expect to be woken up, just to drop the lock
		 * and time out.  Only tx queues can have anything valid
		 * on them outside of an interrupt.
		 */
		while (!TAILQ_EMPTY(&sc->sc_mgmtinprog)) {
			struct pgt_mgmt_desc *pmd;

			pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
			pmd->pmd_error = ENETRESET;
			wakeup_one(pmd);
			if (sc->sc_debug & SC_DEBUG_MGMT)
				DPRINTF(("%s: queue: mgmt %p <- %#x "
				    "(drained)\n", sc->sc_dev.dv_xname,
				    pmd, pmd->pmd_oid));
			wokeup++;
		}
		if (wokeup > 0) {
			if (flag == SC_NEEDS_RESET && sc->sc_flags & SC_DYING) {
				sc->sc_flags &= ~flag;
				return;
			}
		}
	} while (wokeup > 0);

	if (flag == SC_NEEDS_RESET) {
		int error;

		DPRINTF(("%s: resetting\n", sc->sc_dev.dv_xname));
		sc->sc_flags &= ~SC_POWERSAVE;
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		error = pgt_reset(sc);
		if (error == 0) {
			tsleep_nsec(&sc->sc_flags, 0, "pgtres", SEC_TO_NSEC(1));
			if (sc->sc_flags & SC_UNINITIALIZED) {
				printf("%s: not responding\n",
				    sc->sc_dev.dv_xname);
				/* Thud.  It was probably removed. */
				if (tryagain)
					panic("pgt went for lunch"); /* XXX */
				tryagain = 1;
			} else {
				/* await all interrupts */
				pgt_write_4_flush(sc, PGT_REG_INT_EN,
				    PGT_INT_STAT_SOURCES);
				DELAY(PGT_WRITEIO_DELAY);
				ic->ic_if.if_flags |= IFF_RUNNING;
			}
		}

		if (tryagain)
			goto trying_again;

		sc->sc_flags &= ~flag;
		if (ic->ic_if.if_flags & IFF_RUNNING)
			pgt_update_hw_from_sw(sc,
			    ic->ic_state != IEEE80211_S_INIT);
	}

	ic->ic_if.if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ic->ic_if.if_snd);
	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}

void
pgt_attach(struct device *self)
{
	struct pgt_softc *sc = (struct pgt_softc *)self;
	int error;

	/* debug flags */
	//sc->sc_debug |= SC_DEBUG_QUEUES;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_MGMT;
	sc->sc_debug |= SC_DEBUG_UNEXPECTED;
	//sc->sc_debug |= SC_DEBUG_TRIGGER;	/* verbose */
	//sc->sc_debug |= SC_DEBUG_EVENTS;	/* super verbose */
	//sc->sc_debug |= SC_DEBUG_POWER;
	sc->sc_debug |= SC_DEBUG_TRAP;
	sc->sc_debug |= SC_DEBUG_LINK;
	//sc->sc_debug |= SC_DEBUG_RXANNEX;
	//sc->sc_debug |= SC_DEBUG_RXFRAG;
	//sc->sc_debug |= SC_DEBUG_RXETHER;

	/* enable card if possible */
	if (sc->sc_enable != NULL)
		(*sc->sc_enable)(sc);

	error = pgt_dma_alloc(sc);
	if (error)
		return;

	sc->sc_ic.ic_if.if_softc = sc;
	TAILQ_INIT(&sc->sc_mgmtinprog);
	TAILQ_INIT(&sc->sc_kthread.sck_traps);
	sc->sc_flags |= SC_NEEDS_FIRMWARE | SC_UNINITIALIZED;
	sc->sc_80211_ioc_auth = IEEE80211_AUTH_OPEN;

	error = pgt_reset(sc);
	if (error)
		return;

	tsleep_nsec(&sc->sc_flags, 0, "pgtres", SEC_TO_NSEC(1));
	if (sc->sc_flags & SC_UNINITIALIZED) {
		printf("%s: not responding\n", sc->sc_dev.dv_xname);
		sc->sc_flags |= SC_NEEDS_FIRMWARE;
		return;
	} else {
		/* await all interrupts */
		pgt_write_4_flush(sc, PGT_REG_INT_EN, PGT_INT_STAT_SOURCES);
		DELAY(PGT_WRITEIO_DELAY);
	}

	error = pgt_net_attach(sc);
	if (error)
		return;

	if (kthread_create(pgt_per_device_kthread, sc, NULL,
	    sc->sc_dev.dv_xname) != 0)
		return;

	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
}

int
pgt_detach(struct pgt_softc *sc)
{
	if (sc->sc_flags & SC_NEEDS_FIRMWARE || sc->sc_flags & SC_UNINITIALIZED)
		/* device was not initialized correctly, so leave early */
		goto out;

	/* stop card */
	pgt_stop(sc, SC_DYING);
	pgt_reboot(sc);

	ieee80211_ifdetach(&sc->sc_ic.ic_if);
	if_detach(&sc->sc_ic.ic_if);

out:
	/* disable card if possible */
	if (sc->sc_disable != NULL)
		(*sc->sc_disable)(sc);

	pgt_dma_free(sc);

	return (0);
}

void
pgt_reboot(struct pgt_softc *sc)
{
	uint32_t reg;

	reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
	reg &= ~(PGT_CTRL_STAT_RESET | PGT_CTRL_STAT_RAMBOOT);
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg |= PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_WRITEIO_DELAY);

	reg &= ~PGT_CTRL_STAT_RESET;
	pgt_write_4(sc, PGT_REG_CTRL_STAT, reg);
	pgt_write_memory_barrier(sc);
	DELAY(PGT_RESET_DELAY);
}

void
pgt_init_intr(struct pgt_softc *sc)
{
	if ((sc->sc_flags & SC_UNINITIALIZED) == 0) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: spurious initialization\n",
			    sc->sc_dev.dv_xname));
	} else {
		sc->sc_flags &= ~SC_UNINITIALIZED;
		wakeup(&sc->sc_flags);
	}
}

/*
 * Service completed transmits and receives.  When built with
 * PGT_BUGGY_INTERRUPT_RECOVERY and called with `hack' set, the data
 * receive queues are skipped and only the management queues are
 * checked for new packets.
 */
void
pgt_update_intr(struct pgt_softc *sc, int hack)
{
	/* priority order */
	enum pgt_queue pqs[PGT_QUEUE_COUNT] = {
	    PGT_QUEUE_MGMT_TX, PGT_QUEUE_MGMT_RX,
	    PGT_QUEUE_DATA_HIGH_TX, PGT_QUEUE_DATA_HIGH_RX,
	    PGT_QUEUE_DATA_LOW_TX, PGT_QUEUE_DATA_LOW_RX
	};
	struct mbuf *m;
	uint32_t npend;
	unsigned int dirtycount;
	int i;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
	pgt_debug_events(sc, "intr");
	/*
	 * Check for completion of tx in their dirty queues.
	 * Check completion of rx into their dirty queues.
	 */
	for (i = 0; i < PGT_QUEUE_COUNT; i++) {
		size_t qdirty, qfree;

		qdirty = sc->sc_dirtyq_count[pqs[i]];
		qfree = sc->sc_freeq_count[pqs[i]];
		/*
		 * We want the wrap-around here.
		 */
		if (pgt_queue_is_rx(pqs[i])) {
			int data;

			data = pgt_queue_is_data(pqs[i]);
#ifdef PGT_BUGGY_INTERRUPT_RECOVERY
			if (hack && data)
				continue;
#endif
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			/*
			 * Receive queues clean up below, so qdirty must
			 * always be 0.
			 */
			if (npend > qfree) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: rx queue [%u] "
					    "overflowed by %u\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qfree));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			while (qfree-- > npend)
				pgt_rxdone(sc, pqs[i]);
		} else {
			npend = pgt_queue_frags_pending(sc, pqs[i]);
			if (npend > qdirty) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: tx queue [%u] "
					    "underflowed by %u\n",
					    sc->sc_dev.dv_xname, pqs[i],
					    npend - qdirty));
				sc->sc_flags |= SC_INTR_RESET;
				break;
			}
			/*
			 * If the free queue was empty, or the data transmit
			 * queue just became empty, wake up any waiters.
			 */
			if (qdirty > npend) {
				if (pgt_queue_is_data(pqs[i])) {
					sc->sc_ic.ic_if.if_timer = 0;
					ifq_clr_oactive(
					    &sc->sc_ic.ic_if.if_snd);
				}
				while (qdirty-- > npend)
					pgt_txdone(sc, pqs[i]);
			}
		}
	}

	/*
	 * This is the deferred completion for received management frames
	 * and where we queue network frames for stack input.
	 */
	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX])) {
		struct pgt_mgmt_desc *pmd;

		pmd = TAILQ_FIRST(&sc->sc_mgmtinprog);
		/*
		 * If there is no mgmt request in progress or the operation
		 * returned is explicitly a trap, this pmd will essentially
		 * be ignored.
		 */
		pgt_mgmtrx_completion(sc, pmd);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_MGMT_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_HIGH_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_HIGH_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_HIGH_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_HIGH_RX]));

	dirtycount = sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_RX];
	while (!TAILQ_EMPTY(&sc->sc_dirtyq[PGT_QUEUE_DATA_LOW_RX])) {
		if ((m = pgt_datarx_completion(sc, PGT_QUEUE_DATA_LOW_RX)))
			pgt_input_frames(sc, m);
	}
	sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX] =
	    htole32(dirtycount +
		letoh32(sc->sc_cb->pcb_driver_curfrag[PGT_QUEUE_DATA_LOW_RX]));

	/*
	 * Write out what we've finished with.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
}

struct mbuf *
pgt_ieee80211_encap(struct pgt_softc *sc, struct ether_header *eh,
    struct mbuf *m, struct ieee80211_node **ni)
{
	struct ieee80211com *ic;
	struct ieee80211_frame *frame;
	struct llc *snap;

	ic = &sc->sc_ic;
	if (ni != NULL && ic->ic_opmode == IEEE80211_M_MONITOR) {
		*ni = ieee80211_ref_node(ic->ic_bss);
		(*ni)->ni_inact = 0;
		return (m);
	}

	M_PREPEND(m, sizeof(*frame) + sizeof(*snap), M_DONTWAIT);
	if (m == NULL)
		return (m);
	if (m->m_len < sizeof(*frame) + sizeof(*snap)) {
		m = m_pullup(m, sizeof(*frame) + sizeof(*snap));
		if (m == NULL)
			return (m);
	}
	frame = mtod(m, struct ieee80211_frame *);
	snap = (struct llc *)&frame[1];
	if (ni != NULL) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			*ni = ieee80211_ref_node(ic->ic_bss);
		}
#ifndef IEEE80211_STA_ONLY
		else {
			*ni = ieee80211_find_node(ic, eh->ether_shost);
			/*
			 * Make up associations for ad-hoc mode.  To support
			 * ad-hoc WPA, we'll need to maintain a bounded
			 * pool of ad-hoc stations.
			 */
			if (*ni == NULL &&
			    ic->ic_opmode != IEEE80211_M_HOSTAP) {
				*ni = ieee80211_dup_bss(ic, eh->ether_shost);
				if (*ni != NULL) {
					(*ni)->ni_associd = 1;
					ic->ic_newassoc(ic, *ni, 1);
				}
			}
			if (*ni == NULL) {
				m_freem(m);
				return (NULL);
			}
		}
#endif
		(*ni)->ni_inact = 0;
	}
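	/*
	 * Build an RFC 1042 LLC/SNAP header in front of the payload:
	 * SNAP LSAPs, UI control, a zero OUI and the original Ethernet
	 * type, so the frame decapsulates back to Ethernet on input.
	 */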
	snap->llc_dsap = snap->llc_ssap = LLC_SNAP_LSAP;
	snap->llc_control = LLC_UI;
	snap->llc_snap.org_code[0] = 0;
	snap->llc_snap.org_code[1] = 0;
	snap->llc_snap.org_code[2] = 0;
	snap->llc_snap.ether_type = eh->ether_type;
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
	/* Doesn't look like much of the 802.11 header is available. */
	*(uint16_t *)frame->i_dur = *(uint16_t *)frame->i_seq = 0;
	/*
	 * Translate the addresses; WDS is not handled.
	 */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		frame->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_shost);
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, eh->ether_dhost);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, ic->ic_bss->ni_bssid);
		break;
	case IEEE80211_M_HOSTAP:
		/* HostAP forwarding defaults to being done on firmware. */
		frame->i_fc[1] = IEEE80211_FC1_DIR_TODS;
		IEEE80211_ADDR_COPY(frame->i_addr1, ic->ic_bss->ni_bssid);
		IEEE80211_ADDR_COPY(frame->i_addr2, eh->ether_shost);
		IEEE80211_ADDR_COPY(frame->i_addr3, eh->ether_dhost);
		break;
#endif
	default:
		break;
	}
	return (m);
}

void
pgt_input_frames(struct pgt_softc *sc, struct mbuf *m)
{
	struct ether_header eh;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp;
	struct ieee80211_channel *chan;
	struct ieee80211_rxinfo rxi;
	struct ieee80211_node *ni;
	struct ieee80211com *ic;
	struct pgt_rx_annex *pra;
	struct pgt_rx_header *pha;
	struct mbuf *next;
	unsigned int n;
	uint32_t rstamp;
	uint8_t rssi;

	ic = &sc->sc_ic;
	ifp = &ic->ic_if;
	for (next = m; m != NULL; m = next) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
			if (m->m_len < sizeof(*pha)) {
				m = m_pullup(m, sizeof(*pha));
				if (m == NULL) {
					if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
						DPRINTF(("%s: m_pullup "
						    "failure\n",
						    sc->sc_dev.dv_xname));
					ifp->if_ierrors++;
					continue;
				}
			}
			pha = mtod(m, struct pgt_rx_header *);
			pra = NULL;
			goto input;
		}

		if (m->m_len < sizeof(*pra)) {
			m = m_pullup(m, sizeof(*pra));
			if (m == NULL) {
				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
					DPRINTF(("%s: m_pullup failure\n",
					    sc->sc_dev.dv_xname));
				ifp->if_ierrors++;
				continue;
			}
		}
		pra = mtod(m, struct pgt_rx_annex *);
		pha = &pra->pra_header;
		if (sc->sc_debug & SC_DEBUG_RXANNEX)
			DPRINTF(("%s: rx annex: ? %04x "
			    "len %u clock %u flags %02x ? %02x rate %u ? %02x "
			    "freq %u ? %04x rssi %u pad %02x%02x%02x\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pha->pra_unknown0),
			    letoh16(pha->pra_length),
			    letoh32(pha->pra_clock), pha->pra_flags,
			    pha->pra_unknown1, pha->pra_rate,
			    pha->pra_unknown2, letoh32(pha->pra_frequency),
			    pha->pra_unknown3, pha->pra_rssi,
			    pha->pra_pad[0], pha->pra_pad[1], pha->pra_pad[2]));
		if (sc->sc_debug & SC_DEBUG_RXETHER)
			DPRINTF(("%s: rx ether: %s < %s 0x%04x\n",
			    sc->sc_dev.dv_xname,
			    ether_sprintf(pra->pra_ether_dhost),
			    ether_sprintf(pra->pra_ether_shost),
			    ntohs(pra->pra_ether_type)));

		memcpy(eh.ether_dhost, pra->pra_ether_dhost, ETHER_ADDR_LEN);
		memcpy(eh.ether_shost, pra->pra_ether_shost, ETHER_ADDR_LEN);
		eh.ether_type = pra->pra_ether_type;

input:
		/*
		 * This flag is set if e.g. packet could not be decrypted.
		 */
		if (pha->pra_flags & PRA_FLAG_BAD) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		}

		/*
		 * After getting what we want, chop off the annex, then
		 * turn into something that looks like it really was
		 * 802.11.
		 */
		rssi = pha->pra_rssi;
		rstamp = letoh32(pha->pra_clock);
		n = ieee80211_mhz2ieee(letoh32(pha->pra_frequency), 0);
		if (n <= IEEE80211_CHAN_MAX)
			chan = &ic->ic_channels[n];
		else
			chan = ic->ic_bss->ni_chan;
		/* Send to 802.3 listeners. */
		if (pra) {
			m_adj(m, sizeof(*pra));
		} else
			m_adj(m, sizeof(*pha));

		m = pgt_ieee80211_encap(sc, &eh, m, &ni);
		if (m != NULL) {
#if NBPFILTER > 0
			if (sc->sc_drvbpf != NULL) {
				struct mbuf mb;
				struct pgt_rx_radiotap_hdr *tap = &sc->sc_rxtap;

				tap->wr_flags = 0;
				tap->wr_chan_freq = htole16(chan->ic_freq);
				tap->wr_chan_flags = htole16(chan->ic_flags);
				tap->wr_rssi = rssi;
				tap->wr_max_rssi = ic->ic_max_rssi;

				mb.m_data = (caddr_t)tap;
				mb.m_len = sc->sc_rxtap_len;
				mb.m_next = m;
				mb.m_nextpkt = NULL;
				mb.m_type = 0;
				mb.m_flags = 0;
				bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
			}
#endif
			memset(&rxi, 0, sizeof(rxi));
			ni->ni_rssi = rxi.rxi_rssi = rssi;
			ni->ni_rstamp = rxi.rxi_tstamp = rstamp;
			ieee80211_inputm(ifp, m, ni, &rxi, &ml);
			/*
			 * The frame may have caused the node to be marked for
			 * reclamation (e.g. in response to a DEAUTH message)
			 * so use free_node here instead of unref_node.
			 */
			if (ni == ic->ic_bss)
				ieee80211_unref_node(&ni);
			else
				ieee80211_release_node(&sc->sc_ic, ni);
		} else {
			ifp->if_ierrors++;
		}
	}
	if_input(ifp, &ml);
}

void
pgt_wakeup_intr(struct pgt_softc *sc)
{
	int shouldupdate;
	int i;

	shouldupdate = 0;
	/* Check for any queues being empty before updating. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	for (i = 0; !shouldupdate && i < PGT_QUEUE_COUNT; i++) {
		if (pgt_queue_is_tx(i))
			shouldupdate = pgt_queue_frags_pending(sc, i);
		else
			shouldupdate = pgt_queue_frags_pending(sc, i) <
			    sc->sc_freeq_count[i];
	}
	if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
		shouldupdate = 1;
	if (sc->sc_debug & SC_DEBUG_POWER)
		DPRINTF(("%s: wakeup interrupt (update = %d)\n",
		    sc->sc_dev.dv_xname, shouldupdate));
	sc->sc_flags &= ~SC_POWERSAVE;
	if (shouldupdate) {
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
}

void
pgt_sleep_intr(struct pgt_softc *sc)
{
	int allowed;
	int i;

	allowed = 1;
	/* Check for any queues not being empty before allowing. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
	    sc->sc_cbdmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	for (i = 0; allowed && i < PGT_QUEUE_COUNT; i++) {
		if (pgt_queue_is_tx(i))
			allowed = pgt_queue_frags_pending(sc, i) == 0;
		else
			allowed = pgt_queue_frags_pending(sc, i) >=
			    sc->sc_freeq_count[i];
	}
	if (!TAILQ_EMPTY(&sc->sc_mgmtinprog))
		allowed = 0;
	if (sc->sc_debug & SC_DEBUG_POWER)
		DPRINTF(("%s: sleep interrupt (allowed = %d)\n",
		    sc->sc_dev.dv_xname, allowed));
	if (allowed && sc->sc_ic.ic_flags & IEEE80211_F_PMGTON) {
		sc->sc_flags |= SC_POWERSAVE;
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_SLEEP);
		DELAY(PGT_WRITEIO_DELAY);
	}
}

void
pgt_empty_traps(struct pgt_softc_kthread *sck)
{
	struct pgt_async_trap *pa;
	struct mbuf *m;

	while (!TAILQ_EMPTY(&sck->sck_traps)) {
		pa = TAILQ_FIRST(&sck->sck_traps);
		TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
		m = pa->pa_mbuf;
		m_freem(m);
	}
}

void
pgt_per_device_kthread(void *argp)
{
	struct pgt_softc *sc;
	struct pgt_softc_kthread *sck;
	struct pgt_async_trap *pa;
	struct mbuf *m;
	int s;

	sc = argp;
	sck = &sc->sc_kthread;
	while (!sck->sck_exit) {
		if (!sck->sck_update && !sck->sck_reset &&
		    TAILQ_EMPTY(&sck->sck_traps))
			tsleep_nsec(&sc->sc_kthread, 0, "pgtkth", INFSLP);
		if (sck->sck_reset) {
			DPRINTF(("%s: [thread] async reset\n",
			    sc->sc_dev.dv_xname));
			sck->sck_reset = 0;
			sck->sck_update = 0;
			pgt_empty_traps(sck);
			s = splnet();
			pgt_stop(sc, SC_NEEDS_RESET);
			splx(s);
		} else if (!TAILQ_EMPTY(&sck->sck_traps)) {
			DPRINTF(("%s: [thread] got a trap\n",
			    sc->sc_dev.dv_xname));
			pa = TAILQ_FIRST(&sck->sck_traps);
			TAILQ_REMOVE(&sck->sck_traps, pa, pa_link);
			m = pa->pa_mbuf;
			m_adj(m, sizeof(*pa));
			pgt_update_sw_from_hw(sc, pa, m);
			m_freem(m);
		} else if (sck->sck_update) {
			sck->sck_update = 0;
			pgt_update_sw_from_hw(sc, NULL, NULL);
		}
	}
	pgt_empty_traps(sck);
	kthread_exit(0);
}

void
pgt_async_reset(struct pgt_softc *sc)
{
	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
		return;
	sc->sc_kthread.sck_reset = 1;
	wakeup(&sc->sc_kthread);
}

void
pgt_async_update(struct pgt_softc *sc)
{
	if (sc->sc_flags & SC_DYING)
		return;
	sc->sc_kthread.sck_update = 1;
	wakeup(&sc->sc_kthread);
}

int
pgt_intr(void *arg)
{
	struct pgt_softc *sc;
	struct ifnet *ifp;
	u_int32_t reg;

	sc = arg;
	ifp = &sc->sc_ic.ic_if;

	/*
	 * Here the Linux driver ands in the value of the INT_EN register,
	 * and masks off everything but the documented interrupt bits.  Why?
	 *
	 * Unknown bit 0x4000 is set upon initialization, 0x8000000 some
	 * other times.
	 */
	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON &&
	    sc->sc_flags & SC_POWERSAVE) {
		/*
		 * Don't try handling the interrupt in sleep mode.
		 */
		reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
		if (reg & PGT_CTRL_STAT_SLEEPMODE)
			return (0);
	}
	reg = pgt_read_4(sc, PGT_REG_INT_STAT);
	if (reg == 0)
		return (0); /* This interrupt is not from us */

	pgt_write_4_flush(sc, PGT_REG_INT_ACK, reg);
	if (reg & PGT_INT_STAT_INIT)
		pgt_init_intr(sc);
	if (reg & PGT_INT_STAT_UPDATE) {
		pgt_update_intr(sc, 0);
		/*
		 * If we got an update, it's not really asleep.
		 */
		sc->sc_flags &= ~SC_POWERSAVE;
		/*
		 * Pretend I have any idea what the documentation
		 * would say, and just give it a shot sending an
		 * "update" after acknowledging the interrupt
		 * bits and writing out the new control block.
		 */
		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
		DELAY(PGT_WRITEIO_DELAY);
	}
	if (reg & PGT_INT_STAT_SLEEP && !(reg & PGT_INT_STAT_WAKEUP))
		pgt_sleep_intr(sc);
	if (reg & PGT_INT_STAT_WAKEUP)
		pgt_wakeup_intr(sc);

	if (sc->sc_flags & SC_INTR_RESET) {
		sc->sc_flags &= ~SC_INTR_RESET;
		pgt_async_reset(sc);
	}

	if (reg & ~PGT_INT_STAT_SOURCES && sc->sc_debug & SC_DEBUG_UNEXPECTED) {
		DPRINTF(("%s: unknown interrupt bits %#x (stat %#x)\n",
		    sc->sc_dev.dv_xname,
		    reg & ~PGT_INT_STAT_SOURCES,
		    pgt_read_4(sc, PGT_REG_CTRL_STAT)));
	}

	if (!ifq_empty(&ifp->if_snd))
		pgt_start(ifp);

	return (1);
}

void
pgt_txdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
	TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	/* Management frames want completion information. */
	if (sc->sc_debug & SC_DEBUG_QUEUES) {
		DPRINTF(("%s: queue: tx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
		if (sc->sc_debug & SC_DEBUG_MGMT && pgt_queue_is_mgmt(pq)) {
			struct pgt_mgmt_frame *pmf;

			pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
			DPRINTF(("%s: queue: txmgmt %p <- "
			    "(ver %u, op %u, flags %#x)\n",
			    sc->sc_dev.dv_xname,
			    pd, pmf->pmf_version, pmf->pmf_operation,
			    pmf->pmf_flags));
		}
	}
	pgt_unload_tx_desc_frag(sc, pd);
}

void
pgt_rxdone(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct pgt_desc *pd;

	pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
	sc->sc_freeq_count[pq]--;
	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
	sc->sc_dirtyq_count[pq]++;
	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
	    pd->pd_dmam->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	if (sc->sc_debug & SC_DEBUG_QUEUES)
		DPRINTF(("%s: queue: rx %u <- [%u]\n",
		    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
	if (sc->sc_debug & SC_DEBUG_UNEXPECTED &&
	    pd->pd_fragp->pf_flags & ~htole16(PF_FLAG_MF))
		DPRINTF(("%s: unknown flags on rx [%u]: %#x\n",
		    sc->sc_dev.dv_xname, pq, letoh16(pd->pd_fragp->pf_flags)));
}

/*
 * Traps are generally used for the firmware to report changes in state
 * back to the host.  Mostly this processes changes in link state, but
 * it needs to also be used to initiate WPA and other authentication
 * schemes in terms of client (station) or server (access point).
 */
void
pgt_trap_received(struct pgt_softc *sc, uint32_t oid, void *trapdata,
    size_t size)
{
	struct pgt_async_trap *pa;
	struct mbuf *m;
	char *p;
	size_t total;

	if (sc->sc_flags & SC_DYING)
		return;

	total = sizeof(oid) + size + sizeof(struct pgt_async_trap);
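	/*
	 * The queued record is laid out in a single mbuf as
	 * [struct pgt_async_trap][oid][trap data]; take a cluster when
	 * the total exceeds what a plain mbuf can hold.
	 */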
	if (total > MLEN) {
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return;
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			m = NULL;
		}
	} else
		m = m_get(M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;
	else
		m->m_len = total;

	pa = mtod(m, struct pgt_async_trap *);
	p = mtod(m, char *) + sizeof(*pa);
	*(uint32_t *)p = oid;
	p += sizeof(uint32_t);
	memcpy(p, trapdata, size);
	pa->pa_mbuf = m;

	TAILQ_INSERT_TAIL(&sc->sc_kthread.sck_traps, pa, pa_link);
	wakeup(&sc->sc_kthread);
}

/*
 * Process a completed management response (all requests should be
 * responded to, quickly) or an event (trap).
 */
void
pgt_mgmtrx_completion(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
{
	struct pgt_desc *pd;
	struct pgt_mgmt_frame *pmf;
	uint32_t oid, size;

	pd = TAILQ_FIRST(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX]);
	TAILQ_REMOVE(&sc->sc_dirtyq[PGT_QUEUE_MGMT_RX], pd, pd_link);
	sc->sc_dirtyq_count[PGT_QUEUE_MGMT_RX]--;
	TAILQ_INSERT_TAIL(&sc->sc_freeq[PGT_QUEUE_MGMT_RX],
	    pd, pd_link);
	sc->sc_freeq_count[PGT_QUEUE_MGMT_RX]++;
	if (letoh16(pd->pd_fragp->pf_size) < sizeof(*pmf)) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt desc too small: %u\n",
			    sc->sc_dev.dv_xname,
			    letoh16(pd->pd_fragp->pf_size)));
		goto out_nopmd;
	}
	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
	if (pmf->pmf_version != PMF_VER) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt version %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_version));
		goto out_nopmd;
	}
	if (pmf->pmf_device != PMF_DEV) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt dev %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_device));
		goto out;
	}
	if (pmf->pmf_flags & ~PMF_FLAG_VALID) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt flags %x\n",
			    sc->sc_dev.dv_xname,
			    pmf->pmf_flags & ~PMF_FLAG_VALID));
		goto out;
	}
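	/*
	 * Management frames carry their OID and size in either byte
	 * order; PMF_FLAG_LE indicates which one to decode.
	 */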
	if (pmf->pmf_flags & PMF_FLAG_LE) {
		oid = letoh32(pmf->pmf_oid);
		size = letoh32(pmf->pmf_size);
	} else {
		oid = betoh32(pmf->pmf_oid);
		size = betoh32(pmf->pmf_size);
	}
	if (pmf->pmf_operation == PMF_OP_TRAP) {
		pmd = NULL; /* ignored */
		DPRINTF(("%s: mgmt trap received (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname,
		    pmf->pmf_operation, oid, size));
		pgt_trap_received(sc, oid, (char *)pmf + sizeof(*pmf),
		    min(size, PGT_FRAG_SIZE - sizeof(*pmf)));
		goto out_nopmd;
	}
	if (pmd == NULL) {
		if (sc->sc_debug & (SC_DEBUG_UNEXPECTED | SC_DEBUG_MGMT))
			DPRINTF(("%s: spurious mgmt received "
			    "(op %u, oid %#x, len %u)\n", sc->sc_dev.dv_xname,
			    pmf->pmf_operation, oid, size));
		goto out_nopmd;
	}
	switch (pmf->pmf_operation) {
	case PMF_OP_RESPONSE:
		pmd->pmd_error = 0;
		break;
	case PMF_OP_ERROR:
		pmd->pmd_error = EPERM;
		goto out;
	default:
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: unknown mgmt op %u\n",
			    sc->sc_dev.dv_xname, pmf->pmf_operation));
		pmd->pmd_error = EIO;
		goto out;
	}
	if (oid != pmd->pmd_oid) {
		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
			DPRINTF(("%s: mgmt oid changed from %#x -> %#x\n",
			    sc->sc_dev.dv_xname, pmd->pmd_oid, oid));
		pmd->pmd_oid = oid;
	}
	if (pmd->pmd_recvbuf != NULL) {
		if (size > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: mgmt oid %#x has bad size %u\n",
				    sc->sc_dev.dv_xname, oid, size));
			pmd->pmd_error = EIO;
			goto out;
		}
		if (size > pmd->pmd_len)
			pmd->pmd_error = ENOMEM;
		else
			memcpy(pmd->pmd_recvbuf, (char *)pmf + sizeof(*pmf),
			    size);
		pmd->pmd_len = size;
	}

out:
	TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
	wakeup_one(pmd);
	if (sc->sc_debug & SC_DEBUG_MGMT)
		DPRINTF(("%s: queue: mgmt %p <- (op %u, oid %#x, len %u)\n",
		    sc->sc_dev.dv_xname, pmd, pmf->pmf_operation,
		    pmd->pmd_oid, pmd->pmd_len));
out_nopmd:
	pgt_reinit_rx_desc_frag(sc, pd);
}

/*
 * Queue packets for reception and defragmentation.  It is unclear
 * whether the rx queue can become full enough to start, but not
 * finish, queueing a fragmented packet.
 */
struct mbuf *
pgt_datarx_completion(struct pgt_softc *sc, enum pgt_queue pq)
{
	struct ifnet *ifp;
	struct pgt_desc *pd;
	struct mbuf *top, **mp, *m;
	size_t datalen;
	uint16_t morefrags, dataoff;
	int tlen = 0;

	ifp = &sc->sc_ic.ic_if;
	m = NULL;
	top = NULL;
	mp = &top;
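	/*
	 * mp always points at the m_next field of the last mbuf in the
	 * chain (initially at `top' itself), so each fragment is
	 * appended in constant time.
	 */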

	while ((pd = TAILQ_FIRST(&sc->sc_dirtyq[pq])) != NULL) {
		TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
		sc->sc_dirtyq_count[pq]--;
		datalen = letoh16(pd->pd_fragp->pf_size);
		dataoff = letoh32(pd->pd_fragp->pf_addr) - pd->pd_dmaaddr;
		morefrags = pd->pd_fragp->pf_flags & htole16(PF_FLAG_MF);

		if (sc->sc_debug & SC_DEBUG_RXFRAG)
			DPRINTF(("%s: rx frag: len %zu memoff %u flags %x\n",
			    sc->sc_dev.dv_xname, datalen, dataoff,
			    pd->pd_fragp->pf_flags));

		/* Add the (two+?) bytes for the header. */
		if (datalen + dataoff > PGT_FRAG_SIZE) {
			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
				DPRINTF(("%s: data rx too big: %zu\n",
				    sc->sc_dev.dv_xname, datalen));
			goto fail;
		}
1500 
1501 		if (m == NULL)
1502 			MGETHDR(m, M_DONTWAIT, MT_DATA);
1503 		else
1504 			m = m_get(M_DONTWAIT, MT_DATA);
1505 
1506 		if (m == NULL)
1507 			goto fail;
1508 		if (datalen > MHLEN) {
1509 			MCLGET(m, M_DONTWAIT);
1510 			if (!(m->m_flags & M_EXT)) {
1511 				m_free(m);
1512 				goto fail;
1513 			}
1514 		}
1515 		bcopy(pd->pd_mem + dataoff, mtod(m, char *), datalen);
1516 		m->m_len = datalen;
1517 		tlen += datalen;
1518 
1519 		*mp = m;
1520 		mp = &m->m_next;
1521 
1522 		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
1523 		sc->sc_freeq_count[pq]++;
1524 		pgt_reinit_rx_desc_frag(sc, pd);
1525 
1526 		if (!morefrags)
1527 			break;
1528 	}
1529 
1530 	if (top) {
1531 		top->m_pkthdr.len = tlen;
1532 	}
1533 	return (top);
1534 
1535 fail:
1536 	TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
1537 	sc->sc_freeq_count[pq]++;
1538 	pgt_reinit_rx_desc_frag(sc, pd);
1539 
1540 	ifp->if_ierrors++;
1541 	m_freem(top);
1542 	return (NULL);
1543 }
1544 
1545 int
pgt_oid_get(struct pgt_softc * sc,enum pgt_oid oid,void * arg,size_t arglen)1546 pgt_oid_get(struct pgt_softc *sc, enum pgt_oid oid,
1547     void *arg, size_t arglen)
1548 {
1549 	struct pgt_mgmt_desc pmd;
1550 	int error;
1551 
1552 	bzero(&pmd, sizeof(pmd));
1553 	pmd.pmd_recvbuf = arg;
1554 	pmd.pmd_len = arglen;
1555 	pmd.pmd_oid = oid;
1556 
1557 	error = pgt_mgmt_request(sc, &pmd);
1558 	if (error == 0)
1559 		error = pmd.pmd_error;
1560 	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1561 		DPRINTF(("%s: failure getting oid %#x: %d\n",
1562 		    sc->sc_dev.dv_xname, oid, error));
1563 
1564 	return (error);
1565 }
1566 
1567 int
pgt_oid_retrieve(struct pgt_softc * sc,enum pgt_oid oid,void * arg,size_t arglen)1568 pgt_oid_retrieve(struct pgt_softc *sc, enum pgt_oid oid,
1569     void *arg, size_t arglen)
1570 {
1571 	struct pgt_mgmt_desc pmd;
1572 	int error;
1573 
1574 	bzero(&pmd, sizeof(pmd));
1575 	pmd.pmd_sendbuf = arg;
1576 	pmd.pmd_recvbuf = arg;
1577 	pmd.pmd_len = arglen;
1578 	pmd.pmd_oid = oid;
1579 
1580 	error = pgt_mgmt_request(sc, &pmd);
1581 	if (error == 0)
1582 		error = pmd.pmd_error;
1583 	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1584 		DPRINTF(("%s: failure retrieving oid %#x: %d\n",
1585 		    sc->sc_dev.dv_xname, oid, error));
1586 
1587 	return (error);
1588 }
1589 
1590 int
pgt_oid_set(struct pgt_softc * sc,enum pgt_oid oid,const void * arg,size_t arglen)1591 pgt_oid_set(struct pgt_softc *sc, enum pgt_oid oid,
1592     const void *arg, size_t arglen)
1593 {
1594 	struct pgt_mgmt_desc pmd;
1595 	int error;
1596 
1597 	bzero(&pmd, sizeof(pmd));
1598 	pmd.pmd_sendbuf = arg;
1599 	pmd.pmd_len = arglen;
1600 	pmd.pmd_oid = oid;
1601 
1602 	error = pgt_mgmt_request(sc, &pmd);
1603 	if (error == 0)
1604 		error = pmd.pmd_error;
1605 	if (error != 0 && error != EPERM && sc->sc_debug & SC_DEBUG_UNEXPECTED)
1606 		DPRINTF(("%s: failure setting oid %#x: %d\n",
1607 		    sc->sc_dev.dv_xname, oid, error));
1608 
1609 	return (error);
1610 }
1611 
1612 void
pgt_state_dump(struct pgt_softc * sc)1613 pgt_state_dump(struct pgt_softc *sc)
1614 {
1615 	printf("%s: state dump: control 0x%08x interrupt 0x%08x\n",
1616 	    sc->sc_dev.dv_xname,
1617 	    pgt_read_4(sc, PGT_REG_CTRL_STAT),
1618 	    pgt_read_4(sc, PGT_REG_INT_STAT));
1619 
1620 	printf("%s: state dump: driver curfrag[]\n",
1621 	    sc->sc_dev.dv_xname);
1622 
1623 	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1624 	    sc->sc_dev.dv_xname,
1625 	    letoh32(sc->sc_cb->pcb_driver_curfrag[0]),
1626 	    letoh32(sc->sc_cb->pcb_driver_curfrag[1]),
1627 	    letoh32(sc->sc_cb->pcb_driver_curfrag[2]),
1628 	    letoh32(sc->sc_cb->pcb_driver_curfrag[3]),
1629 	    letoh32(sc->sc_cb->pcb_driver_curfrag[4]),
1630 	    letoh32(sc->sc_cb->pcb_driver_curfrag[5]));
1631 
1632 	printf("%s: state dump: device curfrag[]\n",
1633 	    sc->sc_dev.dv_xname);
1634 
1635 	printf("%s: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1636 	    sc->sc_dev.dv_xname,
1637 	    letoh32(sc->sc_cb->pcb_device_curfrag[0]),
1638 	    letoh32(sc->sc_cb->pcb_device_curfrag[1]),
1639 	    letoh32(sc->sc_cb->pcb_device_curfrag[2]),
1640 	    letoh32(sc->sc_cb->pcb_device_curfrag[3]),
1641 	    letoh32(sc->sc_cb->pcb_device_curfrag[4]),
1642 	    letoh32(sc->sc_cb->pcb_device_curfrag[5]));
1643 }
1644 
1645 int
pgt_mgmt_request(struct pgt_softc * sc,struct pgt_mgmt_desc * pmd)1646 pgt_mgmt_request(struct pgt_softc *sc, struct pgt_mgmt_desc *pmd)
1647 {
1648 	struct pgt_desc *pd;
1649 	struct pgt_mgmt_frame *pmf;
1650 	int error, i, ret;
1651 
1652 	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
1653 		return (EIO);
1654 	if (pmd->pmd_len > PGT_FRAG_SIZE - sizeof(*pmf))
1655 		return (ENOMEM);
1656 	pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_MGMT_TX]);
1657 	if (pd == NULL)
1658 		return (ENOMEM);
1659 	error = pgt_load_tx_desc_frag(sc, PGT_QUEUE_MGMT_TX, pd);
1660 	if (error)
1661 		return (error);
1662 	pmf = (struct pgt_mgmt_frame *)pd->pd_mem;
1663 	pmf->pmf_version = PMF_VER;
1664 	/* "get" and "retrieve" operations look the same */
1665 	if (pmd->pmd_recvbuf != NULL)
1666 		pmf->pmf_operation = PMF_OP_GET;
1667 	else
1668 		pmf->pmf_operation = PMF_OP_SET;
1669 	pmf->pmf_oid = htobe32(pmd->pmd_oid);
1670 	pmf->pmf_device = PMF_DEV;
1671 	pmf->pmf_flags = 0;
1672 	pmf->pmf_size = htobe32(pmd->pmd_len);
1673 	/* "set" and "retrieve" operations both send data */
1674 	if (pmd->pmd_sendbuf != NULL)
1675 		memcpy(pmf + 1, pmd->pmd_sendbuf, pmd->pmd_len);
1676 	else
1677 		bzero(pmf + 1, pmd->pmd_len);
1678 	pmd->pmd_error = EINPROGRESS;
1679 	TAILQ_INSERT_TAIL(&sc->sc_mgmtinprog, pmd, pmd_link);
1680 	if (sc->sc_debug & SC_DEBUG_MGMT)
1681 		DPRINTF(("%s: queue: mgmt %p -> (op %u, oid %#x, len %u)\n",
1682 		    sc->sc_dev.dv_xname,
1683 		    pmd, pmf->pmf_operation,
1684 		    pmd->pmd_oid, pmd->pmd_len));
1685 	pgt_desc_transmit(sc, PGT_QUEUE_MGMT_TX, pd,
1686 	    sizeof(*pmf) + pmd->pmd_len, 0);
1687 	/*
1688 	 * Try for one second, triggering 10 times.
1689 	 *
1690 	 * Do our best to work around seemingly buggy CardBus controllers
1691 	 * on Soekris 4521 that fail to get interrupts with alarming
1692 	 * regularity: run as if an interrupt occurred and service every
1693 	 * queue except for mbuf reception.
1694 	 */
1695 	i = 0;
1696 	do {
1697 		ret = tsleep_nsec(pmd, 0, "pgtmgm", MSEC_TO_NSEC(100));
1698 		if (ret != EWOULDBLOCK)
1699 			break;
1700 		if (pmd->pmd_error != EINPROGRESS)
1701 			break;
1702 		if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET)) {
1703 			pmd->pmd_error = EIO;
1704 			TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
1705 			break;
1706 		}
1707 		if (i != 9)
1708 			pgt_maybe_trigger(sc, PGT_QUEUE_MGMT_RX);
1709 #ifdef PGT_BUGGY_INTERRUPT_RECOVERY
1710 		pgt_update_intr(sc, 0);
1711 #endif
1712 	} while (i++ < 10);
1713 
1714 	if (pmd->pmd_error == EINPROGRESS) {
1715 		printf("%s: timeout waiting for management "
1716 		    "packet response to %#x\n",
1717 		    sc->sc_dev.dv_xname, pmd->pmd_oid);
1718 		TAILQ_REMOVE(&sc->sc_mgmtinprog, pmd, pmd_link);
1719 		if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
1720 			pgt_state_dump(sc);
1721 		pgt_async_reset(sc);
1722 		error = ETIMEDOUT;
1723 	} else
1724 		error = 0;
1725 
1726 	return (error);
1727 }
1728 
1729 void
1730 pgt_desc_transmit(struct pgt_softc *sc, enum pgt_queue pq, struct pgt_desc *pd,
1731     uint16_t len, int morecoming)
1732 {
1733 	TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
1734 	sc->sc_freeq_count[pq]--;
1735 	TAILQ_INSERT_TAIL(&sc->sc_dirtyq[pq], pd, pd_link);
1736 	sc->sc_dirtyq_count[pq]++;
1737 	if (sc->sc_debug & SC_DEBUG_QUEUES)
1738 		DPRINTF(("%s: queue: tx %u -> [%u]\n", sc->sc_dev.dv_xname,
1739 		    pd->pd_fragnum, pq));
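	/*
	 * The control block is shared with the device: sync it for host
	 * access, fill in the fragment descriptor, then flush it back so
	 * the device sees the updated driver_curfrag count.
	 */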
1740 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1741 	    sc->sc_cbdmam->dm_mapsize,
1742 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
1743 	if (morecoming)
1744 		pd->pd_fragp->pf_flags |= htole16(PF_FLAG_MF);
1745 	pd->pd_fragp->pf_size = htole16(len);
1746 	bus_dmamap_sync(sc->sc_dmat, pd->pd_dmam, 0,
1747 	    pd->pd_dmam->dm_mapsize,
1748 	    BUS_DMASYNC_POSTWRITE);
1749 	sc->sc_cb->pcb_driver_curfrag[pq] =
1750 	    htole32(letoh32(sc->sc_cb->pcb_driver_curfrag[pq]) + 1);
1751 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
1752 	    sc->sc_cbdmam->dm_mapsize,
1753 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
1754 	if (!morecoming)
1755 		pgt_maybe_trigger(sc, pq);
1756 }
1757 
1758 void
1759 pgt_maybe_trigger(struct pgt_softc *sc, enum pgt_queue pq)
1760 {
1761 	unsigned int tries = 1000000 / PGT_WRITEIO_DELAY; /* one second */
1762 	uint32_t reg;
1763 
1764 	if (sc->sc_debug & SC_DEBUG_TRIGGER)
1765 		DPRINTF(("%s: triggered by queue [%u]\n",
1766 		    sc->sc_dev.dv_xname, pq));
1767 	pgt_debug_events(sc, "trig");
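	/*
	 * In power-save mode the device may be asleep and has to be
	 * woken up before the queue update can be signalled.
	 */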
1768 	if (sc->sc_flags & SC_POWERSAVE) {
1769 		/* Magic values ahoy? */
1770 		if (pgt_read_4(sc, PGT_REG_INT_STAT) == 0xabadface) {
1771 			do {
1772 				reg = pgt_read_4(sc, PGT_REG_CTRL_STAT);
1773 				if (!(reg & PGT_CTRL_STAT_SLEEPMODE))
1774 					DELAY(PGT_WRITEIO_DELAY);
1775 			} while (tries-- != 0);
1776 			if (!(reg & PGT_CTRL_STAT_SLEEPMODE)) {
1777 				if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
1778 					DPRINTF(("%s: timeout triggering from "
1779 					    "sleep mode\n",
1780 					    sc->sc_dev.dv_xname));
1781 				pgt_async_reset(sc);
1782 				return;
1783 			}
1784 		}
1785 		pgt_write_4_flush(sc, PGT_REG_DEV_INT,
1786 		    PGT_DEV_INT_WAKEUP);
1787 		DELAY(PGT_WRITEIO_DELAY);
1788 		/* read the status back in */
1789 		(void)pgt_read_4(sc, PGT_REG_CTRL_STAT);
1790 		DELAY(PGT_WRITEIO_DELAY);
1791 	} else {
1792 		pgt_write_4_flush(sc, PGT_REG_DEV_INT, PGT_DEV_INT_UPDATE);
1793 		DELAY(PGT_WRITEIO_DELAY);
1794 	}
1795 }
1796 
1797 struct ieee80211_node *
1798 pgt_ieee80211_node_alloc(struct ieee80211com *ic)
1799 {
1800 	struct pgt_ieee80211_node *pin;
1801 
1802 	pin = malloc(sizeof(*pin), M_DEVBUF, M_NOWAIT | M_ZERO);
1803 	if (pin != NULL) {
1804 		pin->pin_dot1x_auth = PIN_DOT1X_UNAUTHORIZED;
1805 	}
1806 	return (struct ieee80211_node *)pin;
1807 }
1808 
1809 void
1810 pgt_ieee80211_newassoc(struct ieee80211com *ic, struct ieee80211_node *ni,
1811     int reallynew)
1812 {
1813 	ieee80211_ref_node(ni);
1814 }
1815 
1816 void
1817 pgt_ieee80211_node_free(struct ieee80211com *ic, struct ieee80211_node *ni)
1818 {
1819 	struct pgt_ieee80211_node *pin;
1820 
1821 	pin = (struct pgt_ieee80211_node *)ni;
1822 	free(pin, M_DEVBUF, 0);
1823 }
1824 
1825 void
1826 pgt_ieee80211_node_copy(struct ieee80211com *ic, struct ieee80211_node *dst,
1827     const struct ieee80211_node *src)
1828 {
1829 	const struct pgt_ieee80211_node *psrc;
1830 	struct pgt_ieee80211_node *pdst;
1831 
1832 	psrc = (const struct pgt_ieee80211_node *)src;
1833 	pdst = (struct pgt_ieee80211_node *)dst;
1834 	bcopy(psrc, pdst, sizeof(*psrc));
1835 }
1836 
1837 int
1838 pgt_ieee80211_send_mgmt(struct ieee80211com *ic, struct ieee80211_node *ni,
1839     int type, int arg1, int arg2)
1840 {
1841 	return (EOPNOTSUPP);
1842 }
1843 
1844 int
1845 pgt_net_attach(struct pgt_softc *sc)
1846 {
1847 	struct ieee80211com *ic = &sc->sc_ic;
1848 	struct ifnet *ifp = &ic->ic_if;
1849 	struct ieee80211_rateset *rs;
1850 	uint8_t rates[IEEE80211_RATE_MAXSIZE];
1851 	struct pgt_obj_buffer psbuffer;
1852 	struct pgt_obj_frequencies *freqs;
1853 	uint32_t phymode, country;
1854 	unsigned int chan, i, j, firstchan = -1;
1855 	int error;
1856 
1857 	psbuffer.pob_size = htole32(PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT);
1858 	psbuffer.pob_addr = htole32(sc->sc_psmdmam->dm_segs[0].ds_addr);
1859 	error = pgt_oid_set(sc, PGT_OID_PSM_BUFFER, &psbuffer, sizeof(psbuffer));
1860 	if (error)
1861 		return (error);
1862 	error = pgt_oid_get(sc, PGT_OID_PHY, &phymode, sizeof(phymode));
1863 	if (error)
1864 		return (error);
1865 	error = pgt_oid_get(sc, PGT_OID_MAC_ADDRESS, ic->ic_myaddr,
1866 	    sizeof(ic->ic_myaddr));
1867 	if (error)
1868 		return (error);
1869 	error = pgt_oid_get(sc, PGT_OID_COUNTRY, &country, sizeof(country));
1870 	if (error)
1871 		return (error);
1872 
1873 	ifp->if_softc = sc;
1874 	ifp->if_ioctl = pgt_ioctl;
1875 	ifp->if_start = pgt_start;
1876 	ifp->if_watchdog = pgt_watchdog;
1877 	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
1878 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
1879 
1880 	ifq_init_maxlen(&ifp->if_snd, IFQ_MAXLEN);
1881 
1882 	/*
1883 	 * Set channels
1884 	 *
1885 	 * Prism hardware likes to report supported frequencies that are
1886 	 * not actually available for the country of origin.
1887 	 */
1888 	j = sizeof(*freqs) + (IEEE80211_CHAN_MAX + 1) * sizeof(uint16_t);
1889 	freqs = malloc(j, M_DEVBUF, M_WAITOK);
1890 	error = pgt_oid_get(sc, PGT_OID_SUPPORTED_FREQUENCIES, freqs, j);
1891 	if (error) {
1892 		free(freqs, M_DEVBUF, 0);
1893 		return (error);
1894 	}
1895 
1896 	for (i = 0, j = letoh16(freqs->pof_count); i < j; i++) {
1897 		chan = ieee80211_mhz2ieee(letoh16(freqs->pof_freqlist_mhz[i]),
1898 		    0);
1899 
1900 		if (chan > IEEE80211_CHAN_MAX) {
1901 			printf("%s: reported bogus channel (%uMHz)\n",
1902 			    sc->sc_dev.dv_xname, chan);
1903 			free(freqs, M_DEVBUF, 0);
1904 			return (EIO);
1905 		}
1906 
1907 		if (letoh16(freqs->pof_freqlist_mhz[i]) < 5000) {
1908 			if (!(phymode & htole32(PGT_OID_PHY_2400MHZ)))
1909 				continue;
1910 			if (country == letoh32(PGT_COUNTRY_USA)) {
1911 				if (chan >= 12 && chan <= 14)
1912 					continue;
1913 			}
1914 			if (chan <= 14)
1915 				ic->ic_channels[chan].ic_flags |=
1916 				    IEEE80211_CHAN_B;
1917 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_PUREG;
1918 		} else {
1919 			if (!(phymode & htole32(PGT_OID_PHY_5000MHZ)))
1920 				continue;
1921 			ic->ic_channels[chan].ic_flags |= IEEE80211_CHAN_A;
1922 		}
1923 
1924 		ic->ic_channels[chan].ic_freq =
1925 		    letoh16(freqs->pof_freqlist_mhz[i]);
1926 
1927 		if (firstchan == -1)
1928 			firstchan = chan;
1929 
1930 		DPRINTF(("%s: set channel %d to freq %uMHz\n",
1931 		    sc->sc_dev.dv_xname, chan,
1932 		    letoh16(freqs->pof_freqlist_mhz[i])));
1933 	}
1934 	free(freqs, M_DEVBUF, 0);
1935 	if (firstchan == -1) {
1936 		printf("%s: no channels found\n", sc->sc_dev.dv_xname);
1937 		return (EIO);
1938 	}
1939 
1940 	/*
1941 	 * Set rates
1942 	 */
1943 	bzero(rates, sizeof(rates));
1944 	error = pgt_oid_get(sc, PGT_OID_SUPPORTED_RATES, rates, sizeof(rates));
1945 	if (error)
1946 		return (error);
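	/* Rates are reported in 500 kb/s units: 2 = 1 Mb/s, 22 = 11 Mb/s. */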
1947 	for (i = 0; i < sizeof(rates) && rates[i] != 0; i++) {
1948 		switch (rates[i]) {
1949 		case 2:
1950 		case 4:
1951 		case 11:
1952 		case 22:
1953 		case 44: /* maybe */
1954 			if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1955 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11B];
1956 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1957 			}
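			/* FALLTHROUGH */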
1958 		default:
1959 			if (phymode & htole32(PGT_OID_PHY_2400MHZ)) {
1960 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
1961 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1962 			}
1963 			if (phymode & htole32(PGT_OID_PHY_5000MHZ)) {
1964 				rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
1965 				rs->rs_rates[rs->rs_nrates++] = rates[i];
1966 			}
1967 			rs = &ic->ic_sup_rates[IEEE80211_MODE_AUTO];
1968 			rs->rs_rates[rs->rs_nrates++] = rates[i];
1969 		}
1970 	}
1971 
1972 	ic->ic_caps = IEEE80211_C_WEP | IEEE80211_C_PMGT | IEEE80211_C_TXPMGT |
1973 	    IEEE80211_C_SHSLOT | IEEE80211_C_SHPREAMBLE | IEEE80211_C_MONITOR;
1974 #ifndef IEEE80211_STA_ONLY
1975 	ic->ic_caps |= IEEE80211_C_IBSS | IEEE80211_C_HOSTAP;
1976 #endif
1977 	ic->ic_opmode = IEEE80211_M_STA;
1978 	ic->ic_state = IEEE80211_S_INIT;
1979 
1980 	if_attach(ifp);
1981 	ieee80211_ifattach(ifp);
1982 
1983 	/* setup post-attach/pre-lateattach vector functions */
1984 	sc->sc_newstate = ic->ic_newstate;
1985 	ic->ic_newstate = pgt_newstate;
1986 	ic->ic_node_alloc = pgt_ieee80211_node_alloc;
1987 	ic->ic_newassoc = pgt_ieee80211_newassoc;
1988 	ic->ic_node_free = pgt_ieee80211_node_free;
1989 	ic->ic_node_copy = pgt_ieee80211_node_copy;
1990 	ic->ic_send_mgmt = pgt_ieee80211_send_mgmt;
1991 	ic->ic_max_rssi = 255;	/* rssi is a u_int8_t */
1992 
1993 	/* let net80211 handle switching around the media + resetting */
1994 	ieee80211_media_init(ifp, pgt_media_change, pgt_media_status);
1995 
1996 #if NBPFILTER > 0
1997 	bpfattach(&sc->sc_drvbpf, ifp, DLT_IEEE802_11_RADIO,
1998 	    sizeof(struct ieee80211_frame) + 64);
1999 
2000 	sc->sc_rxtap_len = sizeof(sc->sc_rxtapu);
2001 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
2002 	sc->sc_rxtap.wr_ihdr.it_present = htole32(PGT_RX_RADIOTAP_PRESENT);
2003 
2004 	sc->sc_txtap_len = sizeof(sc->sc_txtapu);
2005 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
2006 	sc->sc_txtap.wt_ihdr.it_present = htole32(PGT_TX_RADIOTAP_PRESENT);
2007 #endif
2008 	return (0);
2009 }
2010 
2011 int
2012 pgt_media_change(struct ifnet *ifp)
2013 {
2014 	struct pgt_softc *sc = ifp->if_softc;
2015 	int error;
2016 
2017 	error = ieee80211_media_change(ifp);
2018 	if (error == ENETRESET) {
2019 		pgt_update_hw_from_sw(sc, 0);
2020 		error = 0;
2021 	}
2022 
2023 	return (error);
2024 }
2025 
2026 void
2027 pgt_media_status(struct ifnet *ifp, struct ifmediareq *imr)
2028 {
2029 	struct pgt_softc *sc = ifp->if_softc;
2030 	struct ieee80211com *ic = &sc->sc_ic;
2031 	uint32_t rate;
2032 	int s;
2033 
2034 	imr->ifm_status = 0;
2035 	imr->ifm_active = IFM_IEEE80211 | IFM_NONE;
2036 
2037 	if (!(ifp->if_flags & IFF_UP))
2038 		return;
2039 
2040 	s = splnet();
2041 
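	/* Report a fixed rate directly; otherwise query the firmware. */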
2042 	if (ic->ic_fixed_rate != -1) {
2043 		rate = ic->ic_sup_rates[ic->ic_curmode].
2044 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
2045 	} else {
2046 		if (pgt_oid_get(sc, PGT_OID_LINK_STATE, &rate, sizeof(rate)))
2047 			goto out;
2048 		rate = letoh32(rate);
2049 		if (sc->sc_debug & SC_DEBUG_LINK) {
2050 			DPRINTF(("%s: %s: link rate %u\n",
2051 			    sc->sc_dev.dv_xname, __func__, rate));
2052 		}
2053 		if (rate == 0)
2054 			goto out;
2055 	}
2056 
2057 	imr->ifm_status = IFM_AVALID;
2058 	imr->ifm_active = IFM_IEEE80211;
2059 	if (ic->ic_state == IEEE80211_S_RUN)
2060 		imr->ifm_status |= IFM_ACTIVE;
2061 
2062 	imr->ifm_active |= ieee80211_rate2media(ic, rate, ic->ic_curmode);
2063 
2064 	switch (ic->ic_opmode) {
2065 	case IEEE80211_M_STA:
2066 		break;
2067 #ifndef IEEE80211_STA_ONLY
2068 	case IEEE80211_M_IBSS:
2069 		imr->ifm_active |= IFM_IEEE80211_ADHOC;
2070 		break;
2071 	case IEEE80211_M_AHDEMO:
2072 		imr->ifm_active |= IFM_IEEE80211_ADHOC | IFM_FLAG0;
2073 		break;
2074 	case IEEE80211_M_HOSTAP:
2075 		imr->ifm_active |= IFM_IEEE80211_HOSTAP;
2076 		break;
2077 #endif
2078 	case IEEE80211_M_MONITOR:
2079 		imr->ifm_active |= IFM_IEEE80211_MONITOR;
2080 		break;
2081 	default:
2082 		break;
2083 	}
2084 
2085 out:
2086 	splx(s);
2087 }
2088 
2089 /*
2090  * Start data frames.  Critical sections surround the boundary of
2091  * management frame transmission / transmission acknowledgement / response
2092  * and data frame transmission / transmission acknowledgement.
2093  */
2094 void
2095 pgt_start(struct ifnet *ifp)
2096 {
2097 	struct pgt_softc *sc;
2098 	struct ieee80211com *ic;
2099 	struct pgt_desc *pd;
2100 	struct mbuf *m;
2101 	int error;
2102 
2103 	sc = ifp->if_softc;
2104 	ic = &sc->sc_ic;
2105 
2106 	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET) ||
2107 	    !(ifp->if_flags & IFF_RUNNING) ||
2108 	    ic->ic_state != IEEE80211_S_RUN) {
2109 		return;
2110 	}
2111 
2112 	/*
2113 	 * Management packets should probably be MLME frames
2114 	 * (i.e. hostap "managed" mode); we don't touch the
2115 	 * net80211 management queue.
2116 	 */
2117 	for (; sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] <
2118 	    PGT_QUEUE_FULL_THRESHOLD && !ifq_empty(&ifp->if_snd);) {
2119 		pd = TAILQ_FIRST(&sc->sc_freeq[PGT_QUEUE_DATA_LOW_TX]);
2120 		m = ifq_deq_begin(&ifp->if_snd);
2121 		if (m == NULL)
2122 			break;
2123 		if (m->m_pkthdr.len <= PGT_FRAG_SIZE) {
2124 			error = pgt_load_tx_desc_frag(sc,
2125 			    PGT_QUEUE_DATA_LOW_TX, pd);
2126 			if (error) {
2127 				ifq_deq_rollback(&ifp->if_snd, m);
2128 				break;
2129 			}
2130 			ifq_deq_commit(&ifp->if_snd, m);
2131 			m_copydata(m, 0, m->m_pkthdr.len, pd->pd_mem);
2132 			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
2133 			    pd, m->m_pkthdr.len, 0);
2134 		} else if (m->m_pkthdr.len <= PGT_FRAG_SIZE * 2) {
2135 			struct pgt_desc *pd2;
2136 
2137 			/*
2138 			 * Transmit a fragmented frame if there is
2139 			 * not enough room in one fragment; limit
2140 			 * to two fragments (802.11 itself couldn't
2141 			 * even support a full two.)
2142 			 */
2143 			if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] + 2 >
2144 			    PGT_QUEUE_FULL_THRESHOLD) {
2145 				ifq_deq_rollback(&ifp->if_snd, m);
2146 				break;
2147 			}
2148 			pd2 = TAILQ_NEXT(pd, pd_link);
2149 			error = pgt_load_tx_desc_frag(sc,
2150 			    PGT_QUEUE_DATA_LOW_TX, pd);
2151 			if (error == 0) {
2152 				error = pgt_load_tx_desc_frag(sc,
2153 				    PGT_QUEUE_DATA_LOW_TX, pd2);
2154 				if (error) {
2155 					pgt_unload_tx_desc_frag(sc, pd);
2156 					TAILQ_INSERT_HEAD(&sc->sc_freeq[
2157 					    PGT_QUEUE_DATA_LOW_TX], pd,
2158 					    pd_link);
2159 				}
2160 			}
2161 			if (error) {
2162 				ifq_deq_rollback(&ifp->if_snd, m);
2163 				break;
2164 			}
2165 			ifq_deq_commit(&ifp->if_snd, m);
2166 			m_copydata(m, 0, PGT_FRAG_SIZE, pd->pd_mem);
2167 			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
2168 			    pd, PGT_FRAG_SIZE, 1);
2169 			m_copydata(m, PGT_FRAG_SIZE,
2170 			    m->m_pkthdr.len - PGT_FRAG_SIZE, pd2->pd_mem);
2171 			pgt_desc_transmit(sc, PGT_QUEUE_DATA_LOW_TX,
2172 			    pd2, m->m_pkthdr.len - PGT_FRAG_SIZE, 0);
2173 		} else {
2174 			ifq_deq_commit(&ifp->if_snd, m);
2175 			ifp->if_oerrors++;
2176 			m_freem(m);
2177 			m = NULL;
2178 		}
2179 		if (m != NULL) {
2180 			struct ieee80211_node *ni;
2181 #if NBPFILTER > 0
2182 			if (ifp->if_bpf != NULL)
2183 				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
2184 #endif
2185 			ifp->if_timer = 1;
2186 			sc->sc_txtimer = 5;
2187 			ni = ieee80211_find_txnode(&sc->sc_ic,
2188 			    mtod(m, struct ether_header *)->ether_dhost);
2189 			if (ni != NULL) {
2190 				ni->ni_inact = 0;
2191 				if (ni != ic->ic_bss)
2192 					ieee80211_release_node(&sc->sc_ic, ni);
2193 			}
2194 #if NBPFILTER > 0
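			/* Re-encapsulate as 802.11 and tap a radiotap copy. */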
2195 			if (sc->sc_drvbpf != NULL) {
2196 				struct mbuf mb;
2197 				struct ether_header eh;
2198 				struct pgt_tx_radiotap_hdr *tap = &sc->sc_txtap;
2199 
2200 				bcopy(mtod(m, struct ether_header *), &eh,
2201 				    sizeof(eh));
2202 				m_adj(m, sizeof(eh));
2203 				m = pgt_ieee80211_encap(sc, &eh, m, NULL);
2204 
2205 				tap->wt_flags = 0;
2206 				/* tap->wt_rate = rate; */
2207 				tap->wt_rate = 0;
2208 				tap->wt_chan_freq =
2209 				    htole16(ic->ic_bss->ni_chan->ic_freq);
2210 				tap->wt_chan_flags =
2211 				    htole16(ic->ic_bss->ni_chan->ic_flags);
2212 
2213 				if (m != NULL) {
2214 					mb.m_data = (caddr_t)tap;
2215 					mb.m_len = sc->sc_txtap_len;
2216 					mb.m_next = m;
2217 					mb.m_nextpkt = NULL;
2218 					mb.m_type = 0;
2219 					mb.m_flags = 0;
2220 
2221 					bpf_mtap(sc->sc_drvbpf, &mb,
2222 					    BPF_DIRECTION_OUT);
2223 				}
2224 			}
2225 #endif
2226 			m_freem(m);
2227 		}
2228 	}
2229 }
2230 
2231 int
2232 pgt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t req)
2233 {
2234 	struct pgt_softc *sc = ifp->if_softc;
2235 	struct ifreq *ifr;
2236 	struct wi_req *wreq;
2237 	struct ieee80211_nodereq_all *na;
2238 	struct ieee80211com *ic;
2239 	struct pgt_obj_bsslist *pob;
2240 	struct wi_scan_p2_hdr *p2hdr;
2241 	struct wi_scan_res *res;
2242 	uint32_t noise;
2243 	int maxscan, i, j, s, error = 0;
2244 
2245 	ic = &sc->sc_ic;
2246 	ifr = (struct ifreq *)req;
2247 
2248 	s = splnet();
2249 	switch (cmd) {
2250 	case SIOCS80211SCAN:
2251 		/*
2252 		 * This chip always scans as soon as it gets initialized.
2253 		 */
2254 		break;
2255 	case SIOCG80211ALLNODES: {
2256 		struct ieee80211_nodereq *nr = NULL;
2257 		na = (struct ieee80211_nodereq_all *)req;
2258 		wreq = malloc(sizeof(*wreq), M_DEVBUF, M_WAITOK | M_ZERO);
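		/*
		 * Fetch the firmware's BSS list and convert each entry
		 * through a Prism2-style scan result into a nodereq.
		 */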
2259 
2260 		maxscan = PGT_OBJ_BSSLIST_NBSS;
2261 		pob = malloc(sizeof(*pob) +
2262 		    sizeof(struct pgt_obj_bss) * maxscan, M_DEVBUF, M_WAITOK);
2263 		error = pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise,
2264 		    sizeof(noise));
2265 
2266 		if (error == 0) {
2267 			noise = letoh32(noise);
2268 			error = pgt_oid_get(sc, PGT_OID_BSS_LIST, pob,
2269 			    sizeof(*pob) +
2270 			    sizeof(struct pgt_obj_bss) * maxscan);
2271 		}
2272 
2273 		if (error == 0) {
2274 			maxscan = min(PGT_OBJ_BSSLIST_NBSS,
2275 			    letoh32(pob->pob_count));
2276 			maxscan = min(maxscan,
2277 			    (sizeof(wreq->wi_val) - sizeof(*p2hdr)) /
2278 			    WI_PRISM2_RES_SIZE);
2279 			p2hdr = (struct wi_scan_p2_hdr *)&wreq->wi_val;
2280 			p2hdr->wi_rsvd = 0;
2281 			p2hdr->wi_reason = 1;
2282 			wreq->wi_len = (maxscan * WI_PRISM2_RES_SIZE) / 2 +
2283 			    sizeof(*p2hdr) / 2;
2284 			wreq->wi_type = WI_RID_SCAN_RES;
2285 		}
2286 
2287 		for (na->na_nodes = j = i = 0; i < maxscan &&
2288 		    (na->na_size >= j + sizeof(struct ieee80211_nodereq));
2289 		    i++) {
2290 			/* allocate node space */
2291 			if (nr == NULL)
2292 				nr = malloc(sizeof(*nr), M_DEVBUF, M_WAITOK);
2293 
2294 			/* get next BSS scan result */
2295 			res = (struct wi_scan_res *)
2296 			    ((char *)&wreq->wi_val + sizeof(*p2hdr) +
2297 			    i * WI_PRISM2_RES_SIZE);
2298 			pgt_obj_bss2scanres(sc, &pob->pob_bsslist[i],
2299 			    res, noise);
2300 
2301 			/* copy it to node structure for ifconfig to read */
2302 			bzero(nr, sizeof(*nr));
2303 			IEEE80211_ADDR_COPY(nr->nr_macaddr, res->wi_bssid);
2304 			IEEE80211_ADDR_COPY(nr->nr_bssid, res->wi_bssid);
2305 			nr->nr_channel = letoh16(res->wi_chan);
2306 			nr->nr_chan_flags = IEEE80211_CHAN_B;
2307 			nr->nr_rssi = letoh16(res->wi_signal);
2308 			nr->nr_max_rssi = 0; /* XXX */
2309 			nr->nr_nwid_len = letoh16(res->wi_ssid_len);
2310 			bcopy(res->wi_ssid, nr->nr_nwid, nr->nr_nwid_len);
2311 			nr->nr_intval = letoh16(res->wi_interval);
2312 			nr->nr_capinfo = letoh16(res->wi_capinfo);
2313 			nr->nr_txrate = res->wi_rate == WI_WAVELAN_RES_1M ? 2 :
2314 			    (res->wi_rate == WI_WAVELAN_RES_2M ? 4 :
2315 			    (res->wi_rate == WI_WAVELAN_RES_5M ? 11 :
2316 			    (res->wi_rate == WI_WAVELAN_RES_11M ? 22 : 0)));
2317 			nr->nr_nrates = 0;
2318 			while (res->wi_srates[nr->nr_nrates] != 0) {
2319 				nr->nr_rates[nr->nr_nrates] =
2320 				    res->wi_srates[nr->nr_nrates] &
2321 				    WI_VAR_SRATES_MASK;
2322 				nr->nr_nrates++;
2323 			}
2324 			nr->nr_flags = 0;
2325 			if (bcmp(nr->nr_macaddr, nr->nr_bssid,
2326 			    IEEE80211_ADDR_LEN) == 0)
2327 				nr->nr_flags |= IEEE80211_NODEREQ_AP;
2328 			error = copyout(nr, (caddr_t)na->na_node + j,
2329 			    sizeof(struct ieee80211_nodereq));
2330 			if (error)
2331 				break;
2332 
2333 			/* point to next node entry */
2334 			j += sizeof(struct ieee80211_nodereq);
2335 			na->na_nodes++;
2336 		}
2337 		if (nr)
2338 			free(nr, M_DEVBUF, 0);
2339 		free(pob, M_DEVBUF, 0);
2340 		free(wreq, M_DEVBUF, 0);
2341 		break;
2342 	}
2343 	case SIOCSIFADDR:
2344 		ifp->if_flags |= IFF_UP;
2345 		/* FALLTHROUGH */
2346 	case SIOCSIFFLAGS:
2347 		if (ifp->if_flags & IFF_UP) {
2348 			if ((ifp->if_flags & IFF_RUNNING) == 0) {
2349 				pgt_init(ifp);
2350 				error = ENETRESET;
2351 			}
2352 		} else {
2353 			if (ifp->if_flags & IFF_RUNNING) {
2354 				pgt_stop(sc, SC_NEEDS_RESET);
2355 				error = ENETRESET;
2356 			}
2357 		}
2358 		break;
2359 	case SIOCSIFMTU:
2360 		if (ifr->ifr_mtu > PGT_FRAG_SIZE) {
2361 			error = EINVAL;
2362 			break;
2363 		}
2364 		/* FALLTHROUGH */
2365 	default:
2366 		error = ieee80211_ioctl(ifp, cmd, req);
2367 		break;
2368 	}
2369 
2370 	if (error == ENETRESET) {
2371 		pgt_update_hw_from_sw(sc, 0);
2372 		error = 0;
2373 	}
2374 	splx(s);
2375 
2376 	return (error);
2377 }
2378 
2379 void
2380 pgt_obj_bss2scanres(struct pgt_softc *sc, struct pgt_obj_bss *pob,
2381     struct wi_scan_res *scanres, uint32_t noise)
2382 {
2383 	struct ieee80211_rateset *rs;
2384 	struct wi_scan_res ap;
2385 	unsigned int i, n;
2386 
2387 	rs = &sc->sc_ic.ic_sup_rates[IEEE80211_MODE_AUTO];
2388 	bzero(&ap, sizeof(ap));
2389 	ap.wi_chan = ieee80211_mhz2ieee(letoh16(pob->pob_channel), 0);
2390 	ap.wi_noise = noise;
2391 	ap.wi_signal = letoh16(pob->pob_rssi);
2392 	IEEE80211_ADDR_COPY(ap.wi_bssid, pob->pob_address);
2393 	ap.wi_interval = letoh16(pob->pob_beacon_period);
2394 	ap.wi_capinfo = letoh16(pob->pob_capinfo);
2395 	ap.wi_ssid_len = min(sizeof(ap.wi_ssid), pob->pob_ssid.pos_length);
2396 	memcpy(ap.wi_ssid, pob->pob_ssid.pos_ssid, ap.wi_ssid_len);
2397 	n = 0;
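	/* pob_rates is a bitmask whose set bits index the rate set. */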
2398 	for (i = 0; i < 16; i++) {
2399 		if (letoh16(pob->pob_rates) & (1 << i)) {
2400 			if (i >= rs->rs_nrates)
2401 				break;
2402 			ap.wi_srates[n++] = ap.wi_rate = rs->rs_rates[i];
2403 			if (n >= sizeof(ap.wi_srates) / sizeof(ap.wi_srates[0]))
2404 				break;
2405 		}
2406 	}
2407 	memcpy(scanres, &ap, WI_PRISM2_RES_SIZE);
2408 }
2409 
2410 void
2411 node_mark_active_ap(void *arg, struct ieee80211_node *ni)
2412 {
2413 	/*
2414 	 * HostAP mode lets all nodes stick around unless
2415 	 * the firmware AP kicks them off.
2416 	 */
2417 	ni->ni_inact = 0;
2418 }
2419 
2420 void
2421 node_mark_active_adhoc(void *arg, struct ieee80211_node *ni)
2422 {
2423 	struct pgt_ieee80211_node *pin;
2424 
2425 	/*
2426 	 * As there is no association in ad-hoc, we let links just
2427 	 * time out naturally as long as they are not holding any private
2428 	 * configuration, such as 802.1x authorization.
2429 	 */
2430 	pin = (struct pgt_ieee80211_node *)ni;
2431 	if (pin->pin_dot1x_auth == PIN_DOT1X_AUTHORIZED)
2432 		pin->pin_node.ni_inact = 0;
2433 }
2434 
2435 void
2436 pgt_watchdog(struct ifnet *ifp)
2437 {
2438 	struct pgt_softc *sc;
2439 
2440 	sc = ifp->if_softc;
2441 	/*
2442 	 * Check for timed out transmissions (and make sure to set
2443 	 * this watchdog to fire again if there is still data in the
2444 	 * output device queue).
2445 	 */
2446 	if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] != 0) {
2447 		int count;
2448 
2449 		ifp->if_timer = 1;
2450 		if (sc->sc_txtimer && --sc->sc_txtimer == 0) {
2451 			count = pgt_drain_tx_queue(sc, PGT_QUEUE_DATA_LOW_TX);
2452 			if (sc->sc_debug & SC_DEBUG_UNEXPECTED)
2453 				DPRINTF(("%s: timeout %d data transmissions\n",
2454 				    sc->sc_dev.dv_xname, count));
2455 		}
2456 	}
2457 	if (sc->sc_flags & (SC_DYING | SC_NEEDS_RESET))
2458 		return;
2459 	/*
2460 	 * If we're going to kick the device out of power-save mode
2461 	 * just to update the BSSID and such, we should not do it
2462 	 * very often; need to determine in what way to do that.
2463 	 */
2464 	if (ifp->if_flags & IFF_RUNNING &&
2465 	    sc->sc_ic.ic_state != IEEE80211_S_INIT &&
2466 	    sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR)
2467 		pgt_async_update(sc);
2468 
2469 #ifndef IEEE80211_STA_ONLY
2470 	/*
2471 	 * As a firmware-based HostAP, we should not time out
2472 	 * nodes inside the driver in addition to the timeout
2473 	 * that already exists in the firmware.  The only state we
2474 	 * should have to time out ourselves when doing HostAP
2475 	 * is privacy-related, such as 802.1x authorization.
2476 	 */
2477 	switch (sc->sc_ic.ic_opmode) {
2478 	case IEEE80211_M_HOSTAP:
2479 		ieee80211_iterate_nodes(&sc->sc_ic,
2480 		    node_mark_active_ap, NULL);
2481 		break;
2482 	case IEEE80211_M_IBSS:
2483 		ieee80211_iterate_nodes(&sc->sc_ic,
2484 		    node_mark_active_adhoc, NULL);
2485 		break;
2486 	default:
2487 		break;
2488 	}
2489 #endif
2490 	ieee80211_watchdog(ifp);
2491 	ifp->if_timer = 1;
2492 }
2493 
2494 int
2495 pgt_init(struct ifnet *ifp)
2496 {
2497 	struct pgt_softc *sc = ifp->if_softc;
2498 	struct ieee80211com *ic = &sc->sc_ic;
2499 
2500 	/* set default channel */
2501 	ic->ic_bss->ni_chan = ic->ic_ibss_chan;
2502 
2503 	if (!(sc->sc_flags & (SC_DYING | SC_UNINITIALIZED)))
2504 		pgt_update_hw_from_sw(sc,
2505 		    ic->ic_state != IEEE80211_S_INIT);
2506 
2507 	ifp->if_flags |= IFF_RUNNING;
2508 	ifq_clr_oactive(&ifp->if_snd);
2509 
2510 	/* Begin background scanning */
2511 	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_SCAN, -1);
2512 
2513 	return (0);
2514 }
2515 
2516 /*
2517  * After most every configuration change, everything needs to be fully
2518  * reinitialized.  For some operations (currently, WEP settings
2519  * in ad-hoc+802.1x mode), the change is "soft" and doesn't remove
2520  * "associations," and allows EAP authorization to occur again.
2521  * If keepassoc is specified, the reset operation should try to go
2522  * back to the BSS it had before.
2523  */
2524 void
2525 pgt_update_hw_from_sw(struct pgt_softc *sc, int keepassoc)
2526 {
2527 	struct ieee80211com *ic = &sc->sc_ic;
2528 	struct arpcom *ac = &ic->ic_ac;
2529 	struct ifnet *ifp = &ac->ac_if;
2530 	struct pgt_obj_key keyobj;
2531 	struct pgt_obj_ssid essid;
2532 	uint8_t availrates[IEEE80211_RATE_MAXSIZE + 1];
2533 	uint32_t mode, bsstype, config, profile, channel, slot, preamble;
2534 	uint32_t wep, exunencrypted, wepkey, dot1x, auth, mlme;
2535 	unsigned int i;
2536 	int success, shouldbeup, s;
2537 
2538 	config = PGT_CONFIG_MANUAL_RUN | PGT_CONFIG_RX_ANNEX;
2539 
2540 	/*
2541 	 * Promiscuous mode is currently a no-op: packets transmitted
2542 	 * while in promiscuous mode never seem to go anywhere.
2543 	 */
2544 	shouldbeup = ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_UP;
2545 
2546 	if (shouldbeup) {
2547 		switch (ic->ic_opmode) {
2548 		case IEEE80211_M_STA:
2549 			if (ifp->if_flags & IFF_PROMISC)
2550 				mode = PGT_MODE_CLIENT;	/* what to do? */
2551 			else
2552 				mode = PGT_MODE_CLIENT;
2553 			bsstype = PGT_BSS_TYPE_STA;
2554 			dot1x = PGT_DOT1X_AUTH_ENABLED;
2555 			break;
2556 #ifndef IEEE80211_STA_ONLY
2557 		case IEEE80211_M_IBSS:
2558 			if (ifp->if_flags & IFF_PROMISC)
2559 				mode = PGT_MODE_CLIENT;	/* what to do? */
2560 			else
2561 				mode = PGT_MODE_CLIENT;
2562 			bsstype = PGT_BSS_TYPE_IBSS;
2563 			dot1x = PGT_DOT1X_AUTH_ENABLED;
2564 			break;
2565 		case IEEE80211_M_HOSTAP:
2566 			mode = PGT_MODE_AP;
2567 			bsstype = PGT_BSS_TYPE_STA;
2568 			/*
2569 			 * For IEEE 802.1x, we need to authenticate and
2570 			 * authorize hosts from here on or they remain
2571 			 * associated but without the ability to send or
2572 			 * receive normal traffic to us (courtesy the
2573 			 * firmware AP implementation).
2574 			 */
2575 			dot1x = PGT_DOT1X_AUTH_ENABLED;
2576 			/*
2577 			 * WDS mode needs several things to work:
2578 			 * figuring out exactly how the WDS links are
2579 			 * meant to be created, an interface for doing
2580 			 * so, and the ability to encode and decode
2581 			 * WDS frames.
2582 			 */
2583 			if (sc->sc_wds)
2584 				config |= PGT_CONFIG_WDS;
2585 			break;
2586 #endif
2587 		case IEEE80211_M_MONITOR:
2588 			mode = PGT_MODE_PROMISCUOUS;
2589 			bsstype = PGT_BSS_TYPE_ANY;
2590 			dot1x = PGT_DOT1X_AUTH_NONE;
2591 			break;
2592 		default:
2593 			goto badopmode;
2594 		}
2595 	} else {
2596 badopmode:
2597 		mode = PGT_MODE_CLIENT;
2598 		bsstype = PGT_BSS_TYPE_NONE;
2599 	}
2600 
2601 	DPRINTF(("%s: current mode is ", sc->sc_dev.dv_xname));
2602 	switch (ic->ic_curmode) {
2603 	case IEEE80211_MODE_11A:
2604 		profile = PGT_PROFILE_A_ONLY;
2605 		preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
2606 		DPRINTF(("IEEE80211_MODE_11A\n"));
2607 		break;
2608 	case IEEE80211_MODE_11B:
2609 		profile = PGT_PROFILE_B_ONLY;
2610 		preamble = PGT_OID_PREAMBLE_MODE_LONG;
2611 		DPRINTF(("IEEE80211_MODE_11B\n"));
2612 		break;
2613 	case IEEE80211_MODE_11G:
2614 		profile = PGT_PROFILE_G_ONLY;
2615 		preamble = PGT_OID_PREAMBLE_MODE_SHORT;
2616 		DPRINTF(("IEEE80211_MODE_11G\n"));
2617 		break;
2618 	case IEEE80211_MODE_AUTO:
2619 		profile = PGT_PROFILE_MIXED_G_WIFI;
2620 		preamble = PGT_OID_PREAMBLE_MODE_DYNAMIC;
2621 		DPRINTF(("IEEE80211_MODE_AUTO\n"));
2622 		break;
2623 	default:
2624 		panic("unknown mode %d", ic->ic_curmode);
2625 	}
2626 
2627 	switch (sc->sc_80211_ioc_auth) {
2628 	case IEEE80211_AUTH_NONE:
2629 		auth = PGT_AUTH_MODE_NONE;
2630 		break;
2631 	case IEEE80211_AUTH_OPEN:
2632 		auth = PGT_AUTH_MODE_OPEN;
2633 		break;
2634 	default:
2635 		auth = PGT_AUTH_MODE_SHARED;
2636 		break;
2637 	}
2638 
2639 	if (sc->sc_ic.ic_flags & IEEE80211_F_WEPON) {
2640 		wep = 1;
2641 		exunencrypted = 1;
2642 	} else {
2643 		wep = 0;
2644 		exunencrypted = 0;
2645 	}
2646 
2647 	mlme = htole32(PGT_MLME_AUTO_LEVEL_AUTO);
2648 	wep = htole32(wep);
2649 	exunencrypted = htole32(exunencrypted);
2650 	profile = htole32(profile);
2651 	preamble = htole32(preamble);
2652 	bsstype = htole32(bsstype);
2653 	config = htole32(config);
2654 	mode = htole32(mode);
2655 
2656 	if (!wep || !sc->sc_dot1x)
2657 		dot1x = PGT_DOT1X_AUTH_NONE;
2658 	dot1x = htole32(dot1x);
2659 	auth = htole32(auth);
2660 
2661 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
2662 		slot = htole32(PGT_OID_SLOT_MODE_SHORT);
2663 	else
2664 		slot = htole32(PGT_OID_SLOT_MODE_DYNAMIC);
2665 
2666 	if (ic->ic_des_chan == IEEE80211_CHAN_ANYC) {
2667 		if (keepassoc)
2668 			channel = 0;
2669 		else
2670 			channel = ieee80211_chan2ieee(ic, ic->ic_bss->ni_chan);
2671 	} else
2672 		channel = ieee80211_chan2ieee(ic, ic->ic_des_chan);
2673 
2674 	DPRINTF(("%s: set rates", sc->sc_dev.dv_xname));
2675 	for (i = 0; i < ic->ic_sup_rates[ic->ic_curmode].rs_nrates; i++) {
2676 		availrates[i] = ic->ic_sup_rates[ic->ic_curmode].rs_rates[i];
2677 		DPRINTF((" %d", availrates[i]));
2678 	}
2679 	DPRINTF(("\n"));
2680 	availrates[i++] = 0;
2681 
2682 	essid.pos_length = min(ic->ic_des_esslen, sizeof(essid.pos_ssid));
2683 	memcpy(&essid.pos_ssid, ic->ic_des_essid, essid.pos_length);
2684 
2685 	s = splnet();
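	/*
	 * SETOID() breaks out of this one-pass loop on failure, so
	 * success is only set once all OID writes have gone through.
	 */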
2686 	for (success = 0; success == 0; success = 1) {
2687 		SETOID(PGT_OID_PROFILE, &profile, sizeof(profile));
2688 		SETOID(PGT_OID_CONFIG, &config, sizeof(config));
2689 		SETOID(PGT_OID_MLME_AUTO_LEVEL, &mlme, sizeof(mlme));
2690 
2691 		if (!IEEE80211_ADDR_EQ(ic->ic_myaddr, ac->ac_enaddr)) {
2692 			SETOID(PGT_OID_MAC_ADDRESS, ac->ac_enaddr,
2693 			    sizeof(ac->ac_enaddr));
2694 			IEEE80211_ADDR_COPY(ic->ic_myaddr, ac->ac_enaddr);
2695 		}
2696 
2697 		SETOID(PGT_OID_MODE, &mode, sizeof(mode));
2698 		SETOID(PGT_OID_BSS_TYPE, &bsstype, sizeof(bsstype));
2699 
2700 		if (channel != 0 && channel != IEEE80211_CHAN_ANY)
2701 			SETOID(PGT_OID_CHANNEL, &channel, sizeof(channel));
2702 
2703 		if (ic->ic_flags & IEEE80211_F_DESBSSID) {
2704 			SETOID(PGT_OID_BSSID, ic->ic_des_bssid,
2705 			    sizeof(ic->ic_des_bssid));
2706 		} else if (keepassoc) {
2707 			SETOID(PGT_OID_BSSID, ic->ic_bss->ni_bssid,
2708 			    sizeof(ic->ic_bss->ni_bssid));
2709 		}
2710 
2711 		SETOID(PGT_OID_SSID, &essid, sizeof(essid));
2712 
2713 		if (ic->ic_des_esslen > 0)
2714 			SETOID(PGT_OID_SSID_OVERRIDE, &essid, sizeof(essid));
2715 
2716 		SETOID(PGT_OID_RATES, &availrates, i);
2717 		SETOID(PGT_OID_EXTENDED_RATES, &availrates, i);
2718 		SETOID(PGT_OID_PREAMBLE_MODE, &preamble, sizeof(preamble));
2719 		SETOID(PGT_OID_SLOT_MODE, &slot, sizeof(slot));
2720 		SETOID(PGT_OID_AUTH_MODE, &auth, sizeof(auth));
2721 		SETOID(PGT_OID_EXCLUDE_UNENCRYPTED, &exunencrypted,
2722 		    sizeof(exunencrypted));
2723 		SETOID(PGT_OID_DOT1X, &dot1x, sizeof(dot1x));
2724 		SETOID(PGT_OID_PRIVACY_INVOKED, &wep, sizeof(wep));
2725 		/*
2726 		 * Setting WEP key(s)
2727 		 */
2728 		if (letoh32(wep) != 0) {
2729 			keyobj.pok_type = PGT_OBJ_KEY_TYPE_WEP;
2730 			/* key 1 */
2731 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2732 			    IEEE80211_KEYBUF_SIZE);
2733 			keyobj.pok_length = min(keyobj.pok_length,
2734 			    ic->ic_nw_keys[0].k_len);
2735 			bcopy(ic->ic_nw_keys[0].k_key, keyobj.pok_key,
2736 			    keyobj.pok_length);
2737 			SETOID(PGT_OID_DEFAULT_KEY0, &keyobj, sizeof(keyobj));
2738 			/* key 2 */
2739 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2740 			    IEEE80211_KEYBUF_SIZE);
2741 			keyobj.pok_length = min(keyobj.pok_length,
2742 			    ic->ic_nw_keys[1].k_len);
2743 			bcopy(ic->ic_nw_keys[1].k_key, keyobj.pok_key,
2744 			    keyobj.pok_length);
2745 			SETOID(PGT_OID_DEFAULT_KEY1, &keyobj, sizeof(keyobj));
2746 			/* key 3 */
2747 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2748 			    IEEE80211_KEYBUF_SIZE);
2749 			keyobj.pok_length = min(keyobj.pok_length,
2750 			    ic->ic_nw_keys[2].k_len);
2751 			bcopy(ic->ic_nw_keys[2].k_key, keyobj.pok_key,
2752 			    keyobj.pok_length);
2753 			SETOID(PGT_OID_DEFAULT_KEY2, &keyobj, sizeof(keyobj));
2754 			/* key 4 */
2755 			keyobj.pok_length = min(sizeof(keyobj.pok_key),
2756 			    IEEE80211_KEYBUF_SIZE);
2757 			keyobj.pok_length = min(keyobj.pok_length,
2758 			    ic->ic_nw_keys[3].k_len);
2759 			bcopy(ic->ic_nw_keys[3].k_key, keyobj.pok_key,
2760 			    keyobj.pok_length);
2761 			SETOID(PGT_OID_DEFAULT_KEY3, &keyobj, sizeof(keyobj));
2762 
2763 			wepkey = htole32(ic->ic_wep_txkey);
2764 			SETOID(PGT_OID_DEFAULT_KEYNUM, &wepkey, sizeof(wepkey));
2765 		}
2766 		/* set mode again to commit */
2767 		SETOID(PGT_OID_MODE, &mode, sizeof(mode));
2768 	}
2769 	splx(s);
2770 
2771 	if (success) {
2772 		if (shouldbeup)
2773 			ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2774 		else
2775 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2776 	} else {
2777 		printf("%s: problem setting modes\n", sc->sc_dev.dv_xname);
2778 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2779 	}
2780 }
2781 
2782 void
2783 pgt_hostap_handle_mlme(struct pgt_softc *sc, uint32_t oid,
2784     struct pgt_obj_mlme *mlme)
2785 {
2786 	struct ieee80211com *ic = &sc->sc_ic;
2787 	struct pgt_ieee80211_node *pin;
2788 	struct ieee80211_node *ni;
2789 
2790 	ni = ieee80211_find_node(ic, mlme->pom_address);
2791 	pin = (struct pgt_ieee80211_node *)ni;
2792 	switch (oid) {
2793 	case PGT_OID_DISASSOCIATE:
2794 		if (ni != NULL)
2795 			ieee80211_release_node(&sc->sc_ic, ni);
2796 		break;
2797 	case PGT_OID_ASSOCIATE:
2798 		if (ni == NULL) {
2799 			ni = ieee80211_dup_bss(ic, mlme->pom_address);
2800 			if (ni == NULL)
2801 				break;
2802 			ic->ic_newassoc(ic, ni, 1);
2803 			pin = (struct pgt_ieee80211_node *)ni;
2804 		}
2805 		ni->ni_associd = letoh16(mlme->pom_id);
2806 		pin->pin_mlme_state = letoh16(mlme->pom_state);
2807 		break;
2808 	default:
2809 		if (pin != NULL)
2810 			pin->pin_mlme_state = letoh16(mlme->pom_state);
2811 		break;
2812 	}
2813 }
2814 
2815 /*
2816  * Either in response to an event or after a certain amount of time,
2817  * synchronize our idea of the network we're part of from the hardware.
2818  */
2819 void
2820 pgt_update_sw_from_hw(struct pgt_softc *sc, struct pgt_async_trap *pa,
2821 	    struct mbuf *args)
2822 {
2823 	struct ieee80211com *ic = &sc->sc_ic;
2824 	struct pgt_obj_ssid ssid;
2825 	struct pgt_obj_bss bss;
2826 	uint32_t channel, noise, ls;
2827 	int error, s;
2828 
2829 	if (pa != NULL) {
2830 		struct pgt_obj_mlme *mlme;
2831 		uint32_t oid;
2832 
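		/* A trap mbuf carries the OID first, then the event payload. */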
2833 		oid = *mtod(args, uint32_t *);
2834 		m_adj(args, sizeof(uint32_t));
2835 		if (sc->sc_debug & SC_DEBUG_TRAP)
2836 			DPRINTF(("%s: trap: oid %#x len %u\n",
2837 			    sc->sc_dev.dv_xname, oid, args->m_len));
2838 		switch (oid) {
2839 		case PGT_OID_LINK_STATE:
2840 			if (args->m_len < sizeof(uint32_t))
2841 				break;
2842 			ls = letoh32(*mtod(args, uint32_t *));
2843 			if (sc->sc_debug & (SC_DEBUG_TRAP | SC_DEBUG_LINK))
2844 				DPRINTF(("%s: %s: link rate %u\n",
2845 				    sc->sc_dev.dv_xname, __func__, ls));
2846 			if (ls)
2847 				ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2848 			else
2849 				ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
2850 			goto gotlinkstate;
2851 		case PGT_OID_DEAUTHENTICATE:
2852 		case PGT_OID_AUTHENTICATE:
2853 		case PGT_OID_DISASSOCIATE:
2854 		case PGT_OID_ASSOCIATE:
2855 			if (args->m_len < sizeof(struct pgt_obj_mlme))
2856 				break;
2857 			mlme = mtod(args, struct pgt_obj_mlme *);
2858 			if (sc->sc_debug & SC_DEBUG_TRAP)
2859 				DPRINTF(("%s: mlme: address "
2860 				    "%s id 0x%02x state 0x%02x code 0x%02x\n",
2861 				    sc->sc_dev.dv_xname,
2862 				    ether_sprintf(mlme->pom_address),
2863 				    letoh16(mlme->pom_id),
2864 				    letoh16(mlme->pom_state),
2865 				    letoh16(mlme->pom_code)));
2866 #ifndef IEEE80211_STA_ONLY
2867 			if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2868 				pgt_hostap_handle_mlme(sc, oid, mlme);
2869 #endif
2870 			break;
2871 		}
2872 		return;
2873 	}
2874 	if (ic->ic_state == IEEE80211_S_SCAN) {
2875 		s = splnet();
2876 		error = pgt_oid_get(sc, PGT_OID_LINK_STATE, &ls, sizeof(ls));
2877 		splx(s);
2878 		if (error)
2879 			return;
2880 		DPRINTF(("%s: up_sw_from_hw: link %u\n", sc->sc_dev.dv_xname,
2881 		    htole32(ls)));
2882 		if (ls != 0)
2883 			ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
2884 	}
2885 
2886 gotlinkstate:
2887 	s = splnet();
2888 	if (pgt_oid_get(sc, PGT_OID_NOISE_FLOOR, &noise, sizeof(noise)) != 0)
2889 		goto out;
2890 	sc->sc_noise = letoh32(noise);
2891 	if (ic->ic_state == IEEE80211_S_RUN) {
2892 		if (pgt_oid_get(sc, PGT_OID_CHANNEL, &channel,
2893 		    sizeof(channel)) != 0)
2894 			goto out;
2895 		channel = min(letoh32(channel), IEEE80211_CHAN_MAX);
2896 		ic->ic_bss->ni_chan = &ic->ic_channels[channel];
2897 		if (pgt_oid_get(sc, PGT_OID_BSSID, ic->ic_bss->ni_bssid,
2898 		    sizeof(ic->ic_bss->ni_bssid)) != 0)
2899 			goto out;
2900 		IEEE80211_ADDR_COPY(&bss.pob_address, ic->ic_bss->ni_bssid);
2901 		error = pgt_oid_retrieve(sc, PGT_OID_BSS_FIND, &bss,
2902 		    sizeof(bss));
2903 		if (error == 0)
2904 			ic->ic_bss->ni_rssi = bss.pob_rssi;
2905 		else if (error != EPERM)
2906 			goto out;
2907 		error = pgt_oid_get(sc, PGT_OID_SSID, &ssid, sizeof(ssid));
2908 		if (error)
2909 			goto out;
2910 		ic->ic_bss->ni_esslen = min(ssid.pos_length,
2911 		    sizeof(ic->ic_bss->ni_essid));
2912 		memcpy(ic->ic_bss->ni_essid, ssid.pos_ssid,
2913 	    ic->ic_bss->ni_esslen);
2914 	}
2915 
2916 out:
2917 	splx(s);
2918 }
2919 
2920 int
2921 pgt_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
2922 {
2923 	struct pgt_softc *sc = ic->ic_if.if_softc;
2924 	enum ieee80211_state ostate;
2925 
2926 	ostate = ic->ic_state;
2927 
2928 	DPRINTF(("%s: newstate %s -> %s\n", sc->sc_dev.dv_xname,
2929 	    ieee80211_state_name[ostate], ieee80211_state_name[nstate]));
2930 
2931 	switch (nstate) {
2932 	case IEEE80211_S_INIT:
2933 		if (sc->sc_dirtyq_count[PGT_QUEUE_DATA_LOW_TX] == 0)
2934 			ic->ic_if.if_timer = 0;
2935 		ic->ic_mgt_timer = 0;
2936 		ic->ic_flags &= ~IEEE80211_F_SIBSS;
2937 		ieee80211_free_allnodes(ic, 1);
2938 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
2939 		break;
2940 	case IEEE80211_S_SCAN:
2941 		ic->ic_if.if_timer = 1;
2942 		ic->ic_mgt_timer = 0;
2943 		ieee80211_node_cleanup(ic, ic->ic_bss);
2944 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
2945 #ifndef IEEE80211_STA_ONLY
2946 		/* Just use any old channel; we override it anyway. */
2947 		if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2948 			ieee80211_create_ibss(ic, ic->ic_ibss_chan);
2949 #endif
2950 		break;
2951 	case IEEE80211_S_RUN:
2952 		ic->ic_if.if_timer = 1;
2953 		break;
2954 	default:
2955 		break;
2956 	}
2957 
2958 	return (sc->sc_newstate(ic, nstate, arg));
2959 }
2960 
2961 int
2962 pgt_drain_tx_queue(struct pgt_softc *sc, enum pgt_queue pq)
2963 {
2964 	int wokeup = 0;
2965 
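	/*
	 * Bring the device's fragment counter up to the driver's so
	 * every dirty descriptor can be reclaimed as if acknowledged.
	 */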
2966 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
2967 	    sc->sc_cbdmam->dm_mapsize,
2968 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE);
2969 	sc->sc_cb->pcb_device_curfrag[pq] =
2970 	    sc->sc_cb->pcb_driver_curfrag[pq];
2971 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cbdmam, 0,
2972 	    sc->sc_cbdmam->dm_mapsize,
2973 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_PREREAD);
2974 	while (!TAILQ_EMPTY(&sc->sc_dirtyq[pq])) {
2975 		struct pgt_desc *pd;
2976 
2977 		pd = TAILQ_FIRST(&sc->sc_dirtyq[pq]);
2978 		TAILQ_REMOVE(&sc->sc_dirtyq[pq], pd, pd_link);
2979 		sc->sc_dirtyq_count[pq]--;
2980 		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
2981 		sc->sc_freeq_count[pq]++;
2982 		pgt_unload_tx_desc_frag(sc, pd);
2983 		if (sc->sc_debug & SC_DEBUG_QUEUES)
2984 			DPRINTF(("%s: queue: tx %u <- [%u] (drained)\n",
2985 			    sc->sc_dev.dv_xname, pd->pd_fragnum, pq));
2986 		wokeup++;
2987 		if (pgt_queue_is_data(pq))
2988 			sc->sc_ic.ic_if.if_oerrors++;
2989 	}
2990 
2991 	return (wokeup);
2992 }
2993 
2994 int
2995 pgt_dma_alloc(struct pgt_softc *sc)
2996 {
2997 	size_t size;
2998 	int i, error, nsegs;
2999 
3000 	for (i = 0; i < PGT_QUEUE_COUNT; i++) {
3001 		TAILQ_INIT(&sc->sc_freeq[i]);
3002 		TAILQ_INIT(&sc->sc_dirtyq[i]);
3003 	}
3004 
3005 	/*
3006 	 * control block
3007 	 */
3008 	size = sizeof(struct pgt_control_block);
3009 
3010 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3011 	    BUS_DMA_NOWAIT, &sc->sc_cbdmam);
3012 	if (error != 0) {
3013 		printf("%s: can not create DMA tag for control block\n",
3014 		    sc->sc_dev.dv_xname);
3015 		goto out;
3016 	}
3017 
3018 	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
3019 	    0, &sc->sc_cbdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
3020 	if (error != 0) {
3021 		printf("%s: can not allocate DMA memory for control block\n",
3022 		    sc->sc_dev.dv_xname);
3023 		goto out;
3024 	}
3025 
3026 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cbdmas, nsegs,
3027 	    size, (caddr_t *)&sc->sc_cb, BUS_DMA_NOWAIT);
3028 	if (error != 0) {
3029 		printf("%s: can not map DMA memory for control block\n",
3030 		    sc->sc_dev.dv_xname);
3031 		goto out;
3032 	}
3033 
3034 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cbdmam,
3035 	    sc->sc_cb, size, NULL, BUS_DMA_NOWAIT);
3036 	if (error != 0) {
3037 		printf("%s: can not load DMA map for control block\n",
3038 		    sc->sc_dev.dv_xname);
3039 		goto out;
3040 	}
3041 
3042 	/*
3043 	 * powersave
3044 	 */
3045 	size = PGT_FRAG_SIZE * PGT_PSM_BUFFER_FRAME_COUNT;
3046 
3047 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3048 	    BUS_DMA_ALLOCNOW, &sc->sc_psmdmam);
3049 	if (error != 0) {
3050 		printf("%s: can not create DMA tag for powersave\n",
3051 		    sc->sc_dev.dv_xname);
3052 		goto out;
3053 	}
3054 
3055 	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE,
3056 	   0, &sc->sc_psmdmas, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
3057 	if (error != 0) {
3058 		printf("%s: can not allocate DMA memory for powersave\n",
3059 		    sc->sc_dev.dv_xname);
3060 		goto out;
3061 	}
3062 
3063 	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_psmdmas, nsegs,
3064 	    size, (caddr_t *)&sc->sc_psmbuf, BUS_DMA_NOWAIT);
3065 	if (error != 0) {
3066 		printf("%s: can not map DMA memory for powersave\n",
3067 		    sc->sc_dev.dv_xname);
3068 		goto out;
3069 	}
3070 
3071 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_psmdmam,
3072 	    sc->sc_psmbuf, size, NULL, BUS_DMA_WAITOK);
3073 	if (error != 0) {
3074 		printf("%s: can not load DMA map for powersave\n",
3075 		    sc->sc_dev.dv_xname);
3076 		goto out;
3077 	}
3078 
3079 	/*
3080 	 * fragments
3081 	 */
3082 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3083 	if (error != 0)
3084 		goto out;
3085 
3086 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3087 	if (error != 0)
3088 		goto out;
3089 
3090 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3091 	if (error != 0)
3092 		goto out;
3093 
3094 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3095 	if (error != 0)
3096 		goto out;
3097 
3098 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_RX);
3099 	if (error != 0)
3100 		goto out;
3101 
3102 	error = pgt_dma_alloc_queue(sc, PGT_QUEUE_MGMT_TX);
3103 	if (error != 0)
3104 		goto out;
3105 
3106 out:
3107 	if (error) {
3108 		printf("%s: error in DMA allocation\n", sc->sc_dev.dv_xname);
3109 		pgt_dma_free(sc);
3110 	}
3111 
3112 	return (error);
3113 }
3114 
3115 int
3116 pgt_dma_alloc_queue(struct pgt_softc *sc, enum pgt_queue pq)
3117 {
3118 	struct pgt_desc *pd;
3119 	size_t i, qsize;
3120 	int error, nsegs;
3121 
3122 	switch (pq) {
3123 		case PGT_QUEUE_DATA_LOW_RX:
3124 			qsize = PGT_QUEUE_DATA_RX_SIZE;
3125 			break;
3126 		case PGT_QUEUE_DATA_LOW_TX:
3127 			qsize = PGT_QUEUE_DATA_TX_SIZE;
3128 			break;
3129 		case PGT_QUEUE_DATA_HIGH_RX:
3130 			qsize = PGT_QUEUE_DATA_RX_SIZE;
3131 			break;
3132 		case PGT_QUEUE_DATA_HIGH_TX:
3133 			qsize = PGT_QUEUE_DATA_TX_SIZE;
3134 			break;
3135 		case PGT_QUEUE_MGMT_RX:
3136 			qsize = PGT_QUEUE_MGMT_SIZE;
3137 			break;
3138 		case PGT_QUEUE_MGMT_TX:
3139 			qsize = PGT_QUEUE_MGMT_SIZE;
3140 			break;
3141 		default:
3142 			return (EINVAL);
3143 	}
3144 
3145 	for (i = 0; i < qsize; i++) {
3146 		pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3147 
3148 		error = bus_dmamap_create(sc->sc_dmat, PGT_FRAG_SIZE, 1,
3149 		    PGT_FRAG_SIZE, 0, BUS_DMA_ALLOCNOW, &pd->pd_dmam);
3150 		if (error != 0) {
3151 			printf("%s: can not create DMA tag for fragment\n",
3152 			    sc->sc_dev.dv_xname);
3153 			free(pd, M_DEVBUF, 0);
3154 			break;
3155 		}
3156 
3157 		error = bus_dmamem_alloc(sc->sc_dmat, PGT_FRAG_SIZE, PAGE_SIZE,
3158 		    0, &pd->pd_dmas, 1, &nsegs, BUS_DMA_WAITOK);
3159 		if (error != 0) {
3160 			printf("%s: error alloc frag %zu on queue %u\n",
3161 			    sc->sc_dev.dv_xname, i, pq);
3162 			free(pd, M_DEVBUF, 0);
3163 			break;
3164 		}
3165 
3166 		error = bus_dmamem_map(sc->sc_dmat, &pd->pd_dmas, nsegs,
3167 		    PGT_FRAG_SIZE, (caddr_t *)&pd->pd_mem, BUS_DMA_WAITOK);
3168 		if (error != 0) {
3169 			printf("%s: error map frag %zu on queue %u\n",
3170 			    sc->sc_dev.dv_xname, i, pq);
3171 			free(pd, M_DEVBUF, 0);
3172 			break;
3173 		}
3174 
3175 		if (pgt_queue_is_rx(pq)) {
3176 			error = bus_dmamap_load(sc->sc_dmat, pd->pd_dmam,
3177 			    pd->pd_mem, PGT_FRAG_SIZE, NULL, BUS_DMA_NOWAIT);
3178 			if (error != 0) {
3179 				printf("%s: error load frag %zu on queue %u\n",
3180 				    sc->sc_dev.dv_xname, i, pq);
3181 				bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas,
3182 				    nsegs);
3183 				free(pd, M_DEVBUF, 0);
3184 				break;
3185 			}
3186 			pd->pd_dmaaddr = pd->pd_dmam->dm_segs[0].ds_addr;
3187 		}
3188 		TAILQ_INSERT_TAIL(&sc->sc_freeq[pq], pd, pd_link);
3189 	}
3190 
3191 	return (error);
3192 }
3193 
3194 void
3195 pgt_dma_free(struct pgt_softc *sc)
3196 {
3197 	/*
3198 	 * fragments
3199 	 */
3200 	if (sc->sc_dmat != NULL) {
3201 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_RX);
3202 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_LOW_TX);
3203 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_RX);
3204 		pgt_dma_free_queue(sc, PGT_QUEUE_DATA_HIGH_TX);
3205 		pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_RX);
3206 		pgt_dma_free_queue(sc, PGT_QUEUE_MGMT_TX);
3207 	}
3208 
3209 	/*
3210 	 * powersave
3211 	 */
3212 	if (sc->sc_psmbuf != NULL) {
3213 		bus_dmamap_unload(sc->sc_dmat, sc->sc_psmdmam);
3214 		bus_dmamem_free(sc->sc_dmat, &sc->sc_psmdmas, 1);
3215 		sc->sc_psmbuf = NULL;
3216 		sc->sc_psmdmam = NULL;
3217 	}
3218 
3219 	/*
3220 	 * control block
3221 	 */
3222 	if (sc->sc_cb != NULL) {
3223 		bus_dmamap_unload(sc->sc_dmat, sc->sc_cbdmam);
3224 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cbdmas, 1);
3225 		sc->sc_cb = NULL;
3226 		sc->sc_cbdmam = NULL;
3227 	}
3228 }
3229 
3230 void
3231 pgt_dma_free_queue(struct pgt_softc *sc, enum pgt_queue pq)
3232 {
3233 	struct pgt_desc	*pd;
3234 
3235 	while (!TAILQ_EMPTY(&sc->sc_freeq[pq])) {
3236 		pd = TAILQ_FIRST(&sc->sc_freeq[pq]);
3237 		TAILQ_REMOVE(&sc->sc_freeq[pq], pd, pd_link);
3238 		if (pd->pd_dmam != NULL) {
3239 			bus_dmamap_unload(sc->sc_dmat, pd->pd_dmam);
3240 			pd->pd_dmam = NULL;
3241 		}
3242 		bus_dmamem_free(sc->sc_dmat, &pd->pd_dmas, 1);
3243 		free(pd, M_DEVBUF, 0);
3244 	}
3245 }
3246 
3247 int
3248 pgt_activate(struct device *self, int act)
3249 {
3250 	struct pgt_softc *sc = (struct pgt_softc *)self;
3251 	struct ifnet *ifp = &sc->sc_ic.ic_if;
3252 
3253 	DPRINTF(("%s: %s(%d)\n", sc->sc_dev.dv_xname, __func__, act));
3254 
3255 	switch (act) {
3256 	case DVACT_SUSPEND:
3257 		if (ifp->if_flags & IFF_RUNNING) {
3258 			pgt_stop(sc, SC_NEEDS_RESET);
3259 			pgt_update_hw_from_sw(sc, 0);
3260 		}
3261 		if (sc->sc_power != NULL)
3262 			(*sc->sc_power)(sc, act);
3263 		break;
3264 	case DVACT_WAKEUP:
3265 		pgt_wakeup(sc);
3266 		break;
3267 	}
3268 	return (0);
3269 }
3270 
3271 void
3272 pgt_wakeup(struct pgt_softc *sc)
3273 {
3274 	struct ifnet *ifp = &sc->sc_ic.ic_if;
3275 
3276 	if (sc->sc_power != NULL)
3277 		(*sc->sc_power)(sc, DVACT_RESUME);
3278 
3279 	pgt_stop(sc, SC_NEEDS_RESET);
3280 	pgt_update_hw_from_sw(sc, 0);
3281 
3282 	if (ifp->if_flags & IFF_UP) {
3283 		pgt_init(ifp);
3284 		pgt_update_hw_from_sw(sc, 0);
3285 	}
3286 }
3287