xref: /openbsd/sys/dev/pci/if_bwfm_pci.c (revision d415bd75)
1 /*	$OpenBSD: if_bwfm_pci.c,v 1.75 2022/12/30 14:10:17 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2010-2016 Broadcom Corporation
4  * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and/or distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bpfilter.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/buf.h>
24 #include <sys/kernel.h>
25 #include <sys/malloc.h>
26 #include <sys/device.h>
27 #include <sys/queue.h>
28 #include <sys/socket.h>
29 
30 #if defined(__HAVE_FDT)
31 #include <machine/fdt.h>
32 #include <dev/ofw/openfirm.h>
33 #endif
34 
35 #if NBPFILTER > 0
36 #include <net/bpf.h>
37 #endif
38 #include <net/if.h>
39 #include <net/if_dl.h>
40 #include <net/if_media.h>
41 
42 #include <netinet/in.h>
43 #include <netinet/if_ether.h>
44 
45 #include <net80211/ieee80211_var.h>
46 
47 #include <machine/bus.h>
48 
49 #include <dev/pci/pcireg.h>
50 #include <dev/pci/pcivar.h>
51 #include <dev/pci/pcidevs.h>
52 
53 #include <dev/ic/bwfmvar.h>
54 #include <dev/ic/bwfmreg.h>
55 #include <dev/pci/if_bwfm_pci.h>
56 
57 #define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
58 #define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
59 #define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN
60 
61 #define BWFM_NUM_TX_MSGRINGS			2
62 #define BWFM_NUM_RX_MSGRINGS			3
63 
64 #define BWFM_NUM_IOCTL_PKTIDS			8
65 #define BWFM_NUM_TX_PKTIDS			2048
66 #define BWFM_NUM_RX_PKTIDS			1024
67 
68 #define BWFM_NUM_IOCTL_DESCS			1
69 #define BWFM_NUM_TX_DESCS			1
70 #define BWFM_NUM_RX_DESCS			1
71 
72 #ifdef BWFM_DEBUG
73 #define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
74 #define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
75 static int bwfm_debug = 2;
76 #else
77 #define DPRINTF(x)	do { ; } while (0)
78 #define DPRINTFN(n, x)	do { ; } while (0)
79 #endif
80 
81 #define DEVNAME(sc)	((sc)->sc_sc.sc_dev.dv_xname)
82 
/* Lifecycle of a (flow)ring as tracked by the host. */
enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};

/*
 * One host<->device message ring.  Submission rings carry host
 * requests to the firmware; completion rings carry replies back.
 * The read/write indices live either in TCM or in a host DMA buffer
 * (see sc_dma_idx_buf); w_idx_addr/r_idx_addr are their offsets.
 */
struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;	/* offset of write index */
	uint32_t		 r_idx_addr;	/* offset of read index */
	uint32_t		 w_ptr;		/* cached write pointer */
	uint32_t		 r_ptr;		/* cached read pointer */
	int			 nitem;		/* number of slots */
	int			 itemsz;	/* bytes per slot */
	enum ring_status	 status;
	struct bwfm_pci_dmamem	*ring;		/* ring backing memory */
	struct mbuf		*m;

	/* Flowring-only state. */
	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};

/* An in-flight ioctl transaction awaiting the firmware's response. */
struct bwfm_pci_ioctl {
	uint16_t		 transid;	/* matches request/response */
	uint16_t		 retlen;	/* returned payload length */
	int16_t			 status;	/* firmware status code */
	struct mbuf		*m;		/* response payload */
	TAILQ_ENTRY(bwfm_pci_ioctl) next;
};

/* One DMA-mapped packet buffer slot. */
struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;
};

/* Packet-id table: maps ids exchanged with firmware to buffers. */
struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;	/* array of npkt slots */
	uint32_t		 npkt;	/* table size */
	int			 last;	/* last slot handed out */
};
123 
/*
 * Per-device state of the bwfm(4) PCIe front-end.  The generic
 * struct bwfm_softc must stay first: the attach/bus callbacks cast
 * between the two softc types.
 */
struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;		/* generic state, must be first */
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void 			*sc_ih;		/* interrupt handle */

	int			 sc_initialized;	/* preinit completed */

	/* BAR0: chip register space */
	bus_space_tag_t		 sc_reg_iot;
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	/* BAR1: device shared memory (TCM) */
	bus_space_tag_t		 sc_tcm_iot;
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	uint32_t		 sc_shared_address;	/* fw shared area in TCM */
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	uint8_t			 sc_dma_idx_sz;	/* 0 = ring indices in TCM */
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;	/* host ring-index buffer */
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;
	uint32_t		 sc_rx_dataoffset;
	uint32_t		 sc_htod_mb_data_addr;
	uint32_t		 sc_dtoh_mb_data_addr;
	uint32_t		 sc_ring_info_addr;

	/* Firmware console ("dmesg") location in TCM */
	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	/* Fixed message rings */
	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;	/* dynamic TX data rings */

	struct bwfm_pci_dmamem	*sc_scratch_buf;	/* D2H scratch buffer */
	struct bwfm_pci_dmamem	*sc_ringupd_buf;	/* D2H ring-update buffer */

	TAILQ_HEAD(, bwfm_pci_ioctl) sc_ioctlq;	/* pending ioctl responses */
	uint16_t		 sc_ioctl_transid;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	/* Packet-id translation tables */
	struct bwfm_pci_pkts	 sc_ioctl_pkts;
	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;

	uint8_t			 sc_mbdata_done;
	uint8_t			 sc_pcireg64;
	uint8_t			 sc_mb_via_ctl;	/* set when fw shared info lacks USE_MAILBOX */
};
192 
/* A single contiguous DMA-able memory allocation. */
struct bwfm_pci_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	caddr_t			bdm_kva;	/* kernel mapping */
};

/* Accessors: map, length, device (bus) address and kernel address. */
#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BWFM_PCI_DMA_DVA(_bdm)	((uint64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BWFM_PCI_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
204 
205 int		 bwfm_pci_match(struct device *, void *, void *);
206 void		 bwfm_pci_attach(struct device *, struct device *, void *);
207 int		 bwfm_pci_detach(struct device *, int);
208 int		 bwfm_pci_activate(struct device *, int);
209 void		 bwfm_pci_cleanup(struct bwfm_pci_softc *);
210 
211 #if defined(__HAVE_FDT)
212 int		 bwfm_pci_read_otp(struct bwfm_pci_softc *);
213 void		 bwfm_pci_process_otp_tuple(struct bwfm_pci_softc *, uint8_t,
214 		    uint8_t, uint8_t *);
215 #endif
216 
217 int		 bwfm_pci_intr(void *);
218 void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
219 void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
220 uint32_t	 bwfm_pci_intr_status(struct bwfm_pci_softc *);
221 void		 bwfm_pci_intr_ack(struct bwfm_pci_softc *, uint32_t);
222 uint32_t	 bwfm_pci_intmask(struct bwfm_pci_softc *);
223 void		 bwfm_pci_hostready(struct bwfm_pci_softc *);
224 int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
225 		    size_t, const u_char *, size_t);
226 void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int );
227 
228 struct bwfm_pci_dmamem *
229 		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
230 		    bus_size_t);
231 void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
232 int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
233 		    struct bwfm_pci_pkts *);
234 int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
235 		    struct bwfm_pci_pkts *, struct mbuf *,
236 		    uint32_t *, paddr_t *);
237 struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
238 		    struct bwfm_pci_pkts *, uint32_t);
239 void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
240 		    struct if_rxring *, uint32_t);
241 void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
242 void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
243 int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
244 		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
245 int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
246 		    int, size_t);
247 
248 void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
249 		    struct bwfm_pci_msgring *);
250 void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
251 		    struct bwfm_pci_msgring *);
252 void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
253 		    struct bwfm_pci_msgring *);
254 void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
255 		    struct bwfm_pci_msgring *);
256 void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
257 		    struct bwfm_pci_msgring *);
258 void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
259 		    struct bwfm_pci_msgring *);
260 void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
261 		    struct bwfm_pci_msgring *, int, int *);
262 void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
263 		    struct bwfm_pci_msgring *, int *);
264 void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
265 		    struct bwfm_pci_msgring *, int);
266 void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
267 		    struct bwfm_pci_msgring *);
268 void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
269 		    struct bwfm_pci_msgring *, int);
270 
271 void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
272 		    struct bwfm_pci_msgring *, struct mbuf_list *);
273 void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *,
274 		    struct mbuf_list *);
275 
276 uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
277 void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
278 		    uint32_t);
279 int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
280 int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
281 void		 bwfm_pci_buscore_activate(struct bwfm_softc *, uint32_t);
282 
283 int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
284 		     struct mbuf *);
285 void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
286 		     struct mbuf *);
287 void		 bwfm_pci_flowring_create_cb(struct bwfm_softc *, void *);
288 void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);
289 void		 bwfm_pci_flowring_delete_cb(struct bwfm_softc *, void *);
290 
291 int		 bwfm_pci_preinit(struct bwfm_softc *);
292 void		 bwfm_pci_stop(struct bwfm_softc *);
293 int		 bwfm_pci_txcheck(struct bwfm_softc *);
294 int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf *);
295 
296 int		 bwfm_pci_send_mb_data(struct bwfm_pci_softc *, uint32_t);
297 void		 bwfm_pci_handle_mb_data(struct bwfm_pci_softc *);
298 
299 #ifdef BWFM_DEBUG
300 void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
301 #endif
302 
303 int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
304 		    int, char *, size_t *);
305 int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
306 		    int, char *, size_t);
307 void		 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *,
308 		    struct msgbuf_ioctl_resp_hdr *);
309 int		 bwfm_pci_msgbuf_h2d_mb_write(struct bwfm_pci_softc *,
310 		    uint32_t);
311 
/* Chip buscore access callbacks used by the generic chip code. */
struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};

/* Bus-level hooks invoked by the generic bwfm(4) layer. */
struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_preinit = bwfm_pci_preinit,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
};

/* msgbuf protocol: firmware dcmds travel over the control rings. */
struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
	.proto_rx = NULL,
	.proto_rxctl = NULL,
};

/* Autoconf glue. */
const struct cfattach bwfm_pci_ca = {
	sizeof(struct bwfm_pci_softc),
	bwfm_pci_match,
	bwfm_pci_attach,
	bwfm_pci_detach,
	bwfm_pci_activate,
};

/* PCI device ids this driver attaches to. */
static const struct pci_matchid bwfm_pci_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4356 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4371 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4378 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4387 },
};
352 
353 int
354 bwfm_pci_match(struct device *parent, void *match, void *aux)
355 {
356 	return (pci_matchbyid(aux, bwfm_pci_devices,
357 	    nitems(bwfm_pci_devices)));
358 }
359 
360 void
361 bwfm_pci_attach(struct device *parent, struct device *self, void *aux)
362 {
363 	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
364 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
365 	const char *intrstr;
366 	pci_intr_handle_t ih;
367 
368 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
369 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
370 	    NULL, &sc->sc_tcm_ios, 0)) {
371 		printf(": can't map bar1\n");
372 		return;
373 	}
374 
375 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
376 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
377 	    NULL, &sc->sc_reg_ios, 0)) {
378 		printf(": can't map bar0\n");
379 		goto bar1;
380 	}
381 
382 	sc->sc_pc = pa->pa_pc;
383 	sc->sc_tag = pa->pa_tag;
384 	sc->sc_id = pa->pa_id;
385 	sc->sc_dmat = pa->pa_dmat;
386 
387 	/* Map and establish the interrupt. */
388 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
389 		printf(": couldn't map interrupt\n");
390 		goto bar0;
391 	}
392 	intrstr = pci_intr_string(pa->pa_pc, ih);
393 
394 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
395 	    bwfm_pci_intr, sc, DEVNAME(sc));
396 	if (sc->sc_ih == NULL) {
397 		printf(": couldn't establish interrupt");
398 		if (intrstr != NULL)
399 			printf(" at %s", intrstr);
400 		printf("\n");
401 		goto bar1;
402 	}
403 	printf(": %s\n", intrstr);
404 
405 #if defined(__HAVE_FDT)
406 	sc->sc_sc.sc_node = PCITAG_NODE(pa->pa_tag);
407 	if (sc->sc_sc.sc_node) {
408 		if (OF_getproplen(sc->sc_sc.sc_node, "brcm,cal-blob") > 0) {
409 			sc->sc_sc.sc_calsize = OF_getproplen(sc->sc_sc.sc_node,
410 			    "brcm,cal-blob");
411 			sc->sc_sc.sc_cal = malloc(sc->sc_sc.sc_calsize,
412 			    M_DEVBUF, M_WAITOK);
413 			OF_getprop(sc->sc_sc.sc_node, "brcm,cal-blob",
414 			    sc->sc_sc.sc_cal, sc->sc_sc.sc_calsize);
415 		}
416 	}
417 #endif
418 
419 	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
420 	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
421 	bwfm_attach(&sc->sc_sc);
422 	config_mountroot(self, bwfm_attachhook);
423 	return;
424 
425 bar0:
426 	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
427 bar1:
428 	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
429 }
430 
/*
 * One-time dongle bring-up, called after the root filesystem is
 * mounted: attach the chip backend, download firmware and NVRAM,
 * parse the shared info area the firmware exports in TCM, set up all
 * message rings and packet-id tables, and post the initial RX
 * buffers.  Returns 0 on success, 1 on failure.
 */
int
bwfm_pci_preinit(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_ringinfo ringinfo;
	const char *chip = NULL;
	u_char *ucode, *nvram;
	size_t size, nvsize, nvlen;
	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint32_t idx_offset, reg;
	int i;

	/* Only initialize once. */
	if (sc->sc_initialized)
		return 0;

	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
		printf("%s: cannot attach chip\n", DEVNAME(sc));
		return 1;
	}

#if defined(__HAVE_FDT)
	if (bwfm_pci_read_otp(sc)) {
		printf("%s: cannot read OTP\n", DEVNAME(sc));
		return 1;
	}
#endif

	/*
	 * Read-modify-write of PCIe core config register 0x4e0.
	 * NOTE(review): writing the value back unchanged looks like it
	 * only latches/acks the register; confirm against Broadcom docs.
	 */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);

	/* Pick the firmware name for this chip/revision. */
	switch (bwfm->sc_chip.ch_chip) {
	case BRCM_CC_4350_CHIP_ID:
		if (bwfm->sc_chip.ch_chiprev <= 7)
			chip = "4350c2";
		else
			chip = "4350";
		break;
	case BRCM_CC_4355_CHIP_ID:
		chip = "4355c1";
		break;
	case BRCM_CC_4356_CHIP_ID:
		chip = "4356";
		break;
	case BRCM_CC_4364_CHIP_ID:
		if (bwfm->sc_chip.ch_chiprev <= 3)
			chip = "4364b2";
		else
			chip = "4364b3";
		break;
	case BRCM_CC_43602_CHIP_ID:
		chip = "43602";
		break;
	case BRCM_CC_4371_CHIP_ID:
		chip = "4371";
		break;
	case BRCM_CC_4377_CHIP_ID:
		chip = "4377b3";
		break;
	case BRCM_CC_4378_CHIP_ID:
		if (bwfm->sc_chip.ch_chiprev <= 3)
			chip = "4378b1";
		else
			chip = "4378b3";
		break;
	case BRCM_CC_4387_CHIP_ID:
		chip = "4387c2";
		break;
	default:
		printf("%s: unknown firmware for chip %s\n",
		    DEVNAME(sc), bwfm->sc_chip.ch_name);
		return 1;
	}

	if (bwfm_loadfirmware(bwfm, chip, "-pcie", &ucode, &size,
	    &nvram, &nvsize, &nvlen) != 0)
		return 1;

	/* Retrieve RAM size from firmware. */
	if (size >= BWFM_RAMSIZE + 8) {
		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
	}

	if (bwfm_pci_load_microcode(sc, ucode, size, nvram, nvlen) != 0) {
		printf("%s: could not load microcode\n",
		    DEVNAME(sc));
		free(ucode, M_DEVBUF, size);
		free(nvram, M_DEVBUF, nvsize);
		return 1;
	}
	free(ucode, M_DEVBUF, size);
	free(nvram, M_DEVBUF, nvsize);

	/* Low byte of the shared info word is the protocol version. */
	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_INFO);
	sc->sc_shared_version = sc->sc_shared_flags;
	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
		printf("%s: PCIe version %d unsupported\n",
		    DEVNAME(sc), sc->sc_shared_version);
		return 1;
	}

	/* Version 6+: advertise host capabilities back to firmware. */
	if (sc->sc_shared_version >= 6) {
		uint32_t host_cap;

		if ((sc->sc_shared_flags & BWFM_SHARED_INFO_USE_MAILBOX) == 0)
			sc->sc_mb_via_ctl = 1;

		host_cap = sc->sc_shared_version;
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_HOSTRDY_DB1)
			host_cap |= BWFM_SHARED_HOST_CAP_H2D_ENABLE_HOSTRDY;
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_SHARED_DAR)
			host_cap |= BWFM_SHARED_HOST_CAP_H2D_DAR;
		host_cap |= BWFM_SHARED_HOST_CAP_DS_NO_OOB_DW;

		bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_shared_address + BWFM_SHARED_HOST_CAP, host_cap);
		bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_shared_address + BWFM_SHARED_HOST_CAP2, 0);
	}

	/* Ring indices may live in host DMA memory, 2 or 4 bytes each. */
	sc->sc_dma_idx_sz = 0;
	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
			sc->sc_dma_idx_sz = sizeof(uint16_t);
		else
			sc->sc_dma_idx_sz = sizeof(uint32_t);
	}

	/* Maximum RX data buffers in the ring. */
	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
	if (sc->sc_max_rxbufpost == 0)
		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;

	/* Alternative offset of data in a packet */
	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);

	/* For Power Management */
	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);

	/* Ring information */
	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);

	/* Firmware's "dmesg" */
	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);

	/* Read ring information. */
	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));

	/* Pre-v6 firmware reports fewer fields; derive the rest. */
	if (sc->sc_shared_version >= 6) {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
	} else {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_flowrings = sc->sc_max_submissionrings -
		    BWFM_NUM_TX_MSGRINGS;
		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
	}

	if (sc->sc_dma_idx_sz == 0) {
		/* Ring indices live in TCM at firmware-supplied offsets. */
		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(uint32_t);
	} else {
		uint64_t address;

		/* Each TX/RX Ring has a Read and Write Ptr */
		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
		    sc->sc_dma_idx_bufsz, 8);
		if (sc->sc_dma_idx_buf == NULL) {
			/* XXX: Fallback to TCM? */
			printf("%s: cannot allocate idx buf\n",
			    DEVNAME(sc));
			return 1;
		}

		/*
		 * Lay out the index buffer as four consecutive arrays
		 * (h2d write, h2d read, d2h write, d2h read) and tell
		 * the firmware their DMA addresses.
		 */
		idx_offset = sc->sc_dma_idx_sz;
		h2d_w_idx_ptr = 0;
		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
		ringinfo.h2d_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr_high =
		    htole32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
		    sc->sc_max_completionrings * idx_offset;
		address += sc->sc_max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr_high =
		    htole32(address >> 32);

		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
	}

	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* TX rxpost ring: Send clean data mbufs for RX */
	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 1024, 32,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* RX completion rings: recv our filled buffers back */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* Version 7+ uses larger completion descriptors. */
	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024,
	    sc->sc_shared_version >= 7 ? 24 : 16,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 1024,
	    sc->sc_shared_version >= 7 ? 40 : 32,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;

	/* Dynamic TX rings for actual data */
	sc->sc_flowrings = malloc(sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
		/* Flowring indices follow the two static submit rings. */
		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
	}

	/* Scratch and ring update buffers for firmware */
	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);

	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bwfm_pci_intr_enable(sc);
	bwfm_pci_hostready(sc);

	/* Maps RX mbufs to a packet id and back. */
	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
	sc->sc_rx_pkts.pkts = malloc(BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_CTL_PKT_SIZE,
		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_CTL_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx_pkts.pkts[i].bb_map);

	/* Maps TX mbufs to a packet id and back. */
	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
	sc->sc_tx_pkts.pkts = malloc(BWFM_NUM_TX_PKTIDS
	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx_pkts.pkts[i].bb_map);
	sc->sc_tx_pkts_full = 0;

	/* Maps IOCTL mbufs to a packet id and back. */
	sc->sc_ioctl_pkts.npkt = BWFM_NUM_IOCTL_PKTIDS;
	sc->sc_ioctl_pkts.pkts = malloc(BWFM_NUM_IOCTL_PKTIDS
	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_IOCTL_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_ioctl_pkts.pkts[i].bb_map);

	/*
	 * For whatever reason, could also be a bug somewhere in this
	 * driver, the firmware needs a bunch of RX buffers otherwise
	 * it won't send any RX complete messages.
	 */
	if_rxr_init(&sc->sc_rxbuf_ring, min(256, sc->sc_max_rxbufpost),
	    sc->sc_max_rxbufpost);
	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
	if_rxr_init(&sc->sc_event_ring, 8, 8);
	bwfm_pci_fill_rx_rings(sc);

	TAILQ_INIT(&sc->sc_ioctlq);

#ifdef BWFM_DEBUG
	sc->sc_console_readidx = 0;
	bwfm_pci_debug_console(sc);
#endif

	sc->sc_initialized = 1;
	return 0;

cleanup:
	/* NOTE(review): sc_flowrings is not freed here; leaks on this path. */
	if (sc->sc_ringupd_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	if (sc->sc_scratch_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	if (sc->sc_rx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	if (sc->sc_tx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	if (sc->sc_ctrl_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	if (sc->sc_rxpost_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	if (sc->sc_ctrl_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 1;
}
804 
/*
 * Download firmware and NVRAM into dongle RAM and start the ARM core.
 * The last 32-bit word of RAM is the handshake location: the host
 * zeroes it before starting the core, and the firmware overwrites it
 * with the address of its shared info area once it is up.  On success
 * that address is stored in sc_shared_address.  Returns 0 on success,
 * 1 on failure.
 */
int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size,
    const u_char *nvram, size_t nvlen)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	struct bwfm_pci_random_seed_footer footer;
	uint32_t addr, shared, written;
	uint8_t *rndbuf;
	int i;

	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		/*
		 * NOTE(review): zeroes the CR4 BANKPDA register for
		 * banks 5 and 7 before loading — presumably powers up
		 * those RAM banks; confirm against Broadcom docs.
		 */
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	/* Copy the firmware image to the base of device RAM. */
	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	if (nvram) {
		/* NVRAM goes at the very top of RAM. */
		addr = bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize -
		    nvlen;
		for (i = 0; i < nvlen; i++)
			bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
			    addr + i, nvram[i]);

		/*
		 * Below the NVRAM, place a footer (magic + length)
		 * followed by random bytes to seed the firmware's RNG.
		 */
		footer.length = htole32(BWFM_RANDOM_SEED_LENGTH);
		footer.magic = htole32(BWFM_RANDOM_SEED_MAGIC);
		addr -= sizeof(footer);
		for (i = 0; i < sizeof(footer); i++)
			bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
			    addr + i, ((uint8_t *)&footer)[i]);

		rndbuf = malloc(BWFM_RANDOM_SEED_LENGTH, M_TEMP, M_WAITOK);
		arc4random_buf(rndbuf, BWFM_RANDOM_SEED_LENGTH);
		addr -= BWFM_RANDOM_SEED_LENGTH;
		for (i = 0; i < BWFM_RANDOM_SEED_LENGTH; i++)
			bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
			    addr + i, rndbuf[i]);
		free(rndbuf, M_TEMP, BWFM_RANDOM_SEED_LENGTH);
	}

	/* Remember the handshake word's value before starting the core. */
	written = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(uint32_t *)ucode);

	/* Poll up to 100 * 50ms for firmware to update the handshake. */
	for (i = 0; i < 100; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared != written)
			break;
	}
	if (shared == written) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}
	/* Sanity-check: the shared area must live inside device RAM. */
	if (shared < bwfm->sc_chip.ch_rambase ||
	    shared >= bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize) {
		printf("%s: invalid shared RAM address 0x%08x\n", DEVNAME(sc),
		    shared);
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}
890 
/*
 * Autoconf detach hook.  Tear down the generic bwfm(4) layer first,
 * then release all PCI/DMA resources held by this front-end.
 */
int
bwfm_pci_detach(struct device *self, int flags)
{
	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;

	bwfm_detach(&sc->sc_sc, flags);
	bwfm_pci_cleanup(sc);

	return 0;
}
901 
/*
 * Release every DMA resource set up at init time.  Called from detach
 * and from the resume path when the device cannot be woken up; clears
 * sc_initialized so the next attach/init rebuilds everything.
 */
void
bwfm_pci_cleanup(struct bwfm_pci_softc *sc)
{
	int i;

	/* Destroy per-packet DMA maps; free mbufs still owned by the table. */
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_pkts.pkts[i].bb_map);
		if (sc->sc_rx_pkts.pkts[i].bb_m)
			m_freem(sc->sc_rx_pkts.pkts[i].bb_m);
	}
	free(sc->sc_rx_pkts.pkts, M_DEVBUF, BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf));

	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_pkts.pkts[i].bb_map);
		if (sc->sc_tx_pkts.pkts[i].bb_m)
			m_freem(sc->sc_tx_pkts.pkts[i].bb_m);
	}
	free(sc->sc_tx_pkts.pkts, M_DEVBUF, BWFM_NUM_TX_PKTIDS *
	    sizeof(struct bwfm_pci_buf));

	for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_ioctl_pkts.pkts[i].bb_map);
		if (sc->sc_ioctl_pkts.pkts[i].bb_m)
			m_freem(sc->sc_ioctl_pkts.pkts[i].bb_m);
	}
	free(sc->sc_ioctl_pkts.pkts, M_DEVBUF, BWFM_NUM_IOCTL_PKTIDS *
	    sizeof(struct bwfm_pci_buf));

	/* Flowring DMA memory only exists for rings that were opened. */
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (sc->sc_flowrings[i].status >= RING_OPEN)
			bwfm_pci_dmamem_free(sc, sc->sc_flowrings[i].ring);
	}
	free(sc->sc_flowrings, M_DEVBUF, sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring));

	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	/* The host-resident DMA index buffer is optional. */
	if (sc->sc_dma_idx_buf) {
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
		sc->sc_dma_idx_buf = NULL;
	}

	sc->sc_initialized = 0;
}
952 
/*
 * Power management hook.  On suspend (DVACT_QUIESCE) tell the firmware
 * to enter D3 via a host-to-device mailbox message and wait up to two
 * seconds for the ack (the interrupt path sets sc_mbdata_done).  On
 * resume (DVACT_WAKEUP) announce D0; if the device cannot be resumed,
 * tear down all state so bwfm_activate() triggers a full re-init.
 */
int
bwfm_pci_activate(struct device *self, int act)
{
	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
	struct bwfm_softc *bwfm = (void *)sc;
	int error = 0;

	switch (act) {
	case DVACT_QUIESCE:
		error = bwfm_activate(bwfm, act);
		if (error)
			return error;
		if (sc->sc_initialized) {
			sc->sc_mbdata_done = 0;
			error = bwfm_pci_send_mb_data(sc,
			    BWFM_PCI_H2D_HOST_D3_INFORM);
			if (error)
				return error;
			/* Wait for the D3 ack from bwfm_pci_msg_rx(). */
			tsleep_nsec(&sc->sc_mbdata_done, PCATCH,
			    DEVNAME(sc), SEC_TO_NSEC(2));
			if (!sc->sc_mbdata_done)
				return ETIMEDOUT;
		}
		break;
	case DVACT_WAKEUP:
		if (sc->sc_initialized) {
			/* If device can't be resumed, re-init. */
			if (bwfm_pci_intmask(sc) == 0 ||
			    bwfm_pci_send_mb_data(sc,
			    BWFM_PCI_H2D_HOST_D0_INFORM) != 0) {
				bwfm_cleanup(bwfm);
				bwfm_pci_cleanup(sc);
			} else {
				bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
				bwfm_pci_intr_enable(sc);
				bwfm_pci_hostready(sc);
			}
		}
		error = bwfm_activate(bwfm, act);
		if (error)
			return error;
		break;
	default:
		break;
	}

	return 0;
}
1001 
1002 #if defined(__HAVE_FDT)
/*
 * Read the chip's OTP (one-time programmable) memory through the
 * SROM/OTP shadow area and feed the tuples found there to
 * bwfm_pci_process_otp_tuple().  Only the chips listed below carry
 * the board information we look for; others succeed without doing
 * anything.  Returns 0 on success, 1 if the required core is missing.
 */
int
bwfm_pci_read_otp(struct bwfm_pci_softc *sc)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t coreid, base, words;
	uint32_t page, offset, sromctl;
	uint8_t *otp;
	int i;

	/* Per-chip shadow-area offset (base) and size in 16-bit words. */
	switch (bwfm->sc_chip.ch_chip) {
	case BRCM_CC_4355_CHIP_ID:
		coreid = BWFM_AGENT_CORE_CHIPCOMMON;
		base = 0x8c0;
		words = 0xb2;
		break;
	case BRCM_CC_4364_CHIP_ID:
		coreid = BWFM_AGENT_CORE_CHIPCOMMON;
		base = 0x8c0;
		words = 0x1a0;
		break;
	case BRCM_CC_4377_CHIP_ID:
	case BRCM_CC_4378_CHIP_ID:
		coreid = BWFM_AGENT_CORE_GCI;
		base = 0x1120;
		words = 0x170;
		break;
	case BRCM_CC_4387_CHIP_ID:
		coreid = BWFM_AGENT_CORE_GCI;
		base = 0x113c;
		words = 0x170;
		break;
	default:
		return 0;
	}

	core = bwfm_chip_get_core(bwfm, coreid);
	if (core == NULL)
		return 1;

	/* Map OTP to shadow area */
	if (coreid == BWFM_AGENT_CORE_CHIPCOMMON) {
		bwfm_pci_select_core(sc, coreid);
		sromctl = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_CHIP_REG_SROMCONTROL);

		/* No OTP on this part: nothing to read. */
		if (!(sromctl & BWFM_CHIP_REG_SROMCONTROL_OTP_PRESENT))
			return 0;

		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_CHIP_REG_SROMCONTROL, sromctl |
		    BWFM_CHIP_REG_SROMCONTROL_OTPSEL);
	}

	/* Map bus window to SROM/OTP shadow area */
	page = (core->co_base + base) & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = (core->co_base + base) & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);

	/* Copy the shadow area into host memory, 16 bits at a time. */
	otp = mallocarray(words, sizeof(uint16_t), M_TEMP, M_WAITOK);
	for (i = 0; i < words; i++)
		((uint16_t *)otp)[i] = bus_space_read_2(sc->sc_reg_iot,
		    sc->sc_reg_ioh, offset + i * sizeof(uint16_t));

	/* Unmap OTP */
	if (coreid == BWFM_AGENT_CORE_CHIPCOMMON) {
		bwfm_pci_select_core(sc, coreid);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_CHIP_REG_SROMCONTROL, sromctl);
	}

	/*
	 * Walk the tuple stream: otp[i] is the type, otp[i+1] the length,
	 * payload follows.  NOTE(review): the index advances by the length
	 * byte alone, which implies the length covers the two header bytes
	 * as well -- worth confirming against the OTP layout.
	 */
	for (i = 0; i < (words * sizeof(uint16_t)) - 1; i += otp[i + 1]) {
		if (otp[i + 0] == 0)
			break;
		if (i + otp[i + 1] > words * sizeof(uint16_t))
			break;
		bwfm_pci_process_otp_tuple(sc, otp[i + 0], otp[i + 1],
		    &otp[i + 2]);
	}

	free(otp, M_TEMP, words * sizeof(uint16_t));
	return 0;
}
1086 
/*
 * Parse one OTP tuple.  Type 0x15 (system vendor OTP) holds a list of
 * "X=value" settings separated by one-byte spacers; the keys we care
 * about identify the chip revision (s), module (M), module revision
 * (m) and vendor (V).  The extracted strings select which firmware
 * files the generic bwfm(4) code will look for.  Note that the parser
 * NUL-terminates values in place, i.e. it modifies the data buffer.
 */
void
bwfm_pci_process_otp_tuple(struct bwfm_pci_softc *sc, uint8_t type, uint8_t size,
    uint8_t *data)
{
	struct bwfm_softc *bwfm = (void *)sc;
	char chiprev[8] = "", module[8] = "", modrev[8] = "", vendor[8] = "", chip[8] = "";
	char board_type[128] = "";
	int len;

	switch (type) {
	case 0x15: /* system vendor OTP */
		DPRINTF(("%s: system vendor OTP\n", DEVNAME(sc)));
		/* Payload must start with the magic sequence 08 00 00 00. */
		if (size < sizeof(uint32_t))
			return;
		if (data[0] != 0x08 || data[1] != 0x00 ||
		    data[2] != 0x00 || data[3] != 0x00)
			return;
		size -= sizeof(uint32_t);
		data += sizeof(uint32_t);
		while (size) {
			/* reached end */
			if (data[0] == 0xff)
				break;
			/* Entry runs until NUL, space or 0xff. */
			for (len = 0; len < size; len++)
				if (data[len] == 0x00 || data[len] == ' ' ||
				    data[len] == 0xff)
					break;
			if (len < 3 || len > 9) /* X=abcdef */
				goto next;
			if (data[1] != '=')
				goto next;
			/* NULL-terminate string */
			if (data[len] == ' ')
				data[len] = '\0';
			switch (data[0]) {
			case 's':
				strlcpy(chiprev, &data[2], sizeof(chiprev));
				break;
			case 'M':
				strlcpy(module, &data[2], sizeof(module));
				break;
			case 'm':
				strlcpy(modrev, &data[2], sizeof(modrev));
				break;
			case 'V':
				strlcpy(vendor, &data[2], sizeof(vendor));
				break;
			}
next:
			/* skip content */
			data += len;
			size -= len;
			/* skip spacer tag */
			if (size) {
				data++;
				size--;
			}
		}
		/* Chips numbered above 40000 use decimal names, others hex. */
		snprintf(chip, sizeof(chip),
		    bwfm->sc_chip.ch_chip > 40000 ? "%05d" : "%04x",
		    bwfm->sc_chip.ch_chip);
		if (sc->sc_sc.sc_node) {
			/* Apple boards ship their firmware in a subdirectory. */
			OF_getprop(sc->sc_sc.sc_node, "brcm,board-type",
			    board_type, sizeof(board_type));
			if (strncmp(board_type, "apple,", 6) == 0) {
				strlcpy(sc->sc_sc.sc_fwdir, "apple-bwfm/",
				    sizeof(sc->sc_sc.sc_fwdir));
			}
		}
		strlcpy(sc->sc_sc.sc_board_type, board_type,
		    sizeof(sc->sc_sc.sc_board_type));
		strlcpy(sc->sc_sc.sc_module, module,
		    sizeof(sc->sc_sc.sc_module));
		strlcpy(sc->sc_sc.sc_vendor, vendor,
		    sizeof(sc->sc_sc.sc_vendor));
		strlcpy(sc->sc_sc.sc_modrev, modrev,
		    sizeof(sc->sc_sc.sc_modrev));
		break;
	case 0x80: /* Broadcom CIS */
		DPRINTF(("%s: Broadcom CIS\n", DEVNAME(sc)));
		break;
	default:
		DPRINTF(("%s: unknown OTP tuple\n", DEVNAME(sc)));
		break;
	}
}
1173 #endif
1174 
1175 /* DMA code */
/*
 * Allocate a single physically contiguous, coherent DMA buffer of the
 * given size and alignment, map it into kernel virtual memory, load it
 * into a DMA map and zero it.  Returns NULL on failure; the goto chain
 * unwinds exactly the steps that already succeeded, in reverse order.
 */
struct bwfm_pci_dmamem *
bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
{
	struct bwfm_pci_dmamem *bdm;
	int nsegs;

	bdm = malloc(sizeof(*bdm), M_DEVBUF, M_WAITOK | M_ZERO);
	bdm->bdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
	    &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(bdm->bdm_kva, size);

	return (bdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
bdmfree:
	free(bdm, M_DEVBUF, sizeof(*bdm));

	return (NULL);
}
1216 
/*
 * Release a buffer obtained from bwfm_pci_dmamem_alloc(), undoing the
 * map/alloc/create steps in reverse order.
 */
void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	free(bdm, M_DEVBUF, sizeof(*bdm));
}
1225 
1226 /*
1227  * We need a simple mapping from a packet ID to mbufs, because when
1228  * a transfer completed, we only know the ID so we have to look up
1229  * the memory for the ID.  This simply looks for an empty slot.
1230  */
1231 int
1232 bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
1233 {
1234 	int i, idx;
1235 
1236 	idx = pkts->last + 1;
1237 	for (i = 0; i < pkts->npkt; i++) {
1238 		if (idx == pkts->npkt)
1239 			idx = 0;
1240 		if (pkts->pkts[idx].bb_m == NULL)
1241 			return 0;
1242 		idx++;
1243 	}
1244 	return ENOBUFS;
1245 }
1246 
/*
 * Assign a free packet ID to the mbuf and DMA-load it, returning the
 * ID and the physical address of its first segment.  On a fragmented
 * mbuf that doesn't fit the map, defragment once and retry.  Returns
 * EFBIG if the mbuf still can't be loaded, ENOBUFS if no ID is free;
 * on failure the caller keeps ownership of the mbuf.
 */
int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf *m, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	/* Start just after the last slot we handed out. */
	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0) {
				/* Too many segments: compact and retry once. */
				if (m_defrag(m, M_DONTWAIT))
					return EFBIG;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
			    0, pkts->pkts[idx].bb_map->dm_mapsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			pkts->last = idx;
			pkts->pkts[idx].bb_m = m;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}
1279 
/*
 * Look up the mbuf for a completed transfer by packet ID, unload its
 * DMA map and release the ID.  Returns NULL for an unknown or already
 * freed ID; otherwise ownership of the mbuf passes to the caller.
 */
struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
	    pkts->pkts[pktid].bb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}
1296 
/*
 * Top up all host-to-device buffer-post rings: data RX buffers plus
 * the ioctl-response and event buffer pools.
 */
void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}
1306 
/*
 * Post receive buffers for ioctl responses or firmware events on the
 * control submit ring.  Each buffer gets a packet ID so the completion
 * handler can find the mbuf again; rxring accounting limits how many
 * buffers are outstanding (at most 8 new ones per call).  Any step
 * that fails rolls back the reserved descriptor before bailing out.
 */
void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_CTL_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_CTL_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		/* Describe the buffer to the firmware as a 64-bit address. */
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_CTL_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	/* Return the slots we did not use. */
	if_rxr_put(rxring, slots);
	splx(s);
}
1346 
/*
 * Post data RX buffers on the rxpost submit ring, up to the firmware's
 * advertised sc_max_rxbufpost limit.  Mirrors the structure of
 * bwfm_pci_fill_rx_ioctl_ring(): reserve a descriptor, allocate and
 * register an mbuf, and roll back the descriptor if anything fails.
 */
void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	/* Return the slots we did not use. */
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}
1386 
/*
 * Initialize one of the fixed (non-flow) message rings: set up the
 * host-side bookkeeping, allocate its DMA backing and program the
 * ring descriptor in device TCM (base address, item count and item
 * size).  *ring_mem is advanced past the descriptor just written so
 * consecutive calls lay out the descriptors back to back.
 */
int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	/* Each ring's index words live idx_off bytes apart. */
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}
1417 
/*
 * Initialize a TX flowring: reset the host-side ring state and
 * allocate DMA backing.  Unlike bwfm_pci_setup_ring() no TCM ring
 * descriptor is written here; flowrings are announced to the firmware
 * via a flowring-create message instead.
 */
int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}
1434 
1435 /* Ring helpers */
/*
 * Ring the host-to-device doorbell to tell the firmware new work was
 * submitted.  Chips with the DAR register layout use a different
 * mailbox register offset.
 */
void
bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_shared_flags & BWFM_SHARED_INFO_SHARED_DAR)
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_64_PCIE2REG_H2D_MAILBOX_0, 1);
	else
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_PCIE2REG_H2D_MAILBOX_0, 1);
}
1447 
/*
 * Refresh our cached read pointer from the firmware.  Depending on
 * negotiation the index either lives in device TCM or in a shared
 * host-memory buffer the device updates via DMA (sc_dma_idx_buf).
 */
void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		/* Sync before reading the device-written index word. */
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}
1463 
/*
 * Refresh our cached write pointer from the firmware; counterpart of
 * bwfm_pci_ring_update_rptr() for device-to-host rings.
 */
void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		/* Sync before reading the device-written index word. */
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}
1479 
/*
 * Publish our read pointer to the firmware, either directly into
 * device TCM or through the shared DMA index buffer (in which case
 * the store must be synced before the device may read it).
 */
void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1495 
/*
 * Publish our write pointer to the firmware; counterpart of
 * bwfm_pci_ring_write_rptr() for host-to-device rings.
 */
void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1511 
1512 /*
1513  * Retrieve a free descriptor to put new stuff in, but don't commit
1514  * to it yet so we can rollback later if any error occurs.
1515  */
1516 void *
1517 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
1518     struct bwfm_pci_msgring *ring)
1519 {
1520 	int available;
1521 	char *ret;
1522 
1523 	bwfm_pci_ring_update_rptr(sc, ring);
1524 
1525 	if (ring->r_ptr > ring->w_ptr)
1526 		available = ring->r_ptr - ring->w_ptr;
1527 	else
1528 		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1529 
1530 	if (available <= 1)
1531 		return NULL;
1532 
1533 	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
1534 	ring->w_ptr += 1;
1535 	if (ring->w_ptr == ring->nitem)
1536 		ring->w_ptr = 0;
1537 	return ret;
1538 }
1539 
/*
 * Reserve up to count descriptors at once.  *avail is set to how many
 * were actually reserved; it is clamped at the end of the ring so the
 * returned slice is always contiguous and never wraps.
 */
void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	/* One slot always stays unused to distinguish full from empty. */
	if (available <= 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = min(count, available - 1);
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}
1566 
1567 /*
1568  * Read number of descriptors available (submitted by the firmware)
1569  * and retrieve pointer to first descriptor.
1570  */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

	/*
	 * Only the contiguous run up to the end of the ring is reported;
	 * the caller loops again after the read pointer wraps.
	 */
	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;

	/* Make the device-written descriptors visible to the CPU. */
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}
1590 
1591 /*
1592  * Let firmware know we read N descriptors.
1593  */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	/* Advance (and wrap) our read pointer, then publish it. */
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}
1603 
1604 /*
1605  * Let firmware know that we submitted some descriptors.
1606  */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	/* Flush descriptor writes before the device may fetch them. */
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}
1617 
1618 /*
1619  * Rollback N descriptors in case we don't actually want
1620  * to commit to it.
1621  */
void
bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	/* Move the write pointer back nitem slots, wrapping backwards. */
	if (ring->w_ptr == 0)
		ring->w_ptr = ring->nitem - nitem;
	else
		ring->w_ptr -= nitem;
}
1631 
1632 /*
1633  * Foreach written descriptor on the ring, pass the descriptor to
1634  * a message handler and let the firmware know we handled it.
1635  */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    struct mbuf_list *ml)
{
	void *buf;
	int avail, processed;

again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset, ml);
		buf += ring->itemsz;
		processed++;
		/* Return descriptors in batches so firmware can refill. */
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	/*
	 * read_avail only reports up to the end of the ring; if we just
	 * wrapped, go back for the descriptors at the start.
	 */
	if (ring->r_ptr == 0)
		goto again;
}
1664 
1665 void
1666 bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf, struct mbuf_list *ml)
1667 {
1668 	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
1669 	struct msgbuf_ioctl_resp_hdr *resp;
1670 	struct msgbuf_tx_status *tx;
1671 	struct msgbuf_rx_complete *rx;
1672 	struct msgbuf_rx_event *event;
1673 	struct msgbuf_d2h_mailbox_data *d2h;
1674 	struct msgbuf_common_hdr *msg;
1675 	struct msgbuf_flowring_create_resp *fcr;
1676 	struct msgbuf_flowring_delete_resp *fdr;
1677 	struct bwfm_cmd_flowring_create fdcmd;
1678 	struct bwfm_pci_msgring *ring;
1679 	struct mbuf *m;
1680 	int flowid;
1681 
1682 	msg = (struct msgbuf_common_hdr *)buf;
1683 	switch (msg->msgtype)
1684 	{
1685 	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1686 		fcr = (struct msgbuf_flowring_create_resp *)buf;
1687 		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
1688 		if (flowid < 2)
1689 			break;
1690 		flowid -= 2;
1691 		if (flowid >= sc->sc_max_flowrings)
1692 			break;
1693 		ring = &sc->sc_flowrings[flowid];
1694 		if (ring->status != RING_OPENING)
1695 			break;
1696 		if (fcr->compl_hdr.status) {
1697 			printf("%s: failed to open flowring %d\n",
1698 			    DEVNAME(sc), flowid);
1699 			ring->status = RING_CLOSED;
1700 			if (ring->m) {
1701 				m_freem(ring->m);
1702 				ring->m = NULL;
1703 			}
1704 			ifq_restart(&ifp->if_snd);
1705 			break;
1706 		}
1707 		ring->status = RING_OPEN;
1708 		if (ring->m != NULL) {
1709 			m = ring->m;
1710 			ring->m = NULL;
1711 			if (bwfm_pci_txdata(&sc->sc_sc, m))
1712 				m_freem(ring->m);
1713 		}
1714 		ifq_restart(&ifp->if_snd);
1715 		break;
1716 	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1717 		fdr = (struct msgbuf_flowring_delete_resp *)buf;
1718 		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
1719 		if (flowid < 2)
1720 			break;
1721 		flowid -= 2;
1722 		if (flowid >= sc->sc_max_flowrings)
1723 			break;
1724 		ring = &sc->sc_flowrings[flowid];
1725 		if (ring->status != RING_CLOSING)
1726 			break;
1727 		if (fdr->compl_hdr.status) {
1728 			printf("%s: failed to delete flowring %d\n",
1729 			    DEVNAME(sc), flowid);
1730 			break;
1731 		}
1732 		fdcmd.flowid = flowid;
1733 		bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_delete_cb,
1734 		    &fdcmd, sizeof(fdcmd));
1735 		break;
1736 	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1737 		m = bwfm_pci_pktid_free(sc, &sc->sc_ioctl_pkts,
1738 		    letoh32(msg->request_id));
1739 		if (m == NULL)
1740 			break;
1741 		m_freem(m);
1742 		break;
1743 	case MSGBUF_TYPE_IOCTL_CMPLT:
1744 		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
1745 		bwfm_pci_msgbuf_rxioctl(sc, resp);
1746 		if_rxr_put(&sc->sc_ioctl_ring, 1);
1747 		bwfm_pci_fill_rx_rings(sc);
1748 		break;
1749 	case MSGBUF_TYPE_WL_EVENT:
1750 		event = (struct msgbuf_rx_event *)buf;
1751 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1752 		    letoh32(event->msg.request_id));
1753 		if (m == NULL)
1754 			break;
1755 		m_adj(m, sc->sc_rx_dataoffset);
1756 		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
1757 		bwfm_rx(&sc->sc_sc, m, ml);
1758 		if_rxr_put(&sc->sc_event_ring, 1);
1759 		bwfm_pci_fill_rx_rings(sc);
1760 		break;
1761 	case MSGBUF_TYPE_TX_STATUS:
1762 		tx = (struct msgbuf_tx_status *)buf;
1763 		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
1764 		    letoh32(tx->msg.request_id) - 1);
1765 		if (m == NULL)
1766 			break;
1767 		m_freem(m);
1768 		if (sc->sc_tx_pkts_full) {
1769 			sc->sc_tx_pkts_full = 0;
1770 			ifq_restart(&ifp->if_snd);
1771 		}
1772 		break;
1773 	case MSGBUF_TYPE_RX_CMPLT:
1774 		rx = (struct msgbuf_rx_complete *)buf;
1775 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1776 		    letoh32(rx->msg.request_id));
1777 		if (m == NULL)
1778 			break;
1779 		if (letoh16(rx->data_offset))
1780 			m_adj(m, letoh16(rx->data_offset));
1781 		else if (sc->sc_rx_dataoffset)
1782 			m_adj(m, sc->sc_rx_dataoffset);
1783 		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
1784 		bwfm_rx(&sc->sc_sc, m, ml);
1785 		if_rxr_put(&sc->sc_rxbuf_ring, 1);
1786 		bwfm_pci_fill_rx_rings(sc);
1787 		break;
1788 	case MSGBUF_TYPE_D2H_MAILBOX_DATA:
1789 		d2h = (struct msgbuf_d2h_mailbox_data *)buf;
1790 		if (d2h->data & BWFM_PCI_D2H_DEV_D3_ACK) {
1791 			sc->sc_mbdata_done = 1;
1792 			wakeup(&sc->sc_mbdata_done);
1793 		}
1794 		break;
1795 	default:
1796 		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
1797 		break;
1798 	}
1799 }
1800 
1801 /* Bus core helpers */
/* Bus core helpers */
/*
 * Point the BAR0 register window at the given core's register space.
 * The window write is read back and retried once, since a single
 * config write has been observed not to stick on some hardware.
 */
void
bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;

	core = bwfm_chip_get_core(bwfm, id);
	if (core == NULL) {
		printf("%s: could not find core to select", DEVNAME(sc));
		return;
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW, core->co_base);
	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    BWFM_PCI_BAR0_WINDOW, core->co_base);
}
1821 
1822 uint32_t
1823 bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
1824 {
1825 	struct bwfm_pci_softc *sc = (void *)bwfm;
1826 	uint32_t page, offset;
1827 
1828 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1829 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1830 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1831 	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
1832 }
1833 
1834 void
1835 bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
1836 {
1837 	struct bwfm_pci_softc *sc = (void *)bwfm;
1838 	uint32_t page, offset;
1839 
1840 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1841 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1842 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1843 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
1844 }
1845 
/* Nothing to prepare before a buscore reset on the PCI attachment. */
int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}
1851 
/*
 * Reset the chip through the ChipCommon watchdog while keeping the
 * PCIe link alive: temporarily disable ASPM, fire the watchdog, wait
 * for the reset, then restore the link state.  On older PCIe cores
 * (rev <= 13) a set of config registers is read back and rewritten to
 * retrain them after reset; cores rev >= 64 switch the driver to the
 * 64-style register layout.  Any stale interrupt status is acked.
 */
int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	/* Save link control state and disable ASPM across the reset. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	/* Trigger a chip reset via the ChipCommon watchdog. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		/* Rewrite these config registers to refresh them. */
		uint16_t cfg_offset[] = {
		    BWFM_PCI_CFGREG_STATUS_CMD,
		    BWFM_PCI_CFGREG_PM_CSR,
		    BWFM_PCI_CFGREG_MSI_CAP,
		    BWFM_PCI_CFGREG_MSI_ADDR_L,
		    BWFM_PCI_CFGREG_MSI_ADDR_H,
		    BWFM_PCI_CFGREG_MSI_DATA,
		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
		    BWFM_PCI_CFGREG_RBAR_CTRL,
		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}
	if (core->co_rev >= 64)
		sc->sc_pcireg64 = 1;

	/* Clear any interrupt left pending from before the reset. */
	reg = bwfm_pci_intr_status(sc);
	if (reg != 0xffffffff)
		bwfm_pci_intr_ack(sc, reg);

	return 0;
}
1911 
/*
 * Activate the downloaded firmware by writing the reset vector to the
 * start of TCM (device-internal memory mapped through sc_tcm_ioh).
 */
void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}
1918 
/*
 * Map an ieee80211_classify() priority (0-7, IP precedence based) to
 * one of the four firmware TX FIFOs used by the flowring code below.
 */
static int bwfm_pci_prio2fifo[8] = {
	0, /* best effort */
	1, /* IPTOS_PREC_IMMEDIATE */
	1, /* IPTOS_PREC_PRIORITY */
	0, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};
1929 
1930 int
1931 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
1932 {
1933 	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1934 #ifndef IEEE80211_STA_ONLY
1935 	uint8_t *da = mtod(m, uint8_t *);
1936 #endif
1937 	int flowid, prio, fifo;
1938 	int i, found;
1939 
1940 	prio = ieee80211_classify(ic, m);
1941 	fifo = bwfm_pci_prio2fifo[prio];
1942 
1943 	switch (ic->ic_opmode)
1944 	{
1945 	case IEEE80211_M_STA:
1946 		flowid = fifo;
1947 		break;
1948 #ifndef IEEE80211_STA_ONLY
1949 	case IEEE80211_M_HOSTAP:
1950 		if (ETHER_IS_MULTICAST(da))
1951 			da = etherbroadcastaddr;
1952 		flowid = da[5] * 2 + fifo;
1953 		break;
1954 #endif
1955 	default:
1956 		printf("%s: state not supported\n", DEVNAME(sc));
1957 		return ENOBUFS;
1958 	}
1959 
1960 	found = 0;
1961 	flowid = flowid % sc->sc_max_flowrings;
1962 	for (i = 0; i < sc->sc_max_flowrings; i++) {
1963 		if (ic->ic_opmode == IEEE80211_M_STA &&
1964 		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1965 		    sc->sc_flowrings[flowid].fifo == fifo) {
1966 			found = 1;
1967 			break;
1968 		}
1969 #ifndef IEEE80211_STA_ONLY
1970 		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
1971 		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
1972 		    sc->sc_flowrings[flowid].fifo == fifo &&
1973 		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
1974 			found = 1;
1975 			break;
1976 		}
1977 #endif
1978 		flowid = (flowid + 1) % sc->sc_max_flowrings;
1979 	}
1980 
1981 	if (found)
1982 		return flowid;
1983 
1984 	return -1;
1985 }
1986 
/*
 * Claim a free flowring slot for this mbuf's destination/priority and
 * start asynchronous creation of the ring in firmware.  The mbuf is
 * carried in the async command and transmitted by the completion path
 * once the ring is open; while the slot is in RING_OPENING state,
 * bwfm_pci_txcheck() holds off further transmissions.
 */
void
bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create cmd;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	struct bwfm_pci_msgring *ring;
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	/* Same slot hash as bwfm_pci_flowring_lookup(). */
	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return;
	}

	/* Probe linearly for a closed slot and claim it. */
	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from that so far.  Only a stop/init
	 * cycle can revive this if it ever happens at all.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		return;
	}

	/* cmd is copied by bwfm_do_async(), so a stack object is fine. */
	cmd.m = m;
	cmd.prio = prio;
	cmd.flowid = flowid;
	bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_create_cb, &cmd, sizeof(cmd));
}
2045 
/*
 * Async callback (scheduled by bwfm_pci_flowring_create()) that
 * allocates the flowring's DMA memory and submits the
 * FLOW_RING_CREATE request on the control submit ring.
 */
void
bwfm_pci_flowring_create_cb(struct bwfm_softc *bwfm, void *arg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
#ifndef IEEE80211_STA_ONLY
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
#endif
	struct bwfm_cmd_flowring_create *cmd = arg;
	struct msgbuf_tx_flowring_create_req *req;
	struct bwfm_pci_msgring *ring;
	uint8_t *da, *sa;
	int s;

	/* DA/SA taken from the Ethernet header of the pending mbuf. */
	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;

	ring = &sc->sc_flowrings[cmd->flowid];
	if (ring->status != RING_OPENING) {
		printf("%s: flowring not opening\n", DEVNAME(sc));
		return;
	}

	/*
	 * 512 items of 48 bytes each, matching max_items/len_item in the
	 * request below.  NOTE(review): on failure the slot stays in
	 * RING_OPENING, blocking TX until a stop/init cycle (see the
	 * comment in bwfm_pci_flowring_create()).
	 */
	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		return;
	}

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		splx(s);
		return;
	}

	ring->status = RING_OPENING;
	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
	ring->m = cmd->m;
	memcpy(ring->mac, da, ETHER_ADDR_LEN);
#ifndef IEEE80211_STA_ONLY
	/* Multicast uses the broadcast MAC as ring key (see lookup). */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
#endif

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->tid = bwfm_pci_prio2fifo[cmd->prio];
	/*
	 * Firmware flowring ids start at 2; the same +2 offset is used
	 * in bwfm_pci_flowring_delete().
	 */
	req->flow_ring_id = letoh16(cmd->flowid + 2);
	memcpy(req->da, da, ETHER_ADDR_LEN);
	memcpy(req->sa, sa, ETHER_ADDR_LEN);
	req->flow_ring_addr.high_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	req->flow_ring_addr.low_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	req->max_items = letoh16(512);
	req->len_item = letoh16(48);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);
}
2107 
/*
 * Ask the firmware to tear down an open flowring and wait (up to two
 * seconds) for bwfm_pci_flowring_delete_cb() to mark it closed.
 */
void
bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
{
	struct msgbuf_tx_flowring_delete_req *req;
	struct bwfm_pci_msgring *ring;
	int s;

	ring = &sc->sc_flowrings[flowid];
	if (ring->status != RING_OPEN) {
		printf("%s: flowring not open\n", DEVNAME(sc));
		return;
	}

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		splx(s);
		return;
	}

	ring->status = RING_CLOSING;

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	/* Firmware flowring ids are offset by 2 (see create_cb). */
	req->flow_ring_id = letoh16(flowid + 2);
	req->reason = 0;

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);

	/* Sleep until the delete-complete handler wakes us, or time out. */
	tsleep_nsec(ring, PCATCH, DEVNAME(sc), SEC_TO_NSEC(2));
	if (ring->status != RING_CLOSED)
		printf("%s: flowring not closing\n", DEVNAME(sc));
}
2144 
/*
 * Completion handler for a flowring delete: release the ring's DMA
 * memory, mark the slot reusable, and wake any thread sleeping in
 * bwfm_pci_flowring_delete().
 */
void
bwfm_pci_flowring_delete_cb(struct bwfm_softc *bwfm, void *arg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_cmd_flowring_create *cmd = arg;
	struct bwfm_pci_msgring *ring;

	ring = &sc->sc_flowrings[cmd->flowid];
	bwfm_pci_dmamem_free(sc, ring->ring);
	ring->status = RING_CLOSED;
	wakeup(ring);
}
2157 
2158 void
2159 bwfm_pci_stop(struct bwfm_softc *bwfm)
2160 {
2161 	struct bwfm_pci_softc *sc = (void *)bwfm;
2162 	struct bwfm_pci_msgring *ring;
2163 	int i;
2164 
2165 	for (i = 0; i < sc->sc_max_flowrings; i++) {
2166 		ring = &sc->sc_flowrings[i];
2167 		if (ring->status == RING_OPEN)
2168 			bwfm_pci_flowring_delete(sc, i);
2169 	}
2170 }
2171 
2172 int
2173 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
2174 {
2175 	struct bwfm_pci_softc *sc = (void *)bwfm;
2176 	struct bwfm_pci_msgring *ring;
2177 	int i;
2178 
2179 	/* If we are transitioning, we cannot send. */
2180 	for (i = 0; i < sc->sc_max_flowrings; i++) {
2181 		ring = &sc->sc_flowrings[i];
2182 		if (ring->status == RING_OPENING)
2183 			return ENOBUFS;
2184 	}
2185 
2186 	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
2187 		sc->sc_tx_pkts_full = 1;
2188 		return ENOBUFS;
2189 	}
2190 
2191 	return 0;
2192 }
2193 
/*
 * Enqueue an outgoing frame on its flowring.  Returns 0 when the
 * frame was queued (or deferred while its flowring is being created
 * asynchronously) and an errno when the caller should retry later.
 */
int
bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf *m)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	struct msgbuf_tx_msghdr *tx;
	uint32_t pktid;
	paddr_t paddr;
	int flowid, ret;

	flowid = bwfm_pci_flowring_lookup(sc, m);
	if (flowid < 0) {
		/*
		 * We cannot send the packet right now as there is
		 * no flowring yet.  The flowring will be created
		 * asynchronously.  While the ring is transitioning
		 * the TX check will tell the upper layers that we
		 * cannot send packets right now.  When the flowring
		 * is created the queue will be restarted and this
		 * mbuf will be transmitted.
		 */
		bwfm_pci_flowring_create(sc, m);
		return 0;
	}

	ring = &sc->sc_flowrings[flowid];
	if (ring->status == RING_OPENING ||
	    ring->status == RING_CLOSING) {
		printf("%s: tried to use a flow that was "
		    "transitioning in status %d\n",
		    DEVNAME(sc), ring->status);
		return ENOBUFS;
	}

	tx = bwfm_pci_ring_write_reserve(sc, ring);
	if (tx == NULL)
		return ENOBUFS;

	memset(tx, 0, sizeof(*tx));
	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
	tx->msg.ifidx = 0;
	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
	tx->flags |= ieee80211_classify(&sc->sc_sc.sc_ic, m) <<
	    BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
	tx->seg_cnt = 1;
	/* The Ethernet header travels inside the message itself... */
	memcpy(tx->txhdr, mtod(m, char *), ETHER_HDR_LEN);

	/* Map the mbuf for DMA and stash it under a fresh packet id. */
	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, m, &pktid, &paddr);
	if (ret) {
		if (ret == ENOBUFS) {
			printf("%s: no pktid available for TX\n",
			    DEVNAME(sc));
			sc->sc_tx_pkts_full = 1;
		}
		/* Undo the ring reservation made above. */
		bwfm_pci_ring_write_cancel(sc, ring, 1);
		return ret;
	}
	/* ...so the DMA buffer handed to firmware skips past it. */
	paddr += ETHER_HDR_LEN;

	tx->msg.request_id = htole32(pktid + 1);
	tx->data_len = htole16(m->m_len - ETHER_HDR_LEN);
	tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	tx->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, ring);
	return 0;
}
2261 
/*
 * Send host-to-device mailbox data, either as a control-ring message
 * (when the firmware asked for mailbox-via-control) or through the
 * shared-memory mailbox word plus the SB mailbox doorbell.  Returns
 * EIO if a previous transaction is still pending after ~1 second.
 */
int
bwfm_pci_send_mb_data(struct bwfm_pci_softc *sc, uint32_t htod_mb_data)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	if (sc->sc_mb_via_ctl)
		return bwfm_pci_msgbuf_h2d_mb_write(sc, htod_mb_data);

	/* Poll up to 100 * 10ms for the device to consume the old value. */
	for (i = 0; i < 100; i++) {
		reg = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_htod_mb_data_addr);
		if (reg == 0)
			break;
		delay(10 * 1000);
	}
	if (i == 100) {
		DPRINTF(("%s: MB transaction already pending\n", DEVNAME(sc)));
		return EIO;
	}

	/* Write the new value and ring the doorbell. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_htod_mb_data_addr, htod_mb_data);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_REG_SBMBX, 1);

	/*
	 * Old PCIe cores (<= rev 13) get the doorbell rung a second
	 * time.  NOTE(review): quirk inherited from the vendor driver.
	 */
	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13)
		pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_REG_SBMBX, 1);

	return 0;
}
2295 
/*
 * Process device-to-host mailbox data: read and clear the shared
 * mailbox word and act on the events it signals.  Currently only the
 * D3 ACK (used by the suspend path waiting on sc_mbdata_done) is
 * handled; everything else is just logged.
 */
void
bwfm_pci_handle_mb_data(struct bwfm_pci_softc *sc)
{
	uint32_t reg;

	reg = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_dtoh_mb_data_addr);
	if (reg == 0)
		return;

	/* Clear the word so the device can post the next event. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_dtoh_mb_data_addr, 0);

	if (reg & BWFM_PCI_D2H_DEV_D3_ACK) {
		sc->sc_mbdata_done = 1;
		wakeup(&sc->sc_mbdata_done);
	}

	/* TODO: support more events */
	if (reg & ~BWFM_PCI_D2H_DEV_D3_ACK)
		printf("%s: handle MB data 0x%08x\n", DEVNAME(sc), reg);
}
2318 
#ifdef BWFM_DEBUG
/*
 * Drain the firmware's in-memory console ring and echo new characters
 * via DPRINTFN(3).  The write index lives in device shared memory;
 * the read index is tracked on the host side and wraps at
 * sc_console_buf_size.
 */
void
bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
{
	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);

	if (newidx != sc->sc_console_readidx)
		DPRINTFN(3, ("BWFM CONSOLE: "));
	while (newidx != sc->sc_console_readidx) {
		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_console_buf_addr + sc->sc_console_readidx);
		sc->sc_console_readidx++;
		if (sc->sc_console_readidx == sc->sc_console_buf_size)
			sc->sc_console_readidx = 0;
		/* Firmware emits CRLF; swallow the carriage returns. */
		if (ch == '\r')
			continue;
		DPRINTFN(3, ("%c", ch));
	}
}
#endif
2340 
/*
 * PCIe interrupt handler.  Acks the mailbox interrupt, dispatches D2H
 * mailbox events (legacy register layout only) and drains the three
 * completion rings, handing received frames to the network stack.
 * Returns 1 when the interrupt was ours, 0 otherwise.
 */
int
bwfm_pci_intr(void *v)
{
	struct bwfm_pci_softc *sc = (void *)v;
	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint32_t status, mask;

	/* Spurious interrupts can arrive before attach finishes. */
	if (!sc->sc_initialized)
		return 0;

	status = bwfm_pci_intr_status(sc);
	/* FIXME: interrupt status seems to be zero? */
	if (status == 0 && sc->sc_pcireg64)
		status |= BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB;
	if (status == 0)
		return 0;

	/* Mask further interrupts while we process this one. */
	bwfm_pci_intr_disable(sc);
	bwfm_pci_intr_ack(sc, status);

	if (!sc->sc_pcireg64 &&
	    (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1)))
		bwfm_pci_handle_mb_data(sc);

	mask = BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB;
	if (sc->sc_pcireg64)
		mask = BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB;

	if (status & mask) {
		/* Drain RX, TX and control completions into ml. */
		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete, &ml);
		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete, &ml);
		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete, &ml);

		if (ifiq_input(&ifp->if_rcv, &ml))
			if_rxr_livelocked(&sc->sc_rxbuf_ring);
	}

#ifdef BWFM_DEBUG
	bwfm_pci_debug_console(sc);
#endif

	bwfm_pci_intr_enable(sc);
	return 1;
}
2387 
2388 void
2389 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
2390 {
2391 	if (sc->sc_pcireg64)
2392 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2393 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK,
2394 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2395 	else
2396 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2397 		    BWFM_PCI_PCIE2REG_MAILBOXMASK,
2398 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2399 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
2400 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2401 }
2402 
2403 void
2404 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
2405 {
2406 	if (sc->sc_pcireg64)
2407 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2408 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK, 0);
2409 	else
2410 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2411 		    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
2412 }
2413 
2414 uint32_t
2415 bwfm_pci_intr_status(struct bwfm_pci_softc *sc)
2416 {
2417 	if (sc->sc_pcireg64)
2418 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2419 		    BWFM_PCI_64_PCIE2REG_MAILBOXINT);
2420 	else
2421 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2422 		    BWFM_PCI_PCIE2REG_MAILBOXINT);
2423 }
2424 
2425 void
2426 bwfm_pci_intr_ack(struct bwfm_pci_softc *sc, uint32_t status)
2427 {
2428 	if (sc->sc_pcireg64)
2429 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2430 		    BWFM_PCI_64_PCIE2REG_MAILBOXINT, status);
2431 	else
2432 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2433 		    BWFM_PCI_PCIE2REG_MAILBOXINT, status);
2434 }
2435 
2436 uint32_t
2437 bwfm_pci_intmask(struct bwfm_pci_softc *sc)
2438 {
2439 	if (sc->sc_pcireg64)
2440 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2441 		    BWFM_PCI_64_PCIE2REG_INTMASK);
2442 	else
2443 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2444 		    BWFM_PCI_PCIE2REG_INTMASK);
2445 }
2446 
2447 void
2448 bwfm_pci_hostready(struct bwfm_pci_softc *sc)
2449 {
2450 	if ((sc->sc_shared_flags & BWFM_SHARED_INFO_HOSTRDY_DB1) == 0)
2451 		return;
2452 
2453 	if (sc->sc_shared_flags & BWFM_SHARED_INFO_SHARED_DAR)
2454 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2455 		    BWFM_PCI_64_PCIE2REG_H2D_MAILBOX_1, 1);
2456 	else
2457 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2458 		    BWFM_PCI_PCIE2REG_H2D_MAILBOX_1, 1);
2459 }
2460 
/* Msgbuf protocol implementation */

/*
 * Issue a firmware ioctl through the msgbuf control submit ring and
 * sleep up to five seconds for the response.  The request payload
 * (and, on return, the response) are exchanged through 'buf'; '*len'
 * is updated to the response length.  Returns 0 on success, 1 on any
 * failure (allocation, ring full, timeout, or firmware error status).
 */
int
bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
    int cmd, char *buf, size_t *len)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct msgbuf_ioctl_req_hdr *req;
	struct bwfm_pci_ioctl *ctl;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	size_t buflen;
	int s;

	/* The request payload travels in a cluster mbuf mapped for DMA. */
	buflen = min(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
	m = MCLGETL(NULL, M_DONTWAIT, buflen);
	if (m == NULL)
		return 1;
	m->m_len = m->m_pkthdr.len = buflen;

	if (buf)
		memcpy(mtod(m, char *), buf, buflen);
	else
		memset(mtod(m, char *), 0, buflen);

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		splx(s);
		m_freem(m);
		return 1;
	}

	/* The pktid table takes ownership of the request mbuf. */
	if (bwfm_pci_pktid_new(sc, &sc->sc_ioctl_pkts, m, &pktid, &paddr)) {
		bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
		splx(s);
		m_freem(m);
		return 1;
	}

	/* Track this transaction so rxioctl can match the response. */
	ctl = malloc(sizeof(*ctl), M_TEMP, M_WAITOK|M_ZERO);
	ctl->transid = sc->sc_ioctl_transid++;
	TAILQ_INSERT_TAIL(&sc->sc_ioctlq, ctl, next);

	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	req->msg.ifidx = 0;
	req->msg.flags = 0;
	req->msg.request_id = htole32(pktid);
	req->cmd = htole32(cmd);
	req->output_buf_len = htole16(*len);
	req->trans_id = htole16(ctl->transid);

	req->input_buf_len = htole16(m->m_len);
	req->req_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	req->req_buf_addr.low_addr = htole32(paddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);

	/* Wait for bwfm_pci_msgbuf_rxioctl() to fill in ctl and wake us. */
	tsleep_nsec(ctl, PWAIT, "bwfm", SEC_TO_NSEC(5));
	TAILQ_REMOVE(&sc->sc_ioctlq, ctl, next);

	/*
	 * No response (timeout or interrupted sleep).  NOTE(review): in
	 * this case the request mbuf/pktid stay in sc_ioctl_pkts --
	 * presumably reclaimed elsewhere; confirm against the pktid
	 * teardown path.
	 */
	if (ctl->m == NULL) {
		free(ctl, M_TEMP, sizeof(*ctl));
		return 1;
	}

	/* Copy out at most what was asked for and what was returned. */
	*len = min(ctl->retlen, m->m_len);
	*len = min(*len, buflen);
	if (buf)
		m_copydata(ctl->m, 0, *len, buf);
	m_freem(ctl->m);

	/* Negative completion status from firmware means failure. */
	if (ctl->status < 0) {
		free(ctl, M_TEMP, sizeof(*ctl));
		return 1;
	}

	free(ctl, M_TEMP, sizeof(*ctl));
	return 0;
}
2542 
/*
 * Set a firmware variable: a "set" is simply a query whose response
 * payload we discard (len is passed by value, so the caller never
 * sees the returned length).
 */
int
bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
    int cmd, char *buf, size_t len)
{
	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
}
2549 
2550 void
2551 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *sc,
2552     struct msgbuf_ioctl_resp_hdr *resp)
2553 {
2554 	struct bwfm_pci_ioctl *ctl, *tmp;
2555 	struct mbuf *m;
2556 
2557 	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
2558 	    letoh32(resp->msg.request_id));
2559 
2560 	TAILQ_FOREACH_SAFE(ctl, &sc->sc_ioctlq, next, tmp) {
2561 		if (ctl->transid != letoh16(resp->trans_id))
2562 			continue;
2563 		ctl->m = m;
2564 		ctl->retlen = letoh16(resp->resp_len);
2565 		ctl->status = letoh16(resp->compl_hdr.status);
2566 		wakeup(ctl);
2567 		return;
2568 	}
2569 
2570 	m_freem(m);
2571 }
2572 
/*
 * Send host-to-device mailbox data as a message on the control submit
 * ring; used when the firmware requests mailbox-via-control (see
 * sc_mb_via_ctl in bwfm_pci_send_mb_data()).  Returns ENOBUFS when
 * the ring is full.
 */
int
bwfm_pci_msgbuf_h2d_mb_write(struct bwfm_pci_softc *sc, uint32_t data)
{
	struct msgbuf_h2d_mailbox_data *req;
	int s;

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		splx(s);
		return ENOBUFS;
	}

	/* ifidx -1: this message is not tied to a network interface. */
	req->msg.msgtype = MSGBUF_TYPE_H2D_MAILBOX_DATA;
	req->msg.ifidx = -1;
	req->msg.flags = 0;
	req->msg.request_id = 0;
	/*
	 * NOTE(review): 'data' is stored without htole32(), unlike the
	 * other multi-byte fields in this file -- confirm whether the
	 * firmware expects host or little-endian byte order here.
	 */
	req->data = data;

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);

	return 0;
}
2597