xref: /openbsd/sys/dev/pci/if_bwfm_pci.c (revision 73471bf0)
1 /*	$OpenBSD: if_bwfm_pci.c,v 1.57 2021/10/23 12:48:17 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2010-2016 Broadcom Corporation
4  * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and/or distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bpfilter.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/buf.h>
24 #include <sys/kernel.h>
25 #include <sys/malloc.h>
26 #include <sys/device.h>
27 #include <sys/queue.h>
28 #include <sys/socket.h>
29 
30 #if defined(__HAVE_FDT)
31 #include <machine/fdt.h>
32 #include <dev/ofw/openfirm.h>
33 #endif
34 
35 #if NBPFILTER > 0
36 #include <net/bpf.h>
37 #endif
38 #include <net/if.h>
39 #include <net/if_dl.h>
40 #include <net/if_media.h>
41 
42 #include <netinet/in.h>
43 #include <netinet/if_ether.h>
44 
45 #include <net80211/ieee80211_var.h>
46 
47 #include <machine/bus.h>
48 
49 #include <dev/pci/pcireg.h>
50 #include <dev/pci/pcivar.h>
51 #include <dev/pci/pcidevs.h>
52 
53 #include <dev/ic/bwfmvar.h>
54 #include <dev/ic/bwfmreg.h>
55 #include <dev/pci/if_bwfm_pci.h>
56 
/* Sizes of the host DMA buffers handed to the firmware. */
#define BWFM_DMA_D2H_SCRATCH_BUF_LEN		8
#define BWFM_DMA_D2H_RINGUPD_BUF_LEN		1024
#define BWFM_DMA_H2D_IOCTL_BUF_LEN		ETHER_MAX_LEN

/* Static message rings: 2 host-to-device, 3 device-to-host. */
#define BWFM_NUM_TX_MSGRINGS			2
#define BWFM_NUM_RX_MSGRINGS			3

/* Packet-id slots per direction; ids map in-flight mbufs to DMA maps. */
#define BWFM_NUM_IOCTL_PKTIDS			8
#define BWFM_NUM_TX_PKTIDS			2048
#define BWFM_NUM_RX_PKTIDS			1024

/* DMA segments per packet map (single-segment transfers only). */
#define BWFM_NUM_IOCTL_DESCS			1
#define BWFM_NUM_TX_DESCS			1
#define BWFM_NUM_RX_DESCS			1

#ifdef BWFM_DEBUG
#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
static int bwfm_debug = 2;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

/* Device name for printf(9) output. */
#define DEVNAME(sc)	((sc)->sc_sc.sc_dev.dv_xname)
82 
/* Lifecycle of a dynamically-created flowring. */
enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};

/*
 * One msgbuf ring shared with the firmware.  The read/write pointers
 * live either in device TCM or in a host DMA index buffer; w_idx_addr
 * and r_idx_addr are offsets into whichever of the two is in use.
 */
struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;
	uint32_t		 r_idx_addr;
	uint32_t		 w_ptr;		/* cached write index */
	uint32_t		 r_ptr;		/* cached read index */
	int			 nitem;		/* ring capacity in items */
	int			 itemsz;	/* size of one item in bytes */
	enum ring_status	 status;
	struct bwfm_pci_dmamem	*ring;		/* backing DMA memory */
	struct mbuf		*m;

	/* Flowring-only state: priority fifo and peer MAC address. */
	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};

/* A pending ioctl transaction, queued on sc_ioctlq until it completes. */
struct bwfm_pci_ioctl {
	uint16_t		 transid;	/* matches request to response */
	uint16_t		 retlen;	/* length of returned payload */
	int16_t			 status;	/* firmware status code */
	struct mbuf		*m;		/* response payload */
	TAILQ_ENTRY(bwfm_pci_ioctl) next;
};

/* Per-packet-id bookkeeping: the DMA map and the mbuf it loads. */
struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;
};

/* A pool of packet ids; `last' remembers the previous allocation slot. */
struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;
	uint32_t		 npkt;
	int			 last;
};
123 
/* Per-device state for the PCIe attachment of bwfm(4). */
struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;		/* generic bwfm state; must be first */
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void 			*sc_ih;		/* interrupt handle */

	int			 sc_initialized;	/* preinit completed */

	/* BAR0: PCIe core registers. */
	bus_space_tag_t		 sc_reg_iot;
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	/* BAR1: device tightly-coupled memory (TCM). */
	bus_space_tag_t		 sc_tcm_iot;
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	/* Location and contents of the firmware's shared info structure. */
	uint32_t		 sc_shared_address;
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	/* Host DMA buffer for ring indices (0 = indices live in TCM). */
	uint8_t			 sc_dma_idx_sz;
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;
	uint32_t		 sc_rx_dataoffset;
	uint32_t		 sc_htod_mb_data_addr;	/* mailbox, host-to-device */
	uint32_t		 sc_dtoh_mb_data_addr;	/* mailbox, device-to-host */
	uint32_t		 sc_ring_info_addr;

	/* Firmware console ("dmesg") in TCM. */
	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	/* The five static msgbuf rings plus the dynamic TX flowrings. */
	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	struct bwfm_pci_dmamem	*sc_scratch_buf;
	struct bwfm_pci_dmamem	*sc_ringupd_buf;

	TAILQ_HEAD(, bwfm_pci_ioctl) sc_ioctlq;		/* pending ioctls */
	uint16_t		 sc_ioctl_transid;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	/* Packet-id pools for the three traffic classes. */
	struct bwfm_pci_pkts	 sc_ioctl_pkts;
	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;

	uint8_t			 sc_mbdata_done;	/* mailbox ack seen (suspend) */
};
190 
/* A single-segment, kernel-mapped DMA allocation. */
struct bwfm_pci_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	caddr_t			bdm_kva;
};

/* Accessors: DMA map, length, device address (first segment) and KVA. */
#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BWFM_PCI_DMA_DVA(_bdm)	((uint64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BWFM_PCI_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)
202 
203 int		 bwfm_pci_match(struct device *, void *, void *);
204 void		 bwfm_pci_attach(struct device *, struct device *, void *);
205 int		 bwfm_pci_detach(struct device *, int);
206 int		 bwfm_pci_activate(struct device *, int);
207 void		 bwfm_pci_cleanup(struct bwfm_pci_softc *);
208 
209 #if defined(__HAVE_FDT)
210 int		 bwfm_pci_read_otp(struct bwfm_pci_softc *);
211 void		 bwfm_pci_process_otp_tuple(struct bwfm_pci_softc *, uint8_t,
212 		    uint8_t, uint8_t *);
213 #endif
214 
215 int		 bwfm_pci_intr(void *);
216 void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
217 void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
218 uint32_t	 bwfm_pci_intr_status(struct bwfm_pci_softc *);
219 void		 bwfm_pci_intr_ack(struct bwfm_pci_softc *, uint32_t);
220 uint32_t	 bwfm_pci_intmask(struct bwfm_pci_softc *);
221 void		 bwfm_pci_hostready(struct bwfm_pci_softc *);
222 int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
223 		    size_t, const u_char *, size_t);
224 void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int );
225 
226 struct bwfm_pci_dmamem *
227 		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
228 		    bus_size_t);
229 void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
230 int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
231 		    struct bwfm_pci_pkts *);
232 int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
233 		    struct bwfm_pci_pkts *, struct mbuf *,
234 		    uint32_t *, paddr_t *);
235 struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
236 		    struct bwfm_pci_pkts *, uint32_t);
237 void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
238 		    struct if_rxring *, uint32_t);
239 void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
240 void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
241 int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
242 		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
243 int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
244 		    int, size_t);
245 
246 void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
247 		    struct bwfm_pci_msgring *);
248 void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
249 		    struct bwfm_pci_msgring *);
250 void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
251 		    struct bwfm_pci_msgring *);
252 void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
253 		    struct bwfm_pci_msgring *);
254 void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
255 		    struct bwfm_pci_msgring *);
256 void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
257 		    struct bwfm_pci_msgring *);
258 void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
259 		    struct bwfm_pci_msgring *, int, int *);
260 void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
261 		    struct bwfm_pci_msgring *, int *);
262 void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
263 		    struct bwfm_pci_msgring *, int);
264 void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
265 		    struct bwfm_pci_msgring *);
266 void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
267 		    struct bwfm_pci_msgring *, int);
268 
269 void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
270 		    struct bwfm_pci_msgring *, struct mbuf_list *);
271 void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *,
272 		    struct mbuf_list *);
273 
274 uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
275 void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
276 		    uint32_t);
277 int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
278 int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
279 void		 bwfm_pci_buscore_activate(struct bwfm_softc *, uint32_t);
280 
281 int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
282 		     struct mbuf *);
283 void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
284 		     struct mbuf *);
285 void		 bwfm_pci_flowring_create_cb(struct bwfm_softc *, void *);
286 void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);
287 
288 int		 bwfm_pci_preinit(struct bwfm_softc *);
289 void		 bwfm_pci_stop(struct bwfm_softc *);
290 int		 bwfm_pci_txcheck(struct bwfm_softc *);
291 int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf *);
292 
293 int		 bwfm_pci_send_mb_data(struct bwfm_pci_softc *, uint32_t);
294 void		 bwfm_pci_handle_mb_data(struct bwfm_pci_softc *);
295 
296 #ifdef BWFM_DEBUG
297 void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
298 #endif
299 
300 int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
301 		    int, char *, size_t *);
302 int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
303 		    int, char *, size_t);
304 void		 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *,
305 		    struct msgbuf_ioctl_resp_hdr *);
306 
/* Buscore backend used by the chip-common code in bwfm(4). */
struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};

/* Bus backend: firmware bring-up, stop and TX entry points. */
struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_preinit = bwfm_pci_preinit,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
};

/* Protocol backend: ioctls travel over the msgbuf rings. */
struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
	.proto_rx = NULL,
	.proto_rxctl = NULL,
};

/* autoconf(9) glue. */
struct cfattach bwfm_pci_ca = {
	sizeof(struct bwfm_pci_softc),
	bwfm_pci_match,
	bwfm_pci_attach,
	bwfm_pci_detach,
	bwfm_pci_activate,
};

/* PCI ids of the supported Broadcom fullmac chips. */
static const struct pci_matchid bwfm_pci_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4356 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4371 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4378 },
};
346 
347 int
348 bwfm_pci_match(struct device *parent, void *match, void *aux)
349 {
350 	return (pci_matchbyid(aux, bwfm_pci_devices,
351 	    nitems(bwfm_pci_devices)));
352 }
353 
354 void
355 bwfm_pci_attach(struct device *parent, struct device *self, void *aux)
356 {
357 	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
358 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
359 	const char *intrstr;
360 	pci_intr_handle_t ih;
361 
362 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
363 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
364 	    NULL, &sc->sc_tcm_ios, 0)) {
365 		printf(": can't map bar1\n");
366 		return;
367 	}
368 
369 	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
370 	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
371 	    NULL, &sc->sc_reg_ios, 0)) {
372 		printf(": can't map bar0\n");
373 		goto bar1;
374 	}
375 
376 	sc->sc_pc = pa->pa_pc;
377 	sc->sc_tag = pa->pa_tag;
378 	sc->sc_id = pa->pa_id;
379 	sc->sc_dmat = pa->pa_dmat;
380 
381 	/* Map and establish the interrupt. */
382 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
383 		printf(": couldn't map interrupt\n");
384 		goto bar0;
385 	}
386 	intrstr = pci_intr_string(pa->pa_pc, ih);
387 
388 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
389 	    bwfm_pci_intr, sc, DEVNAME(sc));
390 	if (sc->sc_ih == NULL) {
391 		printf(": couldn't establish interrupt");
392 		if (intrstr != NULL)
393 			printf(" at %s", intrstr);
394 		printf("\n");
395 		goto bar1;
396 	}
397 	printf(": %s\n", intrstr);
398 
399 #if defined(__HAVE_FDT)
400 	sc->sc_sc.sc_node = PCITAG_NODE(pa->pa_tag);
401 #endif
402 
403 	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
404 	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
405 	bwfm_attach(&sc->sc_sc);
406 	config_mountroot(self, bwfm_attachhook);
407 	return;
408 
409 bar0:
410 	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
411 bar1:
412 	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
413 }
414 
/*
 * One-time bus bring-up, run from the attach hook after mountroot:
 * load firmware/NVRAM into device RAM, validate the shared-info
 * structure the firmware exports in TCM, set up the DMA index buffer
 * and the five static msgbuf rings, allocate the packet-id pools and
 * post the initial RX buffers.  Returns 0 on success, 1 on failure.
 */
int
bwfm_pci_preinit(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_ringinfo ringinfo;
	const char *chip = NULL;
	u_char *ucode, *nvram;
	size_t size, nvsize, nvlen;
	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint32_t idx_offset, reg;
	int i;

	if (sc->sc_initialized)
		return 0;

	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
		printf("%s: cannot attach chip\n", DEVNAME(sc));
		return 1;
	}

#if defined(__HAVE_FDT)
	if (bwfm_pci_read_otp(sc)) {
		printf("%s: cannot read OTP\n", DEVNAME(sc));
		return 1;
	}
#endif

	/*
	 * Read-modify-write of PCIe config offset 0x4e0 through the
	 * indirect CONFIGADDR/CONFIGDATA window.  The value is written
	 * back unchanged; presumably the write itself has a side effect
	 * in the PCIe core -- undocumented here, do not "optimize" away.
	 */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);

	/* Map chip id to the firmware image name. */
	switch (bwfm->sc_chip.ch_chip)
	{
	case BRCM_CC_4350_CHIP_ID:
		if (bwfm->sc_chip.ch_chiprev > 7)
			chip = "4350";
		else
			chip = "4350c2";
		break;
	case BRCM_CC_4356_CHIP_ID:
		chip = "4356";
		break;
	case BRCM_CC_43602_CHIP_ID:
		chip = "43602";
		break;
	case BRCM_CC_4371_CHIP_ID:
		chip = "4371";
		break;
	case BRCM_CC_4378_CHIP_ID:
		chip = "4378";
		break;
	default:
		printf("%s: unknown firmware for chip %s\n",
		    DEVNAME(sc), bwfm->sc_chip.ch_name);
		return 1;
	}

	if (bwfm_loadfirmware(bwfm, chip, "-pcie", &ucode, &size,
	    &nvram, &nvsize, &nvlen) != 0)
		return 1;

	/* Retrieve RAM size from firmware. */
	if (size >= BWFM_RAMSIZE + 8) {
		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
	}

	if (bwfm_pci_load_microcode(sc, ucode, size, nvram, nvlen) != 0) {
		printf("%s: could not load microcode\n",
		    DEVNAME(sc));
		free(ucode, M_DEVBUF, size);
		free(nvram, M_DEVBUF, nvsize);
		return 1;
	}
	free(ucode, M_DEVBUF, size);
	free(nvram, M_DEVBUF, nvsize);

	/* Low byte of the shared-info word carries the protocol version. */
	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_INFO);
	sc->sc_shared_version = sc->sc_shared_flags;
	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
		printf("%s: PCIe version %d unsupported\n",
		    DEVNAME(sc), sc->sc_shared_version);
		return 1;
	}

	/* 0 means ring indices live in TCM rather than in host DMA memory. */
	sc->sc_dma_idx_sz = 0;
	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
			sc->sc_dma_idx_sz = sizeof(uint16_t);
		else
			sc->sc_dma_idx_sz = sizeof(uint32_t);
	}

	/* Maximum RX data buffers in the ring. */
	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
	if (sc->sc_max_rxbufpost == 0)
		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;

	/* Alternative offset of data in a packet */
	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);

	/* For Power Management */
	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);

	/* Ring information */
	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);

	/* Firmware's "dmesg" */
	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);

	/* Read ring information. */
	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));

	/* Version >= 6 reports all three counts; older firmware only one. */
	if (sc->sc_shared_version >= 6) {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
	} else {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_flowrings = sc->sc_max_submissionrings -
		    BWFM_NUM_TX_MSGRINGS;
		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
	}

	if (sc->sc_dma_idx_sz == 0) {
		/* Indices stay in device TCM at firmware-supplied offsets. */
		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(uint32_t);
	} else {
		uint64_t address;

		/* Each TX/RX Ring has a Read and Write Ptr */
		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
		    sc->sc_dma_idx_bufsz, 8);
		if (sc->sc_dma_idx_buf == NULL) {
			/* XXX: Fallback to TCM? */
			printf("%s: cannot allocate idx buf\n",
			    DEVNAME(sc));
			return 1;
		}

		/*
		 * Lay out four index arrays in the buffer, in order:
		 * h2d write, h2d read, d2h write, d2h read; publish each
		 * array's device address back to the firmware.
		 */
		idx_offset = sc->sc_dma_idx_sz;
		h2d_w_idx_ptr = 0;
		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
		ringinfo.h2d_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr_high =
		    htole32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
		    sc->sc_max_completionrings * idx_offset;
		address += sc->sc_max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr_high =
		    htole32(address >> 32);

		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
	}

	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* TX rxpost ring: Send clean data mbufs for RX */
	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* RX completion rings: recv our filled buffers back */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* Item sizes grew in shared-info version 7. */
	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024,
	    sc->sc_shared_version >= 7 ? 24 : 16,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512,
	    sc->sc_shared_version >= 7 ? 40 : 32,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;

	/* Dynamic TX rings for actual data */
	sc->sc_flowrings = malloc(sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
		/* Flowring indices follow the two static submission rings. */
		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
	}

	/* Scratch and ring update buffers for firmware */
	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);

	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bwfm_pci_intr_enable(sc);
	bwfm_pci_hostready(sc);

	/* Maps RX mbufs to a packet id and back. */
	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
	sc->sc_rx_pkts.pkts = malloc(BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_CTL_PKT_SIZE,
		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_CTL_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx_pkts.pkts[i].bb_map);

	/* Maps TX mbufs to a packet id and back. */
	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
	sc->sc_tx_pkts.pkts = malloc(BWFM_NUM_TX_PKTIDS
	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx_pkts.pkts[i].bb_map);
	sc->sc_tx_pkts_full = 0;

	/* Maps IOCTL mbufs to a packet id and back. */
	sc->sc_ioctl_pkts.npkt = BWFM_NUM_IOCTL_PKTIDS;
	sc->sc_ioctl_pkts.pkts = malloc(BWFM_NUM_IOCTL_PKTIDS
	    * sizeof(struct bwfm_pci_buf), M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_IOCTL_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_ioctl_pkts.pkts[i].bb_map);

	/*
	 * For whatever reason, could also be a bug somewhere in this
	 * driver, the firmware needs a bunch of RX buffers otherwise
	 * it won't send any RX complete messages.
	 */
	if_rxr_init(&sc->sc_rxbuf_ring, min(256, sc->sc_max_rxbufpost),
	    sc->sc_max_rxbufpost);
	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
	if_rxr_init(&sc->sc_event_ring, 8, 8);
	bwfm_pci_fill_rx_rings(sc);

	TAILQ_INIT(&sc->sc_ioctlq);

#ifdef BWFM_DEBUG
	sc->sc_console_readidx = 0;
	bwfm_pci_debug_console(sc);
#endif

	sc->sc_initialized = 1;
	return 0;

cleanup:
	/* Unwind whatever DMA state was set up, in reverse order. */
	if (sc->sc_ringupd_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	if (sc->sc_scratch_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	if (sc->sc_rx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	if (sc->sc_tx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	if (sc->sc_ctrl_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	if (sc->sc_rxpost_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	if (sc->sc_ctrl_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 1;
}
752 
/*
 * Copy the firmware image (and the optional NVRAM blob at the top of
 * RAM) into device RAM, start the ARM core and wait for the firmware
 * to publish its shared-info address in the last word of RAM.  On
 * success stores that address in sc_shared_address and returns 0.
 */
int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size,
    const u_char *nvram, size_t nvlen)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t shared, written;
	int i;

	/*
	 * 43602 quirk: write 0 to the PDA register for banks 5 and 7 of
	 * the ARM CR4 core before loading -- presumably un-retains those
	 * RAM banks; TODO confirm against Broadcom documentation.
	 */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	/* Byte-copy the firmware image to the chip's RAM base. */
	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	/* NVRAM goes at the very top of RAM, just below the marker word. */
	if (nvram) {
		for (i = 0; i < nvlen; i++)
			bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
			    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize
			    - nvlen  + i, nvram[i]);
	}

	written = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(uint32_t *)ucode);

	/* Poll up to 5 s (100 * 50 ms) for the marker word to change. */
	for (i = 0; i < 100; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared != written)
			break;
	}
	if (shared == written) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}
	/* Sanity-check that the published pointer lands inside RAM. */
	if (shared < bwfm->sc_chip.ch_rambase ||
	    shared >= bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize) {
		printf("%s: invalid shared RAM address 0x%08x\n", DEVNAME(sc),
		    shared);
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}
820 
/*
 * Detach hook: tear down the generic bwfm(4) state first, then release
 * the PCI-specific rings, packet maps and DMA buffers.  Always
 * reports success.
 */
int
bwfm_pci_detach(struct device *self, int flags)
{
	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;

	bwfm_detach(&sc->sc_sc, flags);
	bwfm_pci_cleanup(sc);

	return 0;
}
831 
/*
 * Release everything bwfm_pci_preinit() set up: packet-id DMA maps and
 * any mbufs still attached, the dynamic flowrings, the five static
 * msgbuf rings and the scratch/ringupd/index DMA buffers.
 * NOTE(review): this dereferences the pkts arrays and ring pointers
 * unconditionally, i.e. it assumes preinit completed -- confirm no
 * caller can reach here with sc_initialized still 0.
 */
void
bwfm_pci_cleanup(struct bwfm_pci_softc *sc)
{
	int i;

	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_pkts.pkts[i].bb_map);
		if (sc->sc_rx_pkts.pkts[i].bb_m)
			m_freem(sc->sc_rx_pkts.pkts[i].bb_m);
	}
	free(sc->sc_rx_pkts.pkts, M_DEVBUF, BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf));

	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_pkts.pkts[i].bb_map);
		if (sc->sc_tx_pkts.pkts[i].bb_m)
			m_freem(sc->sc_tx_pkts.pkts[i].bb_m);
	}
	free(sc->sc_tx_pkts.pkts, M_DEVBUF, BWFM_NUM_TX_PKTIDS *
	    sizeof(struct bwfm_pci_buf));

	for (i = 0; i < BWFM_NUM_IOCTL_PKTIDS; i++) {
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_ioctl_pkts.pkts[i].bb_map);
		if (sc->sc_ioctl_pkts.pkts[i].bb_m)
			m_freem(sc->sc_ioctl_pkts.pkts[i].bb_m);
	}
	free(sc->sc_ioctl_pkts.pkts, M_DEVBUF, BWFM_NUM_IOCTL_PKTIDS *
	    sizeof(struct bwfm_pci_buf));

	/* Only flowrings that reached RING_OPEN or beyond own DMA memory. */
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (sc->sc_flowrings[i].status >= RING_OPEN)
			bwfm_pci_dmamem_free(sc, sc->sc_flowrings[i].ring);
	}
	free(sc->sc_flowrings, M_DEVBUF, sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring));

	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf) {
		/* Optional: only present when indices live in host memory. */
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
		sc->sc_dma_idx_buf = NULL;
	}

	/* Force a full re-init (firmware reload) on the next preinit. */
	sc->sc_initialized = 0;
}
882 
/*
 * Power-state transitions.  On suspend (DVACT_QUIESCE) tell the
 * firmware we are entering D3 via the mailbox and wait up to two
 * seconds for the acknowledgement (sc_mbdata_done is set elsewhere
 * when the ack arrives).  On resume (DVACT_WAKEUP) announce D0; if the
 * device lost its state across suspend, tear everything down so the
 * next initialization reloads the firmware from scratch.
 */
int
bwfm_pci_activate(struct device *self, int act)
{
	struct bwfm_pci_softc *sc = (struct bwfm_pci_softc *)self;
	struct bwfm_softc *bwfm = (void *)sc;
	int error = 0;

	switch (act) {
	case DVACT_QUIESCE:
		error = bwfm_activate(bwfm, act);
		if (error)
			return error;
		if (sc->sc_initialized) {
			sc->sc_mbdata_done = 0;
			error = bwfm_pci_send_mb_data(sc,
			    BWFM_PCI_H2D_HOST_D3_INFORM);
			if (error)
				return error;
			/* Wait for the D3 ack; time out after 2 seconds. */
			tsleep_nsec(&sc->sc_mbdata_done, PCATCH,
			    DEVNAME(sc), SEC_TO_NSEC(2));
			if (!sc->sc_mbdata_done)
				return ETIMEDOUT;
		}
		break;
	case DVACT_WAKEUP:
		if (sc->sc_initialized) {
			/* If device can't be resumed, re-init. */
			if (bwfm_pci_intmask(sc) == 0 ||
			    bwfm_pci_send_mb_data(sc,
			    BWFM_PCI_H2D_HOST_D0_INFORM) != 0) {
				bwfm_cleanup(bwfm);
				bwfm_pci_cleanup(sc);
			}
		}
		error = bwfm_activate(bwfm, act);
		if (error)
			return error;
		break;
	default:
		break;
	}

	return 0;
}
927 
928 #if defined(__HAVE_FDT)
929 int
930 bwfm_pci_read_otp(struct bwfm_pci_softc *sc)
931 {
932 	struct bwfm_softc *bwfm = (void *)sc;
933 	struct bwfm_core *core;
934 	uint8_t otp[BWFM_OTP_SIZE];
935 	int i;
936 
937 	if (bwfm->sc_chip.ch_chip != BRCM_CC_4378_CHIP_ID)
938 		return 0;
939 
940 	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_GCI);
941 	if (core == NULL)
942 		return 1;
943 
944 	for (i = 0; i < (sizeof(otp) / sizeof(uint32_t)); i++)
945 		((uint32_t *)otp)[i] = bwfm_pci_buscore_read(bwfm,
946 		    core->co_base + BWFM_OTP_4378_BASE + i * sizeof(uint32_t));
947 
948 	for (i = 0; i < BWFM_OTP_SIZE - 1; ) {
949 		if (otp[i + 0] == 0) {
950 			i++;
951 			continue;
952 		}
953 		if (i + otp[i + 1] > BWFM_OTP_SIZE)
954 			break;
955 		bwfm_pci_process_otp_tuple(sc, otp[i + 0], otp[i + 1],
956 		    &otp[i + 2]);
957 		i += otp[i + 1];
958 	}
959 
960 	return 0;
961 }
962 
/*
 * Interpret one OTP CIS tuple.  Tuple type 0x15 carries system vendor
 * production data as a sequence of "X=value" strings separated by
 * spaces; the interesting pieces (chip revision, module, module
 * revision, vendor) are collected and printed to dmesg in the same
 * shape as the firmware file names.  On FDT systems the product name
 * is taken from the "module-wlan0" property of /chosen.  All other
 * tuple types are only logged under DPRINTF.
 */
void
bwfm_pci_process_otp_tuple(struct bwfm_pci_softc *sc, uint8_t type, uint8_t size,
    uint8_t *data)
{
	struct bwfm_softc *bwfm = (void *)sc;
	char chiprev[8] = "", module[8] = "", modrev[8] = "", vendor[8] = "", chip[8] = "";
	char product[16] = "unknown";
	int node, len;

	switch (type) {
	case 0x15: /* system vendor OTP */
		DPRINTF(("%s: system vendor OTP\n", DEVNAME(sc)));
		if (size < sizeof(uint32_t))
			return;
		/* Only a fixed 08 00 00 00 header is understood. */
		if (data[0] != 0x08 || data[1] != 0x00 ||
		    data[2] != 0x00 || data[3] != 0x00)
			return;
		size -= sizeof(uint32_t);
		data += sizeof(uint32_t);
		while (size) {
			/* reached end */
			if (data[0] == 0xff)
				break;
			/* Measure the current token (up to NUL/space/0xff). */
			for (len = 0; len < size; len++)
				if (data[len] == 0x00 || data[len] == ' ' ||
				    data[len] == 0xff)
					break;
			if (len < 3 || len > 9) /* X=abcdef */
				goto next;
			if (data[1] != '=')
				goto next;
			/* NULL-terminate string */
			if (data[len] == ' ')
				data[len] = '\0';
			/* Stash the value after "X=" by key letter. */
			switch (data[0]) {
			case 's':
				strlcpy(chiprev, &data[2], sizeof(chiprev));
				break;
			case 'M':
				strlcpy(module, &data[2], sizeof(module));
				break;
			case 'm':
				strlcpy(modrev, &data[2], sizeof(modrev));
				break;
			case 'V':
				strlcpy(vendor, &data[2], sizeof(vendor));
				break;
			}
next:
			/* skip content */
			data += len;
			size -= len;
			/* skip spacer tag */
			if (size) {
				data++;
				size--;
			}
		}
		/* Chip ids above 40000 are decimal, older ones hex. */
		snprintf(chip, sizeof(chip),
		    bwfm->sc_chip.ch_chip > 40000 ? "%05d" : "%04x",
		    bwfm->sc_chip.ch_chip);
		node = OF_finddevice("/chosen");
		if (node != -1)
			OF_getprop(node, "module-wlan0", product, sizeof(product));
		printf("%s: firmware C-%s%s%s/P-%s_M-%s_V-%s__m-%s\n",
		    DEVNAME(sc), chip,
		    *chiprev ? "__s-" : "", *chiprev ? chiprev : "",
		    product, module, vendor, modrev);
		break;
	case 0x80: /* Broadcom CIS */
		DPRINTF(("%s: Broadcom CIS\n", DEVNAME(sc)));
		break;
	default:
		DPRINTF(("%s: unknown OTP tuple\n", DEVNAME(sc)));
		break;
	}
}
1040 #endif
1041 
1042 /* DMA code */
1043 struct bwfm_pci_dmamem *
1044 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
1045 {
1046 	struct bwfm_pci_dmamem *bdm;
1047 	int nsegs;
1048 
1049 	bdm = malloc(sizeof(*bdm), M_DEVBUF, M_WAITOK | M_ZERO);
1050 	bdm->bdm_size = size;
1051 
1052 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1053 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
1054 		goto bdmfree;
1055 
1056 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
1057 	    &nsegs, BUS_DMA_WAITOK) != 0)
1058 		goto destroy;
1059 
1060 	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
1061 	    &bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
1062 		goto free;
1063 
1064 	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
1065 	    NULL, BUS_DMA_WAITOK) != 0)
1066 		goto unmap;
1067 
1068 	bzero(bdm->bdm_kva, size);
1069 
1070 	return (bdm);
1071 
1072 unmap:
1073 	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
1074 free:
1075 	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
1076 destroy:
1077 	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
1078 bdmfree:
1079 	free(bdm, M_DEVBUF, sizeof(*bdm));
1080 
1081 	return (NULL);
1082 }
1083 
/*
 * Release a buffer obtained from bwfm_pci_dmamem_alloc(): unmap the
 * KVA, free the underlying segment, destroy the DMA map and finally
 * free the handle itself.
 */
void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	free(bdm, M_DEVBUF, sizeof(*bdm));
}
1092 
1093 /*
1094  * We need a simple mapping from a packet ID to mbufs, because when
1095  * a transfer completed, we only know the ID so we have to look up
1096  * the memory for the ID.  This simply looks for an empty slot.
1097  */
1098 int
1099 bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
1100 {
1101 	int i, idx;
1102 
1103 	idx = pkts->last + 1;
1104 	for (i = 0; i < pkts->npkt; i++) {
1105 		if (idx == pkts->npkt)
1106 			idx = 0;
1107 		if (pkts->pkts[idx].bb_m == NULL)
1108 			return 0;
1109 		idx++;
1110 	}
1111 	return ENOBUFS;
1112 }
1113 
/*
 * Allocate a packet ID for "m" and DMA-load the mbuf into that slot's
 * map.  On success *pktid receives the slot index and *paddr the DMA
 * address of the first segment; the slot holds the mbuf until
 * bwfm_pci_pktid_free() releases it.  Returns EFBIG if the mbuf cannot
 * be mapped even after defragmentation, ENOBUFS if all IDs are taken.
 */
int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf *m, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	/* Start scanning just past the last allocated slot. */
	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0) {
				/* Too many segments: compact and retry once. */
				if (m_defrag(m, M_DONTWAIT))
					return EFBIG;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, m, BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
			    0, pkts->pkts[idx].bb_map->dm_mapsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			pkts->last = idx;
			pkts->pkts[idx].bb_m = m;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}
1146 
/*
 * Look up and release the mbuf behind a packet ID: sync and unload
 * the slot's DMA map and clear the slot so the ID can be reused.
 * Returns NULL if the ID is out of range or not currently in use;
 * otherwise ownership of the mbuf passes to the caller.
 */
struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
	    pkts->pkts[pktid].bb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}
1163 
/*
 * Replenish all firmware-facing receive buffer pools: the data-path
 * RX buffers plus the ioctl-response and event buffer pools.
 */
void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}
1173 
/*
 * Post receive buffers for ioctl responses or firmware events
 * (selected by "msgtype") on the control submit ring, as many as the
 * if_rxring accounting allows (at most 8 at a time).  Each buffer is
 * an MSGBUF_MAX_CTL_PKT_SIZE cluster registered under a packet ID so
 * the completion handler can find the mbuf again.
 */
void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		/* Stop early if no packet ID is free. */
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_CTL_PKT_SIZE);
		if (m == NULL) {
			/* Roll back the reserved descriptor. */
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_CTL_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_CTL_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	/* Return the slots we did not manage to fill. */
	if_rxr_put(rxring, slots);
	splx(s);
}
1213 
/*
 * Post data-path receive buffers on the rxpost submit ring, up to the
 * firmware's advertised limit (sc_max_rxbufpost) as tracked by the
 * if_rxring.  Each buffer is an MSGBUF_MAX_PKT_SIZE cluster registered
 * under a packet ID so the RX completion can find the mbuf again.
 */
void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		/* Stop early if no packet ID is free. */
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETL(NULL, M_DONTWAIT, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			/* Roll back the reserved descriptor. */
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
		req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	/* Return the slots we did not manage to fill. */
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}
1253 
/*
 * Initialize one of the common message rings: compute where its
 * read/write indices live (TCM or DMA index buffer, at slot "idx"),
 * zero the pointers, allocate the ring's DMA memory and describe it
 * (base address, item count, item size) in the ring-mem block in
 * device memory.  *ring_mem is advanced past the entry consumed so
 * successive calls fill consecutive entries.  Returns ENOMEM if the
 * DMA allocation fails.
 */
int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	/* Publish the ring's geometry to the device. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}
1284 
/*
 * Initialize a TX flowring: zero the pointers and allocate the ring's
 * DMA memory.  Unlike bwfm_pci_setup_ring() the ring's location is not
 * written into device memory here; it is handed to the firmware in the
 * FLOW_RING_CREATE request (see bwfm_pci_flowring_create_cb()).
 * Returns ENOMEM if the DMA allocation fails.
 */
int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}
1301 
1302 /* Ring helpers */
1303 void
1304 bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
1305     struct bwfm_pci_msgring *ring)
1306 {
1307 	struct bwfm_softc *bwfm = (void *)sc;
1308 
1309 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
1310 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1311 		    BWFM_PCI_64_PCIE2REG_H2D_MAILBOX_0, 1);
1312 	else
1313 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1314 		    BWFM_PCI_PCIE2REG_H2D_MAILBOX_0, 1);
1315 }
1316 
/*
 * Refresh our cached copy of the ring's read pointer.  It is fetched
 * either from device memory (TCM) or, when the firmware exports its
 * indices through a host DMA index buffer (sc_dma_idx_sz != 0), from
 * that buffer after syncing it for CPU access.
 */
void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}
1332 
/*
 * Refresh our cached copy of the ring's write pointer, from TCM or
 * from the DMA index buffer -- see bwfm_pci_ring_update_rptr().
 */
void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}
1348 
/*
 * Publish our read pointer to the firmware, either directly into
 * device memory (TCM) or into the DMA index buffer (syncing it for
 * the device afterwards).
 */
void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1364 
/*
 * Publish our write pointer to the firmware, either directly into
 * device memory (TCM) or into the DMA index buffer (syncing it for
 * the device afterwards).
 */
void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
1380 
1381 /*
1382  * Retrieve a free descriptor to put new stuff in, but don't commit
1383  * to it yet so we can rollback later if any error occurs.
1384  */
1385 void *
1386 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
1387     struct bwfm_pci_msgring *ring)
1388 {
1389 	int available;
1390 	char *ret;
1391 
1392 	bwfm_pci_ring_update_rptr(sc, ring);
1393 
1394 	if (ring->r_ptr > ring->w_ptr)
1395 		available = ring->r_ptr - ring->w_ptr;
1396 	else
1397 		available = ring->r_ptr + (ring->nitem - ring->w_ptr);
1398 
1399 	if (available <= 1)
1400 		return NULL;
1401 
1402 	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
1403 	ring->w_ptr += 1;
1404 	if (ring->w_ptr == ring->nitem)
1405 		ring->w_ptr = 0;
1406 	return ret;
1407 }
1408 
/*
 * Like bwfm_pci_ring_write_reserve() but reserve up to "count"
 * consecutive descriptors.  *avail is set to how many were actually
 * reserved: bounded by the free space (always leaving one slot
 * unused) and clipped at the end of the ring, since the returned
 * region must be contiguous in memory.  Returns NULL when the ring
 * is full.
 */
void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available <= 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = min(count, available - 1);
	/* Don't wrap: clip the reservation at the end of the ring. */
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}
1435 
1436 /*
1437  * Read number of descriptors available (submitted by the firmware)
1438  * and retrieve pointer to first descriptor.
1439  */
/*
 * Return a pointer to the first unread descriptor submitted by the
 * firmware and set *avail to how many can be consumed contiguously
 * (a single call never crosses the end of the ring; the caller loops
 * after wrap-around, see bwfm_pci_ring_rx()).  Syncs the descriptor
 * range for CPU access.  Returns NULL when the ring is empty.
 */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;

	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}
1459 
1460 /*
1461  * Let firmware know we read N descriptors.
1462  */
1463 void
1464 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
1465     struct bwfm_pci_msgring *ring, int nitem)
1466 {
1467 	ring->r_ptr += nitem;
1468 	if (ring->r_ptr == ring->nitem)
1469 		ring->r_ptr = 0;
1470 	bwfm_pci_ring_write_rptr(sc, ring);
1471 }
1472 
1473 /*
1474  * Let firmware know that we submitted some descriptors.
1475  */
/*
 * Commit previously reserved descriptors: sync the ring memory for
 * device access, publish the new write pointer and ring the doorbell
 * so the firmware looks at the ring.
 */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}
1486 
1487 /*
1488  * Rollback N descriptors in case we don't actually want
1489  * to commit to it.
1490  */
1491 void
1492 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
1493     struct bwfm_pci_msgring *ring, int nitem)
1494 {
1495 	if (ring->w_ptr == 0)
1496 		ring->w_ptr = ring->nitem - nitem;
1497 	else
1498 		ring->w_ptr -= nitem;
1499 }
1500 
1501 /*
1502  * Foreach written descriptor on the ring, pass the descriptor to
1503  * a message handler and let the firmware know we handled it.
1504  */
/*
 * Drain a completion ring: hand every written descriptor to
 * bwfm_pci_msg_rx() and acknowledge them in batches of 48 so the
 * firmware can start reusing slots while we keep processing.
 * bwfm_pci_ring_read_avail() only returns descriptors up to the end
 * of the ring, so after a wrap-around (r_ptr back at 0) loop again to
 * pick up the remainder.
 */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    struct mbuf_list *ml)
{
	void *buf;
	int avail, processed;

again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset, ml);
		buf += ring->itemsz;
		processed++;
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	if (ring->r_ptr == 0)
		goto again;
}
1533 
1534 void
1535 bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf, struct mbuf_list *ml)
1536 {
1537 	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
1538 	struct msgbuf_ioctl_resp_hdr *resp;
1539 	struct msgbuf_tx_status *tx;
1540 	struct msgbuf_rx_complete *rx;
1541 	struct msgbuf_rx_event *event;
1542 	struct msgbuf_common_hdr *msg;
1543 	struct msgbuf_flowring_create_resp *fcr;
1544 	struct msgbuf_flowring_delete_resp *fdr;
1545 	struct bwfm_pci_msgring *ring;
1546 	struct mbuf *m;
1547 	int flowid;
1548 
1549 	msg = (struct msgbuf_common_hdr *)buf;
1550 	switch (msg->msgtype)
1551 	{
1552 	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1553 		fcr = (struct msgbuf_flowring_create_resp *)buf;
1554 		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
1555 		if (flowid < 2)
1556 			break;
1557 		flowid -= 2;
1558 		if (flowid >= sc->sc_max_flowrings)
1559 			break;
1560 		ring = &sc->sc_flowrings[flowid];
1561 		if (ring->status != RING_OPENING)
1562 			break;
1563 		if (fcr->compl_hdr.status) {
1564 			printf("%s: failed to open flowring %d\n",
1565 			    DEVNAME(sc), flowid);
1566 			ring->status = RING_CLOSED;
1567 			if (ring->m) {
1568 				m_freem(ring->m);
1569 				ring->m = NULL;
1570 			}
1571 			ifq_restart(&ifp->if_snd);
1572 			break;
1573 		}
1574 		ring->status = RING_OPEN;
1575 		if (ring->m != NULL) {
1576 			m = ring->m;
1577 			ring->m = NULL;
1578 			if (bwfm_pci_txdata(&sc->sc_sc, m))
1579 				m_freem(ring->m);
1580 		}
1581 		ifq_restart(&ifp->if_snd);
1582 		break;
1583 	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1584 		fdr = (struct msgbuf_flowring_delete_resp *)buf;
1585 		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
1586 		if (flowid < 2)
1587 			break;
1588 		flowid -= 2;
1589 		if (flowid >= sc->sc_max_flowrings)
1590 			break;
1591 		ring = &sc->sc_flowrings[flowid];
1592 		if (ring->status != RING_CLOSING)
1593 			break;
1594 		if (fdr->compl_hdr.status) {
1595 			printf("%s: failed to delete flowring %d\n",
1596 			    DEVNAME(sc), flowid);
1597 			break;
1598 		}
1599 		bwfm_pci_dmamem_free(sc, ring->ring);
1600 		ring->status = RING_CLOSED;
1601 		break;
1602 	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1603 		m = bwfm_pci_pktid_free(sc, &sc->sc_ioctl_pkts,
1604 		    letoh32(msg->request_id));
1605 		if (m == NULL)
1606 			break;
1607 		m_freem(m);
1608 		break;
1609 	case MSGBUF_TYPE_IOCTL_CMPLT:
1610 		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
1611 		bwfm_pci_msgbuf_rxioctl(sc, resp);
1612 		if_rxr_put(&sc->sc_ioctl_ring, 1);
1613 		bwfm_pci_fill_rx_rings(sc);
1614 		break;
1615 	case MSGBUF_TYPE_WL_EVENT:
1616 		event = (struct msgbuf_rx_event *)buf;
1617 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1618 		    letoh32(event->msg.request_id));
1619 		if (m == NULL)
1620 			break;
1621 		m_adj(m, sc->sc_rx_dataoffset);
1622 		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
1623 		bwfm_rx(&sc->sc_sc, m, ml);
1624 		if_rxr_put(&sc->sc_event_ring, 1);
1625 		bwfm_pci_fill_rx_rings(sc);
1626 		break;
1627 	case MSGBUF_TYPE_TX_STATUS:
1628 		tx = (struct msgbuf_tx_status *)buf;
1629 		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
1630 		    letoh32(tx->msg.request_id) - 1);
1631 		if (m == NULL)
1632 			break;
1633 		m_freem(m);
1634 		if (sc->sc_tx_pkts_full) {
1635 			sc->sc_tx_pkts_full = 0;
1636 			ifq_restart(&ifp->if_snd);
1637 		}
1638 		break;
1639 	case MSGBUF_TYPE_RX_CMPLT:
1640 		rx = (struct msgbuf_rx_complete *)buf;
1641 		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
1642 		    letoh32(rx->msg.request_id));
1643 		if (m == NULL)
1644 			break;
1645 		if (letoh16(rx->data_offset))
1646 			m_adj(m, letoh16(rx->data_offset));
1647 		else if (sc->sc_rx_dataoffset)
1648 			m_adj(m, sc->sc_rx_dataoffset);
1649 		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
1650 		bwfm_rx(&sc->sc_sc, m, ml);
1651 		if_rxr_put(&sc->sc_rxbuf_ring, 1);
1652 		bwfm_pci_fill_rx_rings(sc);
1653 		break;
1654 	default:
1655 		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
1656 		break;
1657 	}
1658 }
1659 
1660 /* Bus core helpers */
1661 void
1662 bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
1663 {
1664 	struct bwfm_softc *bwfm = (void *)sc;
1665 	struct bwfm_core *core;
1666 
1667 	core = bwfm_chip_get_core(bwfm, id);
1668 	if (core == NULL) {
1669 		printf("%s: could not find core to select", DEVNAME(sc));
1670 		return;
1671 	}
1672 
1673 	pci_conf_write(sc->sc_pc, sc->sc_tag,
1674 	    BWFM_PCI_BAR0_WINDOW, core->co_base);
1675 	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
1676 	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
1677 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1678 		    BWFM_PCI_BAR0_WINDOW, core->co_base);
1679 }
1680 
1681 uint32_t
1682 bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
1683 {
1684 	struct bwfm_pci_softc *sc = (void *)bwfm;
1685 	uint32_t page, offset;
1686 
1687 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1688 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1689 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1690 	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
1691 }
1692 
1693 void
1694 bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
1695 {
1696 	struct bwfm_pci_softc *sc = (void *)bwfm;
1697 	uint32_t page, offset;
1698 
1699 	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
1700 	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
1701 	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
1702 	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
1703 }
1704 
/*
 * Buscore "prepare" hook; nothing needs to happen before reset on
 * the PCIe attachment.
 */
int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}
1710 
/*
 * Reset the device through the chipcommon watchdog.  ASPM is disabled
 * around the reset and restored afterwards.  On PCIe core revisions
 * <= 13 a set of config registers is read through the indirect
 * CONFIGADDR/CONFIGDATA window and written back (presumably to
 * re-latch values the reset disturbed -- TODO confirm against the
 * vendor driver).  Finally any pending interrupt status is cleared.
 */
int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	/* Save link control and mask off ASPM during the reset. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	/* Trigger the watchdog reset and give the chip 100ms. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	/* Restore the saved link control value. */
	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		uint16_t cfg_offset[] = {
		    BWFM_PCI_CFGREG_STATUS_CMD,
		    BWFM_PCI_CFGREG_PM_CSR,
		    BWFM_PCI_CFGREG_MSI_CAP,
		    BWFM_PCI_CFGREG_MSI_ADDR_L,
		    BWFM_PCI_CFGREG_MSI_ADDR_H,
		    BWFM_PCI_CFGREG_MSI_DATA,
		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
		    BWFM_PCI_CFGREG_RBAR_CTRL,
		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		/* Read each register and write the value straight back. */
		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	/* Ack stale interrupts; 0xffffffff means the device is gone. */
	reg = bwfm_pci_intr_status(sc);
	if (reg != 0xffffffff)
		bwfm_pci_intr_ack(sc, reg);

	return 0;
}
1768 
/*
 * Kick the firmware by writing its reset vector to the start of
 * device memory (TCM offset 0).
 */
void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}
1775 
/*
 * Map an IP precedence priority (0-7) to one of the four firmware
 * TX FIFOs, used when picking/creating flowrings.
 */
static int bwfm_pci_prio2fifo[8] = {
	0, /* best effort */
	1, /* IPTOS_PREC_IMMEDIATE */
	1, /* IPTOS_PREC_PRIORITY */
	0, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};
1786 
/*
 * Find the flowring a packet should go out on.  A start index is
 * derived from the packet's TX FIFO (plus, in AP mode, the last byte
 * of the destination address), then the table is probed linearly for
 * an open ring matching the FIFO (and, in AP mode, the destination).
 * Returns the flowring index, -1 if no matching ring exists yet, or
 * ENOBUFS for unsupported operating modes.
 */
int
bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		/* All multicast traffic shares the broadcast rings. */
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return ENOBUFS;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (ic->ic_opmode == IEEE80211_M_STA &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo) {
			found = 1;
			break;
		}
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo &&
		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
			found = 1;
			break;
		}
#endif
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	if (found)
		return flowid;

	return -1;
}
1843 
/*
 * Claim a closed flowring slot for this packet's FIFO (and, in AP
 * mode, destination) and ask the firmware asynchronously -- via
 * bwfm_do_async()/bwfm_pci_flowring_create_cb() -- to create the
 * ring.  The mbuf travels along in the command and is transmitted
 * once the firmware acknowledges the ring (see the
 * FLOW_RING_CREATE_CMPLT case in bwfm_pci_msg_rx()).
 */
void
bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create cmd;
#ifndef IEEE80211_STA_ONLY
	uint8_t *da = mtod(m, uint8_t *);
#endif
	struct bwfm_pci_msgring *ring;
	int flowid, prio, fifo;
	int i, found;

	prio = ieee80211_classify(ic, m);
	fifo = bwfm_pci_prio2fifo[prio];

	/* Same start-index derivation as bwfm_pci_flowring_lookup(). */
	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = etherbroadcastaddr;
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from that so far.  Only a stop/init
	 * cycle can revive this if it ever happens at all.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		return;
	}

	cmd.m = m;
	cmd.prio = prio;
	cmd.flowid = flowid;
	bwfm_do_async(&sc->sc_sc, bwfm_pci_flowring_create_cb, &cmd, sizeof(cmd));
}
1902 
/*
 * Async continuation of bwfm_pci_flowring_create(): allocate the
 * flowring's DMA memory (512 items of 48 bytes) and submit the
 * FLOW_RING_CREATE request on the control ring.  The source and
 * destination MACs come from the Ethernet header of the pending
 * mbuf, which stays parked in ring->m until the completion arrives
 * in bwfm_pci_msg_rx().
 */
void
bwfm_pci_flowring_create_cb(struct bwfm_softc *bwfm, void *arg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
#ifndef IEEE80211_STA_ONLY
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
#endif
	struct bwfm_cmd_flowring_create *cmd = arg;
	struct msgbuf_tx_flowring_create_req *req;
	struct bwfm_pci_msgring *ring;
	uint8_t *da, *sa;
	int s;

	/* Destination and source MAC from the Ethernet header. */
	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;

	ring = &sc->sc_flowrings[cmd->flowid];
	if (ring->status != RING_OPENING) {
		printf("%s: flowring not opening\n", DEVNAME(sc));
		return;
	}

	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		return;
	}

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		splx(s);
		return;
	}

	ring->status = RING_OPENING;
	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
	ring->m = cmd->m;
	memcpy(ring->mac, da, ETHER_ADDR_LEN);
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
#endif

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->tid = bwfm_pci_prio2fifo[cmd->prio];
	/* NOTE(review): htole16 is meant; letoh16 is the same swap. */
	req->flow_ring_id = letoh16(cmd->flowid + 2);
	memcpy(req->da, da, ETHER_ADDR_LEN);
	memcpy(req->sa, sa, ETHER_ADDR_LEN);
	req->flow_ring_addr.high_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	req->flow_ring_addr.low_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	req->max_items = letoh16(512);
	req->len_item = letoh16(48);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);
}
1964 
/*
 * Ask the firmware to tear down the TX flowring `flowid`.  The ring
 * must currently be RING_OPEN; it is moved to RING_CLOSING here and a
 * MSGBUF_TYPE_FLOW_RING_DELETE request is posted on the control submit
 * ring.  The final cleanup presumably happens when the firmware's
 * delete response is processed -- handled outside this function.
 */
void
bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
{
	struct msgbuf_tx_flowring_delete_req *req;
	struct bwfm_pci_msgring *ring;
	int s;

	ring = &sc->sc_flowrings[flowid];
	if (ring->status != RING_OPEN) {
		printf("%s: flowring not open\n", DEVNAME(sc));
		return;
	}

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		splx(s);
		return;
	}

	/* Mark transitioning only once the request slot is secured. */
	ring->status = RING_CLOSING;

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	/* Same +2 bias as flowring creation uses for firmware ring IDs. */
	req->flow_ring_id = letoh16(flowid + 2);
	req->reason = 0;

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);
}
1997 
1998 void
1999 bwfm_pci_stop(struct bwfm_softc *bwfm)
2000 {
2001 	struct bwfm_pci_softc *sc = (void *)bwfm;
2002 	struct bwfm_pci_msgring *ring;
2003 	int i;
2004 
2005 	for (i = 0; i < sc->sc_max_flowrings; i++) {
2006 		ring = &sc->sc_flowrings[i];
2007 		if (ring->status == RING_OPEN)
2008 			bwfm_pci_flowring_delete(sc, i);
2009 	}
2010 }
2011 
2012 int
2013 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
2014 {
2015 	struct bwfm_pci_softc *sc = (void *)bwfm;
2016 	struct bwfm_pci_msgring *ring;
2017 	int i;
2018 
2019 	/* If we are transitioning, we cannot send. */
2020 	for (i = 0; i < sc->sc_max_flowrings; i++) {
2021 		ring = &sc->sc_flowrings[i];
2022 		if (ring->status == RING_OPENING)
2023 			return ENOBUFS;
2024 	}
2025 
2026 	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
2027 		sc->sc_tx_pkts_full = 1;
2028 		return ENOBUFS;
2029 	}
2030 
2031 	return 0;
2032 }
2033 
/*
 * Transmit one frame through its flowring.  Returns 0 when the mbuf
 * has been handed over (or its flowring creation has been kicked off
 * asynchronously), ENOBUFS when the caller should retry later.
 */
int
bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf *m)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	struct msgbuf_tx_msghdr *tx;
	uint32_t pktid;
	paddr_t paddr;
	int flowid, ret;

	flowid = bwfm_pci_flowring_lookup(sc, m);
	if (flowid < 0) {
		/*
		 * We cannot send the packet right now as there is
		 * no flowring yet.  The flowring will be created
		 * asynchronously.  While the ring is transitioning
		 * the TX check will tell the upper layers that we
		 * cannot send packets right now.  When the flowring
		 * is created the queue will be restarted and this
		 * mbuf will be transmitted.
		 */
		bwfm_pci_flowring_create(sc, m);
		return 0;
	}

	ring = &sc->sc_flowrings[flowid];
	if (ring->status == RING_OPENING ||
	    ring->status == RING_CLOSING) {
		printf("%s: tried to use a flow that was "
		    "transitioning in status %d\n",
		    DEVNAME(sc), ring->status);
		return ENOBUFS;
	}

	tx = bwfm_pci_ring_write_reserve(sc, ring);
	if (tx == NULL)
		return ENOBUFS;

	memset(tx, 0, sizeof(*tx));
	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
	tx->msg.ifidx = 0;
	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
	/* Fold the 802.11 priority into the descriptor flags. */
	tx->flags |= ieee80211_classify(&sc->sc_sc.sc_ic, m) <<
	    BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
	tx->seg_cnt = 1;
	/* The Ethernet header travels in the descriptor itself ... */
	memcpy(tx->txhdr, mtod(m, char *), ETHER_HDR_LEN);

	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, m, &pktid, &paddr);
	if (ret) {
		if (ret == ENOBUFS) {
			printf("%s: no pktid available for TX\n",
			    DEVNAME(sc));
			sc->sc_tx_pkts_full = 1;
		}
		/* Give the reserved descriptor slot back. */
		bwfm_pci_ring_write_cancel(sc, ring, 1);
		return ret;
	}
	/* ... so the DMA payload starts right after it. */
	paddr += ETHER_HDR_LEN;

	/*
	 * pktid is biased by one -- presumably so request id 0 stays
	 * unused; the TX-complete path must subtract it again (confirm).
	 */
	tx->msg.request_id = htole32(pktid + 1);
	/* NOTE(review): uses m_len, which assumes the frame fits in a
	 * single mbuf -- confirm upper layers guarantee that here. */
	tx->data_len = htole16(m->m_len - ETHER_HDR_LEN);
	tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	tx->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, ring);
	return 0;
}
2101 
/*
 * Post a host-to-device mailbox message.  Waits up to ~1 second
 * (100 x 10ms) for any previous message to be consumed (the device
 * clears the mailbox word when done), then writes the new value and
 * rings the doorbell.  Returns 0 on success, EIO if a previous
 * transaction never completed.
 */
int
bwfm_pci_send_mb_data(struct bwfm_pci_softc *sc, uint32_t htod_mb_data)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	/* Poll until the previous mailbox value has been consumed. */
	for (i = 0; i < 100; i++) {
		reg = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_htod_mb_data_addr);
		if (reg == 0)
			break;
		delay(10 * 1000);
	}
	if (i == 100) {
		DPRINTF(("%s: MB transaction already pending\n", DEVNAME(sc)));
		return EIO;
	}

	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_htod_mb_data_addr, htod_mb_data);
	/* Ring the doorbell so the device looks at the mailbox. */
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_REG_SBMBX, 1);

	/* Older PCIe cores (rev <= 13) get the doorbell rung twice --
	 * presumably a hardware quirk workaround; confirm against the
	 * Broadcom driver lineage. */
	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13)
		pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_REG_SBMBX, 1);

	return 0;
}
2132 
2133 void
2134 bwfm_pci_handle_mb_data(struct bwfm_pci_softc *sc)
2135 {
2136 	uint32_t reg;
2137 
2138 	reg = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
2139 	    sc->sc_dtoh_mb_data_addr);
2140 	if (reg == 0)
2141 		return;
2142 
2143 	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
2144 	    sc->sc_dtoh_mb_data_addr, 0);
2145 
2146 	if (reg & BWFM_PCI_D2H_DEV_D3_ACK) {
2147 		sc->sc_mbdata_done = 1;
2148 		wakeup(&sc->sc_mbdata_done);
2149 	}
2150 
2151 	/* TODO: support more events */
2152 	if (reg & ~BWFM_PCI_D2H_DEV_D3_ACK)
2153 		printf("%s: handle MB data 0x%08x\n", DEVNAME(sc), reg);
2154 }
2155 
#ifdef BWFM_DEBUG
/*
 * Drain the firmware's in-memory console ring buffer and echo it via
 * DPRINTFN.  Carriage returns are dropped; the read index wraps at
 * the console buffer size.
 */
void
bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
{
	uint32_t writeidx;

	writeidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
	if (writeidx == sc->sc_console_readidx)
		return;

	DPRINTFN(3, ("BWFM CONSOLE: "));
	do {
		uint8_t c = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_console_buf_addr + sc->sc_console_readidx);
		if (++sc->sc_console_readidx == sc->sc_console_buf_size)
			sc->sc_console_readidx = 0;
		if (c != '\r')
			DPRINTFN(3, ("%c", c));
	} while (writeidx != sc->sc_console_readidx);
}
#endif
2177 
/*
 * PCI interrupt handler.  Returns 0 if the interrupt was not ours (or
 * the device is not yet initialized), 1 if it was handled.  While
 * processing, device interrupts are masked and re-enabled at the end.
 */
int
bwfm_pci_intr(void *v)
{
	struct bwfm_pci_softc *sc = (void *)v;
	struct bwfm_softc *bwfm = (void *)sc;
	struct ifnet *ifp = &sc->sc_sc.sc_ic.ic_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint32_t status, mask;

	/* Ignore interrupts until attach has finished setting us up. */
	if (!sc->sc_initialized)
		return 0;

	status = bwfm_pci_intr_status(sc);
	/* FIXME: interrupt status seems to be zero? */
	if (status == 0 && bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
		status |= BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB;
	if (status == 0)
		return 0;

	/* Mask further interrupts, then ack the ones we are handling. */
	bwfm_pci_intr_disable(sc);
	bwfm_pci_intr_ack(sc, status);

	/* FN0 mailbox events only exist on the non-4378 layout. */
	if (bwfm->sc_chip.ch_chip != BRCM_CC_4378_CHIP_ID &&
	    (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1)))
		bwfm_pci_handle_mb_data(sc);

	mask = BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB;
	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
		mask = BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB;

	/* Doorbell: drain all three completion rings, then pass the
	 * collected RX packets up the stack in one batch. */
	if (status & mask) {
		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete, &ml);
		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete, &ml);
		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete, &ml);

		if (ifiq_input(&ifp->if_rcv, &ml))
			if_rxr_livelocked(&sc->sc_rxbuf_ring);
	}

#ifdef BWFM_DEBUG
	bwfm_pci_debug_console(sc);
#endif

	bwfm_pci_intr_enable(sc);
	return 1;
}
2225 
2226 void
2227 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
2228 {
2229 	struct bwfm_softc *bwfm = (void *)sc;
2230 
2231 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2232 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2233 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK,
2234 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2235 	else
2236 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2237 		    BWFM_PCI_PCIE2REG_MAILBOXMASK,
2238 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2239 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
2240 		    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2241 }
2242 
2243 void
2244 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
2245 {
2246 	struct bwfm_softc *bwfm = (void *)sc;
2247 
2248 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2249 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2250 		    BWFM_PCI_64_PCIE2REG_MAILBOXMASK, 0);
2251 	else
2252 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2253 		    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
2254 }
2255 
2256 uint32_t
2257 bwfm_pci_intr_status(struct bwfm_pci_softc *sc)
2258 {
2259 	struct bwfm_softc *bwfm = (void *)sc;
2260 
2261 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2262 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2263 		    BWFM_PCI_64_PCIE2REG_MAILBOXINT);
2264 	else
2265 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2266 		    BWFM_PCI_PCIE2REG_MAILBOXINT);
2267 }
2268 
2269 void
2270 bwfm_pci_intr_ack(struct bwfm_pci_softc *sc, uint32_t status)
2271 {
2272 	struct bwfm_softc *bwfm = (void *)sc;
2273 
2274 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2275 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2276 		    BWFM_PCI_64_PCIE2REG_MAILBOXINT, status);
2277 	else
2278 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2279 		    BWFM_PCI_PCIE2REG_MAILBOXINT, status);
2280 }
2281 
2282 uint32_t
2283 bwfm_pci_intmask(struct bwfm_pci_softc *sc)
2284 {
2285 	struct bwfm_softc *bwfm = (void *)sc;
2286 
2287 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2288 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2289 		    BWFM_PCI_64_PCIE2REG_INTMASK);
2290 	else
2291 		return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2292 		    BWFM_PCI_PCIE2REG_INTMASK);
2293 }
2294 
2295 void
2296 bwfm_pci_hostready(struct bwfm_pci_softc *sc)
2297 {
2298 	struct bwfm_softc *bwfm = (void *)sc;
2299 
2300 	if ((sc->sc_shared_flags & BWFM_SHARED_INFO_HOSTRDY_DB1) == 0)
2301 		return;
2302 
2303 	if (bwfm->sc_chip.ch_chip == BRCM_CC_4378_CHIP_ID)
2304 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2305 		    BWFM_PCI_64_PCIE2REG_H2D_MAILBOX_1, 1);
2306 	else
2307 		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2308 		    BWFM_PCI_PCIE2REG_H2D_MAILBOX_1, 1);
2309 }
2310 
2311 /* Msgbuf protocol implementation */
/*
 * Issue an ioctl/dcmd to the firmware over the control submit ring and
 * sleep (up to one second) for the matching response, correlated via a
 * monotonically increasing transaction id.  `buf`/`*len` carry the
 * request payload in and the response payload out; on success *len is
 * clamped to the response length.  Returns 0 on success, 1 on failure.
 * May sleep; must be called from process context.
 */
int
bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
    int cmd, char *buf, size_t *len)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct msgbuf_ioctl_req_hdr *req;
	struct bwfm_pci_ioctl *ctl;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	size_t buflen;
	int s;

	/* The request payload is DMA'd from an mbuf cluster, capped at
	 * the ioctl buffer size the protocol allows. */
	buflen = min(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
	m = MCLGETL(NULL, M_DONTWAIT, buflen);
	if (m == NULL)
		return 1;
	m->m_len = m->m_pkthdr.len = buflen;

	if (buf)
		memcpy(mtod(m, char *), buf, buflen);
	else
		memset(mtod(m, char *), 0, buflen);

	s = splnet();
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		splx(s);
		m_freem(m);
		return 1;
	}

	/* Map the mbuf for DMA; the pktid table now owns it. */
	if (bwfm_pci_pktid_new(sc, &sc->sc_ioctl_pkts, m, &pktid, &paddr)) {
		bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
		splx(s);
		m_freem(m);
		return 1;
	}

	/* Register ourselves so the RX path can match the response by
	 * transaction id and wake us. */
	ctl = malloc(sizeof(*ctl), M_TEMP, M_WAITOK|M_ZERO);
	ctl->transid = sc->sc_ioctl_transid++;
	TAILQ_INSERT_TAIL(&sc->sc_ioctlq, ctl, next);

	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	req->msg.ifidx = 0;
	req->msg.flags = 0;
	req->msg.request_id = htole32(pktid);
	req->cmd = htole32(cmd);
	req->output_buf_len = htole16(*len);
	req->trans_id = htole16(ctl->transid);

	req->input_buf_len = htole16(m->m_len);
	req->req_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	req->req_buf_addr.low_addr = htole32(paddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	splx(s);

	/* Wait for bwfm_pci_msgbuf_rxioctl() to fill in ctl and wake us,
	 * or give up after one second. */
	tsleep_nsec(ctl, PWAIT, "bwfm", SEC_TO_NSEC(1));
	/* NOTE(review): ctl was inserted under splnet() but is removed
	 * here without re-raising spl -- confirm sc_ioctlq is otherwise
	 * only touched at splnet. */
	TAILQ_REMOVE(&sc->sc_ioctlq, ctl, next);

	/* No response mbuf means we timed out.  The request mbuf stays
	 * in the sc_ioctl_pkts pktid table -- presumably reclaimed by
	 * the ioctl-complete handler; confirm for the timeout path. */
	if (ctl->m == NULL) {
		free(ctl, M_TEMP, sizeof(*ctl));
		return 1;
	}

	*len = min(ctl->retlen, m->m_len);
	*len = min(*len, buflen);
	if (buf)
		m_copydata(ctl->m, 0, *len, buf);
	m_freem(ctl->m);

	/* Negative completion status from the firmware means failure. */
	if (ctl->status < 0) {
		free(ctl, M_TEMP, sizeof(*ctl));
		return 1;
	}

	free(ctl, M_TEMP, sizeof(*ctl));
	return 0;
}
2392 
2393 int
2394 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2395     int cmd, char *buf, size_t len)
2396 {
2397 	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2398 }
2399 
2400 void
2401 bwfm_pci_msgbuf_rxioctl(struct bwfm_pci_softc *sc,
2402     struct msgbuf_ioctl_resp_hdr *resp)
2403 {
2404 	struct bwfm_pci_ioctl *ctl, *tmp;
2405 	struct mbuf *m;
2406 
2407 	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
2408 	    letoh32(resp->msg.request_id));
2409 
2410 	TAILQ_FOREACH_SAFE(ctl, &sc->sc_ioctlq, next, tmp) {
2411 		if (ctl->transid != letoh16(resp->trans_id))
2412 			continue;
2413 		ctl->m = m;
2414 		ctl->retlen = letoh16(resp->resp_len);
2415 		ctl->status = letoh16(resp->compl_hdr.status);
2416 		wakeup(ctl);
2417 		return;
2418 	}
2419 
2420 	m_freem(m);
2421 }
2422