/*	$OpenBSD: if_mcx.c,v 1.111 2023/11/10 15:51:20 bluhm Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/task.h>
#include <sys/atomic.h>
#include <sys/timetc.h>
#include <sys/intrmap.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/toeplitz.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NKSTAT > 0
#include <sys/kstat.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define BUS_DMASYNC_PRERW	(BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW	(BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define MCX_HCA_BAR	PCI_MAPREG_START /* BAR 0 */

#define MCX_FW_VER			0x0000
#define  MCX_FW_VER_MAJOR(_v)			((_v) & 0xffff)
#define  MCX_FW_VER_MINOR(_v)			((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER		0x0004
#define  MCX_FW_VER_SUBMINOR(_v)		((_v) & 0xffff)
#define  MCX_CMDIF(_v)				((_v) >> 16)
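
/*
 * For illustration (hypothetical register values): if MCX_FW_VER reads
 * 0x00100010 and MCX_CMDIF_FW_SUBVER reads 0x00050003, the macros above
 * decode firmware version 16.16.3 and command interface version 5.
 */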

#define MCX_ISSI			1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED		5

#define MCX_HARDMTU			9500

enum mcx_cmdq_slot {
	MCX_CMDQ_SLOT_POLL = 0,
	MCX_CMDQ_SLOT_IOCTL,
	MCX_CMDQ_SLOT_KSTAT,
	MCX_CMDQ_SLOT_LINK,

	MCX_CMDQ_NUM_SLOTS
};

#define MCX_PAGE_SHIFT			12
#define MCX_PAGE_SIZE			(1 << MCX_PAGE_SHIFT)

/* queue sizes */
#define MCX_LOG_EQ_SIZE			7
#define MCX_LOG_CQ_SIZE			12
#define MCX_LOG_RQ_SIZE			10
#define MCX_LOG_SQ_SIZE			11

#define MCX_MAX_QUEUES			16

/* completion event moderation - about 10khz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD		50
#define MCX_CQ_MOD_COUNTER		\
	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)
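
/*
 * With MCX_LOG_CQ_SIZE 12 the completion queue has 4096 entries and
 * MCX_CQ_MOD_COUNTER works out to ((1 << 11) * 9) / 10 = 1843 entries.
 */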

#define MCX_LOG_SQ_ENTRY_SIZE		6
#define MCX_SQ_ENTRY_MAX_SLOTS		4
#define MCX_SQ_SEGS_PER_SLOT		\
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
#define MCX_SQ_MAX_SEGMENTS		\
	(1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT))
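
/*
 * mcx_sq_entry is 64 bytes and mcx_sq_entry_seg is 16 bytes, so
 * MCX_SQ_SEGS_PER_SLOT is 4 and MCX_SQ_MAX_SEGMENTS is 1 + 3 * 4 = 13.
 */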

#define MCX_LOG_FLOW_TABLE_SIZE		5
#define MCX_NUM_STATIC_FLOWS		4 /* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS		\
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)
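
/* with a 32 entry flow table, that leaves 32 - 4 = 28 multicast flows */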

#define MCX_SQ_INLINE_SIZE		18
CTASSERT(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN == MCX_SQ_INLINE_SIZE);
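
/* 14 bytes of ethernet header plus a 4 byte vlan tag */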

/* doorbell offsets */
#define MCX_DOORBELL_AREA_SIZE		MCX_PAGE_SIZE

#define MCX_CQ_DOORBELL_BASE		0
#define MCX_CQ_DOORBELL_STRIDE		64

#define MCX_WQ_DOORBELL_BASE		(MCX_PAGE_SIZE / 2)
#define MCX_WQ_DOORBELL_STRIDE		64
/* make sure the doorbells fit */
CTASSERT(MCX_MAX_QUEUES * MCX_CQ_DOORBELL_STRIDE < MCX_WQ_DOORBELL_BASE);
CTASSERT(MCX_MAX_QUEUES * MCX_WQ_DOORBELL_STRIDE <
    MCX_DOORBELL_AREA_SIZE - MCX_WQ_DOORBELL_BASE);
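
/*
 * with 16 queues and a stride of 64, cq doorbells end at byte 1024 and
 * wq doorbells occupy bytes 2048-3071, so both fit in the 4096 byte page
 */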

#define MCX_WQ_DOORBELL_MASK		0xffff

/* uar registers */
#define MCX_UAR_CQ_DOORBELL		0x20
#define MCX_UAR_EQ_DOORBELL_ARM		0x40
#define MCX_UAR_EQ_DOORBELL		0x48
#define MCX_UAR_BF			0x800

#define MCX_CMDQ_ADDR_HI		0x0010
#define MCX_CMDQ_ADDR_LO		0x0014
#define MCX_CMDQ_ADDR_NMASK		0xfff
#define MCX_CMDQ_LOG_SIZE(_v)		((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v)		((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK		(0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER	(0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED	(0x1 << 8)

#define MCX_CMDQ_DOORBELL		0x0018

#define MCX_STATE			0x01fc
#define MCX_STATE_MASK				(1U << 31)
#define MCX_STATE_INITIALIZING			(1U << 31)
#define MCX_STATE_READY				(0 << 31)
#define MCX_STATE_INTERFACE_MASK		(0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER		(0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED		(0x1 << 24)

#define MCX_INTERNAL_TIMER		0x1000
#define MCX_INTERNAL_TIMER_H		0x1000
#define MCX_INTERNAL_TIMER_L		0x1004

#define MCX_CLEAR_INT			0x100c

#define MCX_REG_OP_WRITE		0
#define MCX_REG_OP_READ			1

#define MCX_REG_PMLP			0x5002
#define MCX_REG_PMTU			0x5003
#define MCX_REG_PTYS			0x5004
#define MCX_REG_PAOS			0x5006
#define MCX_REG_PFCC			0x5007
#define MCX_REG_PPCNT			0x5008
#define MCX_REG_MTCAP			0x9009 /* mgmt temp capabilities */
#define MCX_REG_MTMP			0x900a /* mgmt temp */
#define MCX_REG_MCIA			0x9014
#define MCX_REG_MCAM			0x907f

#define MCX_ETHER_CAP_SGMII		0
#define MCX_ETHER_CAP_1000_KX		1
#define MCX_ETHER_CAP_10G_CX4		2
#define MCX_ETHER_CAP_10G_KX4		3
#define MCX_ETHER_CAP_10G_KR		4
#define MCX_ETHER_CAP_40G_CR4		6
#define MCX_ETHER_CAP_40G_KR4		7
#define MCX_ETHER_CAP_10G_CR		12
#define MCX_ETHER_CAP_10G_SR		13
#define MCX_ETHER_CAP_10G_LR		14
#define MCX_ETHER_CAP_40G_SR4		15
#define MCX_ETHER_CAP_40G_LR4		16
#define MCX_ETHER_CAP_50G_SR2		18
#define MCX_ETHER_CAP_100G_CR4		20
#define MCX_ETHER_CAP_100G_SR4		21
#define MCX_ETHER_CAP_100G_KR4		22
#define MCX_ETHER_CAP_100G_LR4		23
#define MCX_ETHER_CAP_25G_CR		27
#define MCX_ETHER_CAP_25G_KR		28
#define MCX_ETHER_CAP_25G_SR		29
#define MCX_ETHER_CAP_50G_CR2		30
#define MCX_ETHER_CAP_50G_KR2		31

#define MCX_MAX_CQE			32

#define MCX_CMD_QUERY_HCA_CAP		0x100
#define MCX_CMD_QUERY_ADAPTER		0x101
#define MCX_CMD_INIT_HCA		0x102
#define MCX_CMD_TEARDOWN_HCA		0x103
#define MCX_CMD_ENABLE_HCA		0x104
#define MCX_CMD_DISABLE_HCA		0x105
#define MCX_CMD_QUERY_PAGES		0x107
#define MCX_CMD_MANAGE_PAGES		0x108
#define MCX_CMD_SET_HCA_CAP		0x109
#define MCX_CMD_QUERY_ISSI		0x10a
#define MCX_CMD_SET_ISSI		0x10b
#define MCX_CMD_SET_DRIVER_VERSION	0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS	0x203
#define MCX_CMD_CREATE_EQ		0x301
#define MCX_CMD_DESTROY_EQ		0x302
#define MCX_CMD_QUERY_EQ		0x303
#define MCX_CMD_CREATE_CQ		0x400
#define MCX_CMD_DESTROY_CQ		0x401
#define MCX_CMD_QUERY_CQ		0x402
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT	0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
					0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS	0x770
#define MCX_CMD_ALLOC_PD		0x800
#define MCX_CMD_ALLOC_UAR		0x802
#define MCX_CMD_ACCESS_REG		0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN	0x816
#define MCX_CMD_CREATE_TIR		0x900
#define MCX_CMD_DESTROY_TIR		0x902
#define MCX_CMD_CREATE_SQ		0x904
#define MCX_CMD_MODIFY_SQ		0x905
#define MCX_CMD_DESTROY_SQ		0x906
#define MCX_CMD_QUERY_SQ		0x907
#define MCX_CMD_CREATE_RQ		0x908
#define MCX_CMD_MODIFY_RQ		0x909
#define MCX_CMD_DESTROY_RQ		0x90a
#define MCX_CMD_QUERY_RQ		0x90b
#define MCX_CMD_CREATE_TIS		0x912
#define MCX_CMD_DESTROY_TIS		0x914
#define MCX_CMD_CREATE_RQT		0x916
#define MCX_CMD_DESTROY_RQT		0x918
#define MCX_CMD_SET_FLOW_TABLE_ROOT	0x92f
#define MCX_CMD_CREATE_FLOW_TABLE	0x930
#define MCX_CMD_DESTROY_FLOW_TABLE	0x931
#define MCX_CMD_QUERY_FLOW_TABLE	0x932
#define MCX_CMD_CREATE_FLOW_GROUP	0x933
#define MCX_CMD_DESTROY_FLOW_GROUP	0x934
#define MCX_CMD_QUERY_FLOW_GROUP	0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY	0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY	0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY	0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER	0x939
#define MCX_CMD_QUERY_FLOW_COUNTER	0x93b

#define MCX_QUEUE_STATE_RST		0
#define MCX_QUEUE_STATE_RDY		1
#define MCX_QUEUE_STATE_ERR		3

#define MCX_FLOW_TABLE_TYPE_RX		0
#define MCX_FLOW_TABLE_TYPE_TX		1

#define MCX_CMDQ_INLINE_DATASIZE 16

struct mcx_cmdq_entry {
	uint8_t			cq_type;
#define MCX_CMDQ_TYPE_PCIE		0x7
	uint8_t			cq_reserved0[3];

	uint32_t		cq_input_length;
	uint64_t		cq_input_ptr;
	uint8_t			cq_input_data[MCX_CMDQ_INLINE_DATASIZE];

	uint8_t			cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
	uint64_t		cq_output_ptr;
	uint32_t		cq_output_length;

	uint8_t			cq_token;
	uint8_t			cq_signature;
	uint8_t			cq_reserved1[1];
	uint8_t			cq_status;
#define MCX_CQ_STATUS_SHIFT		1
#define MCX_CQ_STATUS_MASK		(0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK		(0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR		(0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE	(0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM		(0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE	(0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE	(0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY	(0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM	(0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE	(0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX		(0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES	(0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN	(0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN	(0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
					(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE		(0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK		0x1
#define MCX_CQ_STATUS_OWN_SW		0x0
#define MCX_CQ_STATUS_OWN_HW		0x1
} __packed __aligned(8);
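
/*
 * A sketch of the command queue handshake (see mcx_cmdq_post() and
 * mcx_cmdq_poll() later in this file): software fills in an entry, sets
 * the ownership bit in cq_status to MCX_CQ_STATUS_OWN_HW, rings the
 * MCX_CMDQ_DOORBELL register, then waits for the firmware to hand the
 * entry back with MCX_CQ_STATUS_OWN_SW and a completion code in the
 * status bits above.
 */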

#define MCX_CMDQ_MAILBOX_DATASIZE	512

struct mcx_cmdq_mailbox {
	uint8_t			mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t			mb_reserved0[48];
	uint64_t		mb_next_ptr;
	uint32_t		mb_block_number;
	uint8_t			mb_reserved1[1];
	uint8_t			mb_token;
	uint8_t			mb_ctrl_signature;
	uint8_t			mb_signature;
} __packed __aligned(8);

#define MCX_CMDQ_MAILBOX_ALIGN	(1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE	roundup(sizeof(struct mcx_cmdq_mailbox), \
				    MCX_CMDQ_MAILBOX_ALIGN)
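
/*
 * sizeof(struct mcx_cmdq_mailbox) is 576 bytes (512 of data plus 64 of
 * header), so MCX_CMDQ_MAILBOX_SIZE rounds up to 1024.
 */
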
/*
 * command mailbox structures
 */

struct mcx_cmd_enable_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_function_id;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_enable_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_init_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_init_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL	0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC	0x1
	uint16_t		cmd_profile;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_access_reg_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_register_id;
	uint32_t		cmd_argument;
} __packed __aligned(4);

struct mcx_cmd_access_reg_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_reg_pmtu {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[2];
	uint16_t		rp_max_mtu;
	uint8_t			rp_reserved3[2];
	uint16_t		rp_admin_mtu;
	uint8_t			rp_reserved4[2];
	uint16_t		rp_oper_mtu;
	uint8_t			rp_reserved5[2];
} __packed __aligned(4);

struct mcx_reg_ptys {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2;
	uint8_t			rp_proto_mask;
#define MCX_REG_PTYS_PROTO_MASK_ETH		(1 << 2)
	uint8_t			rp_reserved3[8];
	uint32_t		rp_eth_proto_cap;
	uint8_t			rp_reserved4[8];
	uint32_t		rp_eth_proto_admin;
	uint8_t			rp_reserved5[8];
	uint32_t		rp_eth_proto_oper;
	uint8_t			rp_reserved6[24];
} __packed __aligned(4);

struct mcx_reg_paos {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP		1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN		2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE	3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED	4
	uint8_t			rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP		1
#define MCX_REG_PAOS_OPER_STATUS_DOWN		2
#define MCX_REG_PAOS_OPER_STATUS_FAILED		4
	uint8_t			rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN	(1 << 7)
	uint8_t			rp_reserved2[11];
} __packed __aligned(4);

struct mcx_reg_pfcc {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[3];
	uint8_t			rp_prio_mask_tx;
	uint8_t			rp_reserved3;
	uint8_t			rp_prio_mask_rx;
	uint8_t			rp_pptx_aptx;
	uint8_t			rp_pfctx;
	uint8_t			rp_fctx_dis;
	uint8_t			rp_reserved4;
	uint8_t			rp_pprx_aprx;
	uint8_t			rp_pfcrx;
	uint8_t			rp_reserved5[2];
	uint16_t		rp_dev_stall_min;
	uint16_t		rp_dev_stall_crit;
	uint8_t			rp_reserved6[12];
} __packed __aligned(4);

#define MCX_PMLP_MODULE_NUM_MASK	0xff
struct mcx_reg_pmlp {
	uint8_t			rp_rxtx;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved0;
	uint8_t			rp_width;
	uint32_t		rp_lane0_mapping;
	uint32_t		rp_lane1_mapping;
	uint32_t		rp_lane2_mapping;
	uint32_t		rp_lane3_mapping;
	uint8_t			rp_reserved1[44];
} __packed __aligned(4);

struct mcx_reg_ppcnt {
	uint8_t			ppcnt_swid;
	uint8_t			ppcnt_local_port;
	uint8_t			ppcnt_pnat;
	uint8_t			ppcnt_grp;
#define MCX_REG_PPCNT_GRP_IEEE8023		0x00
#define MCX_REG_PPCNT_GRP_RFC2863		0x01
#define MCX_REG_PPCNT_GRP_RFC2819		0x02
#define MCX_REG_PPCNT_GRP_RFC3635		0x03
#define MCX_REG_PPCNT_GRP_PER_PRIO		0x10
#define MCX_REG_PPCNT_GRP_PER_TC		0x11
#define MCX_REG_PPCNT_GRP_PER_RX_BUFFER		0x11

	uint8_t			ppcnt_clr;
	uint8_t			ppcnt_reserved1[2];
	uint8_t			ppcnt_prio_tc;
#define MCX_REG_PPCNT_CLR			(1 << 7)

	uint8_t			ppcnt_counter_set[248];
} __packed __aligned(8);
CTASSERT(sizeof(struct mcx_reg_ppcnt) == 256);
CTASSERT((offsetof(struct mcx_reg_ppcnt, ppcnt_counter_set) %
    sizeof(uint64_t)) == 0);

enum mcx_ppcnt_ieee8023 {
	frames_transmitted_ok,
	frames_received_ok,
	frame_check_sequence_errors,
	alignment_errors,
	octets_transmitted_ok,
	octets_received_ok,
	multicast_frames_xmitted_ok,
	broadcast_frames_xmitted_ok,
	multicast_frames_received_ok,
	broadcast_frames_received_ok,
	in_range_length_errors,
	out_of_range_length_field,
	frame_too_long_errors,
	symbol_error_during_carrier,
	mac_control_frames_transmitted,
	mac_control_frames_received,
	unsupported_opcodes_received,
	pause_mac_ctrl_frames_received,
	pause_mac_ctrl_frames_transmitted,

	mcx_ppcnt_ieee8023_count
};
CTASSERT(mcx_ppcnt_ieee8023_count * sizeof(uint64_t) == 0x98);
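
/* 19 counters of 8 bytes each add up to the 0x98 byte group size */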

enum mcx_ppcnt_rfc2863 {
	in_octets,
	in_ucast_pkts,
	in_discards,
	in_errors,
	in_unknown_protos,
	out_octets,
	out_ucast_pkts,
	out_discards,
	out_errors,
	in_multicast_pkts,
	in_broadcast_pkts,
	out_multicast_pkts,
	out_broadcast_pkts,

	mcx_ppcnt_rfc2863_count
};
CTASSERT(mcx_ppcnt_rfc2863_count * sizeof(uint64_t) == 0x68);

enum mcx_ppcnt_rfc2819 {
	drop_events,
	octets,
	pkts,
	broadcast_pkts,
	multicast_pkts,
	crc_align_errors,
	undersize_pkts,
	oversize_pkts,
	fragments,
	jabbers,
	collisions,
	pkts64octets,
	pkts65to127octets,
	pkts128to255octets,
	pkts256to511octets,
	pkts512to1023octets,
	pkts1024to1518octets,
	pkts1519to2047octets,
	pkts2048to4095octets,
	pkts4096to8191octets,
	pkts8192to10239octets,

	mcx_ppcnt_rfc2819_count
};
CTASSERT((mcx_ppcnt_rfc2819_count * sizeof(uint64_t)) == 0xa8);

enum mcx_ppcnt_rfc3635 {
	dot3stats_alignment_errors,
	dot3stats_fcs_errors,
	dot3stats_single_collision_frames,
	dot3stats_multiple_collision_frames,
	dot3stats_sqe_test_errors,
	dot3stats_deferred_transmissions,
	dot3stats_late_collisions,
	dot3stats_excessive_collisions,
	dot3stats_internal_mac_transmit_errors,
	dot3stats_carrier_sense_errors,
	dot3stats_frame_too_longs,
	dot3stats_internal_mac_receive_errors,
	dot3stats_symbol_errors,
	dot3control_in_unknown_opcodes,
	dot3in_pause_frames,
	dot3out_pause_frames,

	mcx_ppcnt_rfc3635_count
};
CTASSERT((mcx_ppcnt_rfc3635_count * sizeof(uint64_t)) == 0x80);

struct mcx_reg_mcam {
	uint8_t			_reserved1[1];
	uint8_t			mcam_feature_group;
	uint8_t			_reserved2[1];
	uint8_t			mcam_access_reg_group;
	uint8_t			_reserved3[4];
	uint8_t			mcam_access_reg_cap_mask[16];
	uint8_t			_reserved4[16];
	uint8_t			mcam_feature_cap_mask[16];
	uint8_t			_reserved5[16];
} __packed __aligned(4);

#define MCX_BITFIELD_BIT(bf, b)	(bf[(sizeof bf - 1) - (b / 8)] & (1 << (b % 8)))

#define MCX_MCAM_FEATURE_CAP_SENSOR_MAP	6
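
/*
 * MCX_BITFIELD_BIT indexes big-endian bitfields such as
 * mcam_feature_cap_mask above; e.g. MCX_BITFIELD_BIT(mcam_feature_cap_mask,
 * MCX_MCAM_FEATURE_CAP_SENSOR_MAP) tests bit 6 of the last byte.
 */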

struct mcx_reg_mtcap {
	uint8_t			_reserved1[3];
	uint8_t			mtcap_sensor_count;
	uint8_t			_reserved2[4];

	uint64_t		mtcap_sensor_map;
};

struct mcx_reg_mtmp {
	uint8_t			_reserved1[2];
	uint16_t		mtmp_sensor_index;

	uint8_t			_reserved2[2];
	uint16_t		mtmp_temperature;

	uint16_t		mtmp_mte_mtr;
#define MCX_REG_MTMP_MTE		(1 << 15)
#define MCX_REG_MTMP_MTR		(1 << 14)
	uint16_t		mtmp_max_temperature;

	uint16_t		mtmp_tee;
#define MCX_REG_MTMP_TEE_NOPE		(0 << 14)
#define MCX_REG_MTMP_TEE_GENERATE	(1 << 14)
#define MCX_REG_MTMP_TEE_GENERATE_ONE	(2 << 14)
	uint16_t		mtmp_temperature_threshold_hi;

	uint8_t			_reserved3[2];
	uint16_t		mtmp_temperature_threshold_lo;

	uint8_t			_reserved4[4];

	uint8_t			mtmp_sensor_name[8];
};
CTASSERT(sizeof(struct mcx_reg_mtmp) == 0x20);
CTASSERT(offsetof(struct mcx_reg_mtmp, mtmp_sensor_name) == 0x18);
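
/*
 * mtmp_temperature is a signed value in units of 0.125 degrees Celsius
 * (an assumption based on the PRM; the sensor code later in this file
 * does the conversion).
 */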

#define MCX_MCIA_EEPROM_BYTES	32
struct mcx_reg_mcia {
	uint8_t			rm_l;
	uint8_t			rm_module;
	uint8_t			rm_reserved0;
	uint8_t			rm_status;
	uint8_t			rm_i2c_addr;
	uint8_t			rm_page_num;
	uint16_t		rm_dev_addr;
	uint16_t		rm_reserved1;
	uint16_t		rm_size;
	uint32_t		rm_reserved2;
	uint8_t			rm_data[48];
} __packed __aligned(4);

struct mcx_cmd_query_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_issi_il_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_issi_mb_out {
	uint8_t			cmd_reserved2[16];
	uint8_t			cmd_supported_issi[80]; /* very big endian */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_set_issi_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_PAGES_BOOT	0x01
#define MCX_CMD_QUERY_PAGES_INIT	0x02
#define MCX_CMD_QUERY_PAGES_REGULAR	0x03
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	int32_t			cmd_num_pages;
} __packed __aligned(4);

struct mcx_cmd_manage_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
					0x00
#define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
					0x01
#define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
					0x02
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	uint32_t		cmd_input_num_entries;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_manage_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_output_num_entries;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_hca_cap_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_HCA_CAP_MAX	(0x0 << 0)
#define MCX_CMD_QUERY_HCA_CAP_CURRENT	(0x1 << 0)
#define MCX_CMD_QUERY_HCA_CAP_DEVICE	(0x0 << 1)
#define MCX_CMD_QUERY_HCA_CAP_OFFLOAD	(0x1 << 1)
#define MCX_CMD_QUERY_HCA_CAP_FLOW	(0x7 << 1)
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_hca_cap_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

#define MCX_HCA_CAP_LEN			0x1000
#define MCX_HCA_CAP_NMAILBOXES		\
	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)

#if __GNUC_PREREQ__(4, 3)
#define __counter__		__COUNTER__
#else
#define __counter__		__LINE__
#endif

#define __token(_tok, _num)	_tok##_num
#define _token(_tok, _num)	__token(_tok, _num)
#define __reserved__		_token(__reserved, __counter__)
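
/*
 * __reserved__ expands to a uniquely numbered identifier (__reserved0,
 * __reserved1, ...) so the many anonymous reserved fields in
 * struct mcx_cap_device below don't need hand-numbered names.
 */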

struct mcx_cap_device {
	uint8_t			reserved0[16];

	uint8_t			log_max_srq_sz;
	uint8_t			log_max_qp_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_qp; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_QP	0x1f

	uint8_t			__reserved__[1];
	uint8_t			log_max_srq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SRQ	0x1f
	uint8_t			__reserved__[2];

	uint8_t			__reserved__[1];
	uint8_t			log_max_cq_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_cq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_CQ	0x1f

	uint8_t			log_max_eq_sz;
	uint8_t			log_max_mkey; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MKEY	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_eq; /* 4 bits */
#define MCX_CAP_DEVICE_LOG_MAX_EQ	0x0f

	uint8_t			max_indirection;
	uint8_t			log_max_mrw_sz; /* 7 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ	0x7f
	uint8_t			teardown_log_max_msf_list_size;
#define MCX_CAP_DEVICE_FORCE_TEARDOWN	0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
					0x3f
	uint8_t			log_max_klm_list_size; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_REQ_DC	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
					0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
					0x3f

	uint8_t			flags1;
#define MCX_CAP_DEVICE_END_PAD		0x80
#define MCX_CAP_DEVICE_CC_QUERY_ALLOWED	0x40
#define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
					0x20
#define MCX_CAP_DEVICE_START_PAD	0x10
#define MCX_CAP_DEVICE_128BYTE_CACHELINE \
					0x08
	uint8_t			__reserved__[1];
	uint16_t		gid_table_size;

	uint16_t		flags2;
#define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT	0x8000
#define MCX_CAP_DEVICE_VPORT_COUNTERS	0x4000
#define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
					0x2000
#define MCX_CAP_DEVICE_DEBUG		0x1000
#define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
					0x8000
#define MCX_CAP_DEVICE_RQ_DELAY_DROP	0x4000
#define MCX_CAP_DEVICe_MAX_QP_CNT_MASK	0x03ff
	uint16_t		pkey_table_size;

	uint8_t			flags3;
#define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
					0x80
#define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
					0x40
#define MCX_CAP_DEVICE_IB_VIRTUAL	0x20
#define MCX_CAP_DEVICE_ETH_VIRTUAL	0x10
#define MCX_CAP_DEVICE_ETS		0x04
#define MCX_CAP_DEVICE_NIC_FLOW_TABLE	0x02
#define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
					0x01
	uint8_t			local_ca_ack_delay; /* 5 bits */
#define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
					0x1f
#define MCX_CAP_DEVICE_MCAM_REG		0x40
	uint8_t			port_type;
#define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
					0x80
#define MCX_CAP_DEVICE_PORT_TYPE	0x03
#define MCX_CAP_DEVICE_PORT_TYPE_ETH	0x01
	uint8_t			num_ports;

	uint8_t			snapshot_log_max_msg;
#define MCX_CAP_DEVICE_SNAPSHOT		0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSG	0x1f
	uint8_t			max_tc; /* 4 bits */
#define MCX_CAP_DEVICE_MAX_TC		0x0f
	uint8_t			flags4;
#define MCX_CAP_DEVICE_TEMP_WARN_EVENT	0x80
#define MCX_CAP_DEVICE_DCBX		0x40
#define MCX_CAP_DEVICE_ROL_S		0x02
#define MCX_CAP_DEVICE_ROL_G		0x01
	uint8_t			wol;
#define MCX_CAP_DEVICE_WOL_S		0x40
#define MCX_CAP_DEVICE_WOL_G		0x20
#define MCX_CAP_DEVICE_WOL_A		0x10
#define MCX_CAP_DEVICE_WOL_B		0x08
#define MCX_CAP_DEVICE_WOL_M		0x04
#define MCX_CAP_DEVICE_WOL_U		0x02
#define MCX_CAP_DEVICE_WOL_P		0x01

	uint16_t		stat_rate_support;
	uint8_t			__reserved__[1];
	uint8_t			cqe_version; /* 4 bits */
#define MCX_CAP_DEVICE_CQE_VERSION	0x0f

	uint32_t		flags5;
#define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
					0x80000000
#define MCX_CAP_DEVICE_STRIDING_RQ	0x40000000
#define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
					0x10000000
#define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
					0x08000000
#define MCX_CAP_DEVICE_DC_CONNECT_CP	0x00040000
#define MCX_CAP_DEVICE_DC_CNAK_DRACE	0x00020000
#define MCX_CAP_DEVICE_DRAIN_SIGERR	0x00010000
#define MCX_CAP_DEVICE_CMDIF_CHECKSUM	0x0000c000
#define MCX_CAP_DEVICE_SIGERR_QCE	0x00002000
#define MCX_CAP_DEVICE_WQ_SIGNATURE	0x00000800
#define MCX_CAP_DEVICE_SCTR_DATA_CQE	0x00000400
#define MCX_CAP_DEVICE_SHO		0x00000100
#define MCX_CAP_DEVICE_TPH		0x00000080
#define MCX_CAP_DEVICE_RF		0x00000040
#define MCX_CAP_DEVICE_DCT		0x00000020
#define MCX_CAP_DEVICE_QOS		0x00000010
#define MCX_CAP_DEVICe_ETH_NET_OFFLOADS	0x00000008
#define MCX_CAP_DEVICE_ROCE		0x00000004
#define MCX_CAP_DEVICE_ATOMIC		0x00000002

	uint32_t		flags6;
#define MCX_CAP_DEVICE_CQ_OI		0x80000000
#define MCX_CAP_DEVICE_CQ_RESIZE	0x40000000
#define MCX_CAP_DEVICE_CQ_MODERATION	0x20000000
#define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
					0x10000000
#define MCX_CAP_DEVICE_CQ_INVALIDATE	0x08000000
#define MCX_CAP_DEVICE_RESERVED_AT_255	0x04000000
#define MCX_CAP_DEVICE_CQ_EQ_REMAP	0x02000000
#define MCX_CAP_DEVICE_PG		0x01000000
#define MCX_CAP_DEVICE_BLOCK_LB_MC	0x00800000
#define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
					0x00400000
#define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
					0x00200000
#define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
					0x00100000
#define MCX_CAP_DEVICE_CD		0x00080000
#define MCX_CAP_DEVICE_ATM		0x00040000
#define MCX_CAP_DEVICE_APM		0x00020000
#define MCX_CAP_DEVICE_IMAICL		0x00010000
#define MCX_CAP_DEVICE_QKV		0x00000200
#define MCX_CAP_DEVICE_PKV		0x00000100
#define MCX_CAP_DEVICE_SET_DETH_SQPN	0x00000080
#define MCX_CAP_DEVICE_XRC		0x00000008
#define MCX_CAP_DEVICE_UD		0x00000004
#define MCX_CAP_DEVICE_UC		0x00000002
#define MCX_CAP_DEVICE_RC		0x00000001

	uint8_t			uar_flags;
#define MCX_CAP_DEVICE_UAR_4K		0x80
	uint8_t			uar_sz;	/* 6 bits */
#define MCX_CAP_DEVICE_UAR_SZ		0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_pg_sz;

	uint8_t			flags7;
#define MCX_CAP_DEVICE_BF		0x80
#define MCX_CAP_DEVICE_DRIVER_VERSION	0x40
#define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
					0x20
	uint8_t			log_bf_reg_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_BF_REG_SIZE	0x1f
	uint8_t			__reserved__[2];

	uint16_t		num_of_diagnostic_counters;
	uint16_t		max_wqe_sz_sq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_rq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_sq_dc;

	uint32_t		max_qp_mcg; /* 25 bits */
#define MCX_CAP_DEVICE_MAX_QP_MCG	0x1ffffff

	uint8_t			__reserved__[3];
	uint8_t			log_max_mcq;

	uint8_t			log_max_transport_domain; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN \
					0x1f
	uint8_t			log_max_pd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_PD	0x1f
	uint8_t			__reserved__[1];
	uint8_t			log_max_xrcd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_XRCD	0x1f

	uint8_t			__reserved__[2];
	uint16_t		max_flow_counter;

	uint8_t			log_max_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQ	0x1f
	uint8_t			log_max_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SQ	0x1f
	uint8_t			log_max_tir; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIR	0x1f
	uint8_t			log_max_tis; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS	0x1f

	uint8_t			flags8;
#define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
					0x80
#define MCX_CAP_DEVICE_LOG_MAX_RMP	0x1f
	uint8_t			log_max_rqt; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT	0x1f
	uint8_t			log_max_rqt_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE	0x1f
	uint8_t			log_max_tis_per_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
					0x1f

	uint8_t			flags9;
#define MXC_CAP_DEVICE_EXT_STRIDE_NUM_RANGES \
					0x80
#define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_RQ \
					0x1f
	uint8_t			log_min_stride_sz_rq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_RQ \
					0x1f
	uint8_t			log_max_stride_sz_sq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_SQ \
					0x1f
	uint8_t			log_min_stride_sz_sq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_SQ \
					0x1f

	uint8_t			log_max_hairpin_queues;
#define MXC_CAP_DEVICE_HAIRPIN		0x80
#define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_QUEUES \
					0x1f
	uint8_t			log_min_hairpin_queues;
#define MXC_CAP_DEVICE_LOG_MIN_HAIRPIN_QUEUES \
					0x1f
	uint8_t			log_max_hairpin_num_packets;
#define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_NUM_PACKETS \
					0x1f
	uint8_t			log_max_mq_sz;
#define MXC_CAP_DEVICE_LOG_MAX_WQ_SZ \
					0x1f

	uint8_t			log_min_hairpin_wq_data_sz;
#define MXC_CAP_DEVICE_NIC_VPORT_CHANGE_EVENT \
					0x80
#define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_UC \
					0x40
#define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_MC \
					0x20
#define MCX_CAP_DEVICE_LOG_MIN_HAIRPIN_WQ_DATA_SZ \
					0x1f
	uint8_t			log_max_vlan_list;
#define MXC_CAP_DEVICE_SYSTEM_IMAGE_GUID_MODIFIABLE \
					0x80
#define MXC_CAP_DEVICE_LOG_MAX_VLAN_LIST \
					0x1f
	uint8_t			log_max_current_mc_list;
#define MXC_CAP_DEVICE_LOG_MAX_CURRENT_MC_LIST \
					0x1f
	uint8_t			log_max_current_uc_list;
#define MXC_CAP_DEVICE_LOG_MAX_CURRENT_UC_LIST \
					0x1f

	uint8_t			__reserved__[4];

	uint32_t		create_qp_start_hint; /* 24 bits */

	uint8_t			log_max_uctx; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_UCTX	0x1f
	uint8_t			log_max_umem; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_UMEM	0x1f
	uint16_t		max_num_eqs;

	uint8_t			log_max_l2_table; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_L2_TABLE	0x1f
	uint8_t			__reserved__[1];
	uint16_t		log_uar_page_sz;

	uint8_t			__reserved__[8];

	uint32_t		device_frequency_mhz;
	uint32_t		device_frequency_khz;
} __packed __aligned(8);

CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_mhz) == 0x98);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_khz) == 0x9c);
CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_driver_version_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version {
	uint8_t			cmd_driver_version[64];
} __packed __aligned(8);

struct mcx_cmd_modify_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_field_select;
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR	0x04
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC	0x10
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU	0x40
} __packed __aligned(4);

struct mcx_cmd_modify_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint8_t			cmd_allowed_list_type;
	uint8_t			cmd_reserved2[3];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_nic_vport_ctx {
	uint32_t		vp_min_wqe_inline_mode;
	uint8_t			vp_reserved0[32];
	uint32_t		vp_mtu;
	uint8_t			vp_reserved1[200];
	uint16_t		vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC			(0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC			(1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN			(2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL			(1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST			(1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST			(1 << 15)
	uint16_t		vp_allowed_list_size;
	uint64_t		vp_perm_addr;
	uint8_t			vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);

struct mcx_counter {
	uint64_t		packets;
	uint64_t		octets;
} __packed __aligned(4);

struct mcx_nic_vport_counters {
	struct mcx_counter	rx_err;
	struct mcx_counter	tx_err;
	uint8_t			reserved0[64]; /* 0x30 */
	struct mcx_counter	rx_bcast;
	struct mcx_counter	tx_bcast;
	struct mcx_counter	rx_ucast;
	struct mcx_counter	tx_ucast;
	struct mcx_counter	rx_mcast;
	struct mcx_counter	tx_mcast;
	uint8_t			reserved1[0x210 - 0xd0];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[7];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[5];
	uint16_t		cmd_flow_counter_id;
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_uar;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_resd_lkey;
} __packed __aligned(4);

struct mcx_eq_ctx {
	uint32_t		eq_status;
#define MCX_EQ_CTX_STATE_SHIFT		8
#define MCX_EQ_CTX_STATE_MASK		(0xf << MCX_EQ_CTX_STATE_SHIFT)
#define MCX_EQ_CTX_STATE_ARMED		0x9
#define MCX_EQ_CTX_STATE_FIRED		0xa
#define MCX_EQ_CTX_OI_SHIFT		17
#define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT		18
#define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT		28
#define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK		0x0
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE 0xa
	uint32_t		eq_reserved1;
	uint32_t		eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
	uint32_t		eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
	uint32_t		eq_reserved2;
	uint8_t			eq_reserved3[3];
	uint8_t			eq_intr;
	uint32_t		eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		eq_reserved4[3];
	uint32_t		eq_consumer_counter;
	uint32_t		eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK		0xffffff
	uint32_t		eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);

struct mcx_cmd_create_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx	cmd_eq_ctx;
	uint8_t			cmd_reserved0[8];
	uint64_t		cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION	0x00
#define MCX_EVENT_TYPE_CQ_ERROR		0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
#define MCX_EVENT_TYPE_PORT_CHANGE	0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
#define MCX_EVENT_TYPE_LAST_WQE		0x13
	uint8_t			cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_eq_entry {
	uint8_t			eq_reserved1;
	uint8_t			eq_event_type;
	uint8_t			eq_reserved2;
	uint8_t			eq_event_sub_type;

	uint8_t			eq_reserved3[28];
	uint32_t		eq_event_data[7];
	uint8_t			eq_reserved4[2];
	uint8_t			eq_signature;
	uint8_t			eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT			1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);
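
/*
 * The eq_owner bit starts at MCX_EQ_ENTRY_OWNER_INIT and flips each time
 * the producer wraps the ring, so software detects fresh entries by
 * comparing the bit against the parity of its consumer counter (a sketch;
 * see the event queue handling later in this file).
 */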

struct mcx_cmd_alloc_pd_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_pd;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_create_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t			cmd_reserved0[20];
	uint32_t		cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_DIRECT	0
#define MCX_TIR_CTX_DISP_TYPE_INDIRECT	1
#define MCX_TIR_CTX_DISP_TYPE_SHIFT	28
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_lro;
	uint8_t			cmd_reserved2[8];
	uint32_t		cmd_inline_rqn;
	uint32_t		cmd_indir_table;
	uint32_t		cmd_tdomain;
#define MCX_TIR_CTX_HASH_TOEPLITZ	2
#define MCX_TIR_CTX_HASH_SHIFT		28
	uint8_t			cmd_rx_hash_key[40];
	uint32_t		cmd_rx_hash_sel_outer;
#define MCX_TIR_CTX_HASH_SEL_SRC_IP	(1 << 0)
#define MCX_TIR_CTX_HASH_SEL_DST_IP	(1 << 1)
#define MCX_TIR_CTX_HASH_SEL_SPORT	(1 << 2)
#define MCX_TIR_CTX_HASH_SEL_DPORT	(1 << 3)
#define MCX_TIR_CTX_HASH_SEL_IPV4	(0 << 31)
#define MCX_TIR_CTX_HASH_SEL_IPV6	(1U << 31)
#define MCX_TIR_CTX_HASH_SEL_TCP	(0 << 30)
#define MCX_TIR_CTX_HASH_SEL_UDP	(1 << 30)
	uint32_t		cmd_rx_hash_sel_inner;
	uint8_t			cmd_reserved3[152];
} __packed __aligned(4);
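
/*
 * A sketch of RSS hash selection: a TIR hashing TCP over IPv4 on both
 * addresses and ports would set cmd_rx_hash_sel_outer to
 * MCX_TIR_CTX_HASH_SEL_SRC_IP | MCX_TIR_CTX_HASH_SEL_DST_IP |
 * MCX_TIR_CTX_HASH_SEL_SPORT | MCX_TIR_CTX_HASH_SEL_DPORT |
 * MCX_TIR_CTX_HASH_SEL_IPV4 | MCX_TIR_CTX_HASH_SEL_TCP.
 */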

struct mcx_cmd_create_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t			cmd_reserved[16];
	uint32_t		cmd_prio;
	uint8_t			cmd_reserved1[32];
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_rqt_ctx {
	uint8_t			cmd_reserved0[20];
	uint16_t		cmd_reserved1;
	uint16_t		cmd_rqt_max_size;
	uint16_t		cmd_reserved2;
	uint16_t		cmd_rqt_actual_size;
	uint8_t			cmd_reserved3[212];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_mb_in {
	uint8_t			cmd_reserved0[16];
	struct mcx_rqt_ctx	cmd_rqt;
} __packed __aligned(4);

struct mcx_cmd_create_rqt_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_rqtn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rqtn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_ctx {
	uint32_t		cq_status;
#define MCX_CQ_CTX_STATUS_SHIFT		28
#define MCX_CQ_CTX_STATUS_MASK		(0xf << MCX_CQ_CTX_STATUS_SHIFT)
#define MCX_CQ_CTX_STATUS_OK		0x0
#define MCX_CQ_CTX_STATUS_OVERFLOW	0x9
#define MCX_CQ_CTX_STATUS_WRITE_FAIL	0xa
#define MCX_CQ_CTX_STATE_SHIFT		8
#define MCX_CQ_CTX_STATE_MASK		(0xf << MCX_CQ_CTX_STATE_SHIFT)
#define MCX_CQ_CTX_STATE_SOLICITED	0x6
#define MCX_CQ_CTX_STATE_ARMED		0x9
#define MCX_CQ_CTX_STATE_FIRED		0xa
	uint32_t		cq_reserved1;
	uint32_t		cq_page_offset;
	uint32_t		cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
	uint32_t		cq_period_max_count;
#define MCX_CQ_CTX_PERIOD_SHIFT		16
	uint32_t		cq_eqn;
	uint32_t		cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		cq_reserved2;
	uint32_t		cq_last_notified;
	uint32_t		cq_last_solicit;
	uint32_t		cq_consumer_counter;
	uint32_t		cq_producer_counter;
	uint8_t			cq_reserved3[8];
	uint64_t		cq_doorbell;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);

struct mcx_cmd_create_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx	cmd_cq_ctx;
	uint8_t			cmd_reserved1[192];
} __packed __aligned(4);

struct mcx_cmd_create_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_entry {
	uint32_t		__reserved__;
	uint32_t		cq_lro;
	uint32_t		cq_lro_ack_seq_num;
	uint32_t		cq_rx_hash;
	uint8_t			cq_rx_hash_type;
	uint8_t			cq_ml_path;
	uint16_t		__reserved__;
	uint32_t		cq_checksum;
	uint32_t		__reserved__;
	uint32_t		cq_flags;
#define MCX_CQ_ENTRY_FLAGS_L4_OK		(1 << 26)
#define MCX_CQ_ENTRY_FLAGS_L3_OK		(1 << 25)
#define MCX_CQ_ENTRY_FLAGS_L2_OK		(1 << 24)
#define MCX_CQ_ENTRY_FLAGS_CV			(1 << 16)
#define MCX_CQ_ENTRY_FLAGS_VLAN_MASK		(0xffff)

	uint32_t		cq_lro_srqn;
	uint32_t		__reserved__[2];
	uint32_t		cq_byte_cnt;
	uint64_t		cq_timestamp;
	uint8_t			cq_rx_drops;
	uint8_t			cq_flow_tag[3];
	uint16_t		cq_wqe_count;
	uint8_t			cq_signature;
	uint8_t			cq_opcode_owner;
#define MCX_CQ_ENTRY_FLAG_OWNER			(1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE			(1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT		2
#define MCX_CQ_ENTRY_OPCODE_SHIFT		4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE		0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32		1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64		2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED		3

#define MCX_CQ_ENTRY_OPCODE_REQ			0
#define MCX_CQ_ENTRY_OPCODE_SEND		2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR		13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR		14
#define MCX_CQ_ENTRY_OPCODE_INVALID		15

} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_entry) == 64);

struct mcx_cq_doorbell {
	uint32_t		 db_update_ci;
	uint32_t		 db_arm_ci;
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
#define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
} __packed __aligned(8);
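
/*
 * A sketch of arming a completion queue: db_arm_ci is composed as
 * (sequence << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT) | MCX_CQ_DOORBELL_ARM_CMD |
 * (ci & MCX_CQ_DOORBELL_ARM_CI_MASK), followed by a write to the uar
 * doorbell; see the cq arming code later in this file.
 */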

struct mcx_wq_ctx {
	uint8_t			 wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC			(1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE		(1 << 3)
	uint8_t			 wq_reserved0[5];
	uint16_t		 wq_lwm;
	uint32_t		 wq_pd;
	uint32_t		 wq_uar_page;
	uint64_t		 wq_doorbell;
	uint32_t		 wq_hw_counter;
	uint32_t		 wq_sw_counter;
	uint16_t		 wq_log_stride;
	uint8_t			 wq_log_page_sz;
	uint8_t			 wq_log_size;
	uint8_t			 wq_reserved1[156];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);

struct mcx_sq_ctx {
	uint32_t		sq_flags;
#define MCX_SQ_CTX_RLKEY			(1U << 31)
#define MCX_SQ_CTX_FRE_SHIFT			(1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR		(1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT		24
#define MCX_SQ_CTX_STATE_SHIFT			20
#define MCX_SQ_CTX_STATE_MASK			(0xf << 20)
#define MCX_SQ_CTX_STATE_RST			0
#define MCX_SQ_CTX_STATE_RDY			1
#define MCX_SQ_CTX_STATE_ERR			3
	uint32_t		sq_user_index;
	uint32_t		sq_cqn;
	uint32_t		sq_reserved1[5];
	uint32_t		sq_tis_lst_sz;
#define MCX_SQ_CTX_TIS_LST_SZ_SHIFT		16
	uint32_t		sq_reserved2[2];
	uint32_t		sq_tis_num;
	struct mcx_wq_ctx	sq_wq;
} __packed __aligned(4);

struct mcx_sq_entry_seg {
	uint32_t		sqs_byte_count;
	uint32_t		sqs_lkey;
	uint64_t		sqs_addr;
} __packed __aligned(4);

struct mcx_sq_entry {
	/* control segment */
	uint32_t		sqe_opcode_index;
#define MCX_SQE_WQE_INDEX_SHIFT			8
#define MCX_SQE_WQE_OPCODE_NOP			0x00
#define MCX_SQE_WQE_OPCODE_SEND			0x0a
	uint32_t		sqe_ds_sq_num;
#define MCX_SQE_SQ_NUM_SHIFT			8
	uint32_t		sqe_signature;
#define MCX_SQE_SIGNATURE_SHIFT			24
#define MCX_SQE_SOLICITED_EVENT			0x02
#define MCX_SQE_CE_CQE_ON_ERR			0x00
#define MCX_SQE_CE_CQE_FIRST_ERR		0x04
#define MCX_SQE_CE_CQE_ALWAYS			0x08
#define MCX_SQE_CE_CQE_SOLICIT			0x0C
#define MCX_SQE_FM_NO_FENCE			0x00
#define MCX_SQE_FM_SMALL_FENCE			0x40
	uint32_t		sqe_mkey;

	/* ethernet segment */
	uint32_t		sqe_reserved1;
	uint32_t		sqe_mss_csum;
#define MCX_SQE_L4_CSUM				(1U << 31)
#define MCX_SQE_L3_CSUM				(1 << 30)
	uint32_t		sqe_reserved2;
	uint16_t		sqe_inline_header_size;
	uint16_t		sqe_inline_headers[9];

	/* data segment */
	struct mcx_sq_entry_seg sqe_segs[1];
} __packed __aligned(64);

CTASSERT(sizeof(struct mcx_sq_entry) == 64);
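
/*
 * 16 bytes of control segment, 32 bytes of ethernet segment and one
 * 16 byte data segment add up to the 64 byte slot asserted above.
 */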

struct mcx_cmd_create_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_mb_in {
	uint32_t		cmd_modify_hi;
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_sq_ctx	cmd_sq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_rq_ctx {
	uint32_t		rq_flags;
#define MCX_RQ_CTX_RLKEY			(1U << 31)
#define MCX_RQ_CTX_VLAN_STRIP_DIS		(1 << 28)
#define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT		24
#define MCX_RQ_CTX_STATE_SHIFT			20
#define MCX_RQ_CTX_STATE_MASK			(0xf << 20)
#define MCX_RQ_CTX_STATE_RST			0
#define MCX_RQ_CTX_STATE_RDY			1
#define MCX_RQ_CTX_STATE_ERR			3
#define MCX_RQ_CTX_FLUSH_IN_ERROR		(1 << 18)
	uint32_t		rq_user_index;
	uint32_t		rq_cqn;
	uint32_t		rq_reserved1;
	uint32_t		rq_rmpn;
	uint32_t		rq_reserved2[7];
	struct mcx_wq_ctx	rq_wq;
} __packed __aligned(4);

struct mcx_rq_entry {
	uint32_t		rqe_byte_count;
	uint32_t		rqe_lkey;
	uint64_t		rqe_addr;
} __packed __aligned(16);

struct mcx_cmd_create_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_rqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_mb_in {
	uint32_t		cmd_modify_hi;
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_rq_ctx	cmd_rq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_flow_table_ctx {
	uint8_t			ft_miss_action;
	uint8_t			ft_level;
	uint8_t			ft_reserved0;
	uint8_t			ft_log_size;
	uint32_t		ft_table_miss_id;
	uint8_t			ft_reserved1[28];
} __packed __aligned(4);
1883 
1884 struct mcx_cmd_create_flow_table_mb_in {
1885 	uint8_t			cmd_table_type;
1886 	uint8_t			cmd_reserved0[7];
1887 	struct mcx_flow_table_ctx cmd_ctx;
1888 } __packed __aligned(4);
1889 
1890 struct mcx_cmd_create_flow_table_out {
1891 	uint8_t			cmd_status;
1892 	uint8_t			cmd_reserved0[3];
1893 	uint32_t		cmd_syndrome;
1894 	uint32_t		cmd_table_id;
1895 	uint8_t			cmd_reserved1[4];
1896 } __packed __aligned(4);
1897 
1898 struct mcx_cmd_destroy_flow_table_in {
1899 	uint16_t		cmd_opcode;
1900 	uint8_t			cmd_reserved0[4];
1901 	uint16_t		cmd_op_mod;
1902 	uint8_t			cmd_reserved1[8];
1903 } __packed __aligned(4);
1904 
1905 struct mcx_cmd_destroy_flow_table_mb_in {
1906 	uint8_t			cmd_table_type;
1907 	uint8_t			cmd_reserved0[3];
1908 	uint32_t		cmd_table_id;
1909 	uint8_t			cmd_reserved1[40];
1910 } __packed __aligned(4);
1911 
1912 struct mcx_cmd_destroy_flow_table_out {
1913 	uint8_t			cmd_status;
1914 	uint8_t			cmd_reserved0[3];
1915 	uint32_t		cmd_syndrome;
1916 	uint8_t			cmd_reserved1[8];
1917 } __packed __aligned(4);
1918 
1919 struct mcx_cmd_set_flow_table_root_in {
1920 	uint16_t		cmd_opcode;
1921 	uint8_t			cmd_reserved0[4];
1922 	uint16_t		cmd_op_mod;
1923 	uint8_t			cmd_reserved1[8];
1924 } __packed __aligned(4);
1925 
1926 struct mcx_cmd_set_flow_table_root_mb_in {
1927 	uint8_t			cmd_table_type;
1928 	uint8_t			cmd_reserved0[3];
1929 	uint32_t		cmd_table_id;
1930 	uint8_t			cmd_reserved1[56];
1931 } __packed __aligned(4);
1932 
1933 struct mcx_cmd_set_flow_table_root_out {
1934 	uint8_t			cmd_status;
1935 	uint8_t			cmd_reserved0[3];
1936 	uint32_t		cmd_syndrome;
1937 	uint8_t			cmd_reserved1[8];
1938 } __packed __aligned(4);
1939 
1940 struct mcx_flow_match {
1941 	/* outer headers */
1942 	uint8_t			mc_src_mac[6];
1943 	uint16_t		mc_ethertype;
1944 	uint8_t			mc_dest_mac[6];
1945 	uint16_t		mc_first_vlan;
1946 	uint8_t			mc_ip_proto;
1947 	uint8_t			mc_ip_dscp_ecn;
1948 	uint8_t			mc_vlan_flags;
1949 #define MCX_FLOW_MATCH_IP_FRAG	(1 << 5)
1950 	uint8_t			mc_tcp_flags;
1951 	uint16_t		mc_tcp_sport;
1952 	uint16_t		mc_tcp_dport;
1953 	uint32_t		mc_reserved0;
1954 	uint16_t		mc_udp_sport;
1955 	uint16_t		mc_udp_dport;
1956 	uint8_t			mc_src_ip[16];
1957 	uint8_t			mc_dest_ip[16];
1958 
1959 	/* misc parameters */
1960 	uint8_t			mc_reserved1[8];
1961 	uint16_t		mc_second_vlan;
1962 	uint8_t			mc_reserved2[2];
1963 	uint8_t			mc_second_vlan_flags;
1964 	uint8_t			mc_reserved3[15];
1965 	uint32_t		mc_outer_ipv6_flow_label;
1966 	uint8_t			mc_reserved4[32];
1967 
1968 	uint8_t			mc_reserved[384];
1969 } __packed __aligned(4);
1970 
1971 CTASSERT(sizeof(struct mcx_flow_match) == 512);
1972 
1973 struct mcx_cmd_create_flow_group_in {
1974 	uint16_t		cmd_opcode;
1975 	uint8_t			cmd_reserved0[4];
1976 	uint16_t		cmd_op_mod;
1977 	uint8_t			cmd_reserved1[8];
1978 } __packed __aligned(4);
1979 
1980 struct mcx_cmd_create_flow_group_mb_in {
1981 	uint8_t			cmd_table_type;
1982 	uint8_t			cmd_reserved0[3];
1983 	uint32_t		cmd_table_id;
1984 	uint8_t			cmd_reserved1[4];
1985 	uint32_t		cmd_start_flow_index;
1986 	uint8_t			cmd_reserved2[4];
1987 	uint32_t		cmd_end_flow_index;
1988 	uint8_t			cmd_reserved3[23];
1989 	uint8_t			cmd_match_criteria_enable;
1990 #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER	(1 << 0)
1991 #define MCX_CREATE_FLOW_GROUP_CRIT_MISC		(1 << 1)
1992 #define MCX_CREATE_FLOW_GROUP_CRIT_INNER	(1 << 2)
1993 	struct mcx_flow_match	cmd_match_criteria;
1994 	uint8_t			cmd_reserved4[448];
1995 } __packed __aligned(4);
1996 
1997 struct mcx_cmd_create_flow_group_out {
1998 	uint8_t			cmd_status;
1999 	uint8_t			cmd_reserved0[3];
2000 	uint32_t		cmd_syndrome;
2001 	uint32_t		cmd_group_id;
2002 	uint8_t			cmd_reserved1[4];
2003 } __packed __aligned(4);
2004 
2005 struct mcx_flow_ctx {
2006 	uint8_t			fc_reserved0[4];
2007 	uint32_t		fc_group_id;
2008 	uint32_t		fc_flow_tag;
2009 	uint32_t		fc_action;
2010 #define MCX_FLOW_CONTEXT_ACTION_ALLOW		(1 << 0)
2011 #define MCX_FLOW_CONTEXT_ACTION_DROP		(1 << 1)
2012 #define MCX_FLOW_CONTEXT_ACTION_FORWARD		(1 << 2)
2013 #define MCX_FLOW_CONTEXT_ACTION_COUNT		(1 << 3)
2014 	uint32_t		fc_dest_list_size;
2015 	uint32_t		fc_counter_list_size;
2016 	uint8_t			fc_reserved1[40];
2017 	struct mcx_flow_match	fc_match_value;
2018 	uint8_t			fc_reserved2[192];
2019 } __packed __aligned(4);
2020 
2021 #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE	(1 << 24)
2022 #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR		(2 << 24)
2023 
2024 struct mcx_cmd_destroy_flow_group_in {
2025 	uint16_t		cmd_opcode;
2026 	uint8_t			cmd_reserved0[4];
2027 	uint16_t		cmd_op_mod;
2028 	uint8_t			cmd_reserved1[8];
2029 } __packed __aligned(4);
2030 
2031 struct mcx_cmd_destroy_flow_group_mb_in {
2032 	uint8_t			cmd_table_type;
2033 	uint8_t			cmd_reserved0[3];
2034 	uint32_t		cmd_table_id;
2035 	uint32_t		cmd_group_id;
2036 	uint8_t			cmd_reserved1[36];
2037 } __packed __aligned(4);
2038 
2039 struct mcx_cmd_destroy_flow_group_out {
2040 	uint8_t			cmd_status;
2041 	uint8_t			cmd_reserved0[3];
2042 	uint32_t		cmd_syndrome;
2043 	uint8_t			cmd_reserved1[8];
2044 } __packed __aligned(4);
2045 
2046 struct mcx_cmd_set_flow_table_entry_in {
2047 	uint16_t		cmd_opcode;
2048 	uint8_t			cmd_reserved0[4];
2049 	uint16_t		cmd_op_mod;
2050 	uint8_t			cmd_reserved1[8];
2051 } __packed __aligned(4);
2052 
2053 struct mcx_cmd_set_flow_table_entry_mb_in {
2054 	uint8_t			cmd_table_type;
2055 	uint8_t			cmd_reserved0[3];
2056 	uint32_t		cmd_table_id;
2057 	uint32_t		cmd_modify_enable_mask;
2058 	uint8_t			cmd_reserved1[4];
2059 	uint32_t		cmd_flow_index;
2060 	uint8_t			cmd_reserved2[28];
2061 	struct mcx_flow_ctx	cmd_flow_ctx;
2062 } __packed __aligned(4);
2063 
2064 struct mcx_cmd_set_flow_table_entry_out {
2065 	uint8_t			cmd_status;
2066 	uint8_t			cmd_reserved0[3];
2067 	uint32_t		cmd_syndrome;
2068 	uint8_t			cmd_reserved1[8];
2069 } __packed __aligned(4);
2070 
2071 struct mcx_cmd_query_flow_table_entry_in {
2072 	uint16_t		cmd_opcode;
2073 	uint8_t			cmd_reserved0[4];
2074 	uint16_t		cmd_op_mod;
2075 	uint8_t			cmd_reserved1[8];
2076 } __packed __aligned(4);
2077 
2078 struct mcx_cmd_query_flow_table_entry_mb_in {
2079 	uint8_t			cmd_table_type;
2080 	uint8_t			cmd_reserved0[3];
2081 	uint32_t		cmd_table_id;
2082 	uint8_t			cmd_reserved1[8];
2083 	uint32_t		cmd_flow_index;
2084 	uint8_t			cmd_reserved2[28];
2085 } __packed __aligned(4);
2086 
2087 struct mcx_cmd_query_flow_table_entry_out {
2088 	uint8_t			cmd_status;
2089 	uint8_t			cmd_reserved0[3];
2090 	uint32_t		cmd_syndrome;
2091 	uint8_t			cmd_reserved1[8];
2092 } __packed __aligned(4);
2093 
2094 struct mcx_cmd_query_flow_table_entry_mb_out {
2095 	uint8_t			cmd_reserved0[48];
2096 	struct mcx_flow_ctx	cmd_flow_ctx;
2097 } __packed __aligned(4);
2098 
2099 struct mcx_cmd_delete_flow_table_entry_in {
2100 	uint16_t		cmd_opcode;
2101 	uint8_t			cmd_reserved0[4];
2102 	uint16_t		cmd_op_mod;
2103 	uint8_t			cmd_reserved1[8];
2104 } __packed __aligned(4);
2105 
2106 struct mcx_cmd_delete_flow_table_entry_mb_in {
2107 	uint8_t			cmd_table_type;
2108 	uint8_t			cmd_reserved0[3];
2109 	uint32_t		cmd_table_id;
2110 	uint8_t			cmd_reserved1[8];
2111 	uint32_t		cmd_flow_index;
2112 	uint8_t			cmd_reserved2[28];
2113 } __packed __aligned(4);
2114 
2115 struct mcx_cmd_delete_flow_table_entry_out {
2116 	uint8_t			cmd_status;
2117 	uint8_t			cmd_reserved0[3];
2118 	uint32_t		cmd_syndrome;
2119 	uint8_t			cmd_reserved1[8];
2120 } __packed __aligned(4);
2121 
2122 struct mcx_cmd_query_flow_group_in {
2123 	uint16_t		cmd_opcode;
2124 	uint8_t			cmd_reserved0[4];
2125 	uint16_t		cmd_op_mod;
2126 	uint8_t			cmd_reserved1[8];
2127 } __packed __aligned(4);
2128 
2129 struct mcx_cmd_query_flow_group_mb_in {
2130 	uint8_t			cmd_table_type;
2131 	uint8_t			cmd_reserved0[3];
2132 	uint32_t		cmd_table_id;
2133 	uint32_t		cmd_group_id;
2134 	uint8_t			cmd_reserved1[36];
2135 } __packed __aligned(4);
2136 
2137 struct mcx_cmd_query_flow_group_out {
2138 	uint8_t			cmd_status;
2139 	uint8_t			cmd_reserved0[3];
2140 	uint32_t		cmd_syndrome;
2141 	uint8_t			cmd_reserved1[8];
2142 } __packed __aligned(4);
2143 
2144 struct mcx_cmd_query_flow_group_mb_out {
2145 	uint8_t			cmd_reserved0[12];
2146 	uint32_t		cmd_start_flow_index;
2147 	uint8_t			cmd_reserved1[4];
2148 	uint32_t		cmd_end_flow_index;
2149 	uint8_t			cmd_reserved2[20];
2150 	uint32_t		cmd_match_criteria_enable;
2151 	uint8_t			cmd_match_criteria[512];
2152 	uint8_t			cmd_reserved4[448];
2153 } __packed __aligned(4);
2154 
2155 struct mcx_cmd_query_flow_table_in {
2156 	uint16_t		cmd_opcode;
2157 	uint8_t			cmd_reserved0[4];
2158 	uint16_t		cmd_op_mod;
2159 	uint8_t			cmd_reserved1[8];
2160 } __packed __aligned(4);
2161 
2162 struct mcx_cmd_query_flow_table_mb_in {
2163 	uint8_t			cmd_table_type;
2164 	uint8_t			cmd_reserved0[3];
2165 	uint32_t		cmd_table_id;
2166 	uint8_t			cmd_reserved1[40];
2167 } __packed __aligned(4);
2168 
2169 struct mcx_cmd_query_flow_table_out {
2170 	uint8_t			cmd_status;
2171 	uint8_t			cmd_reserved0[3];
2172 	uint32_t		cmd_syndrome;
2173 	uint8_t			cmd_reserved1[8];
2174 } __packed __aligned(4);
2175 
2176 struct mcx_cmd_query_flow_table_mb_out {
2177 	uint8_t			cmd_reserved0[4];
2178 	struct mcx_flow_table_ctx cmd_ctx;
2179 } __packed __aligned(4);
2180 
2181 struct mcx_cmd_alloc_flow_counter_in {
2182 	uint16_t		cmd_opcode;
2183 	uint8_t			cmd_reserved0[4];
2184 	uint16_t		cmd_op_mod;
2185 	uint8_t			cmd_reserved1[8];
2186 } __packed __aligned(4);
2187 
2188 struct mcx_cmd_query_rq_in {
2189 	uint16_t		cmd_opcode;
2190 	uint8_t			cmd_reserved0[4];
2191 	uint16_t		cmd_op_mod;
2192 	uint32_t		cmd_rqn;
2193 	uint8_t			cmd_reserved1[4];
2194 } __packed __aligned(4);
2195 
2196 struct mcx_cmd_query_rq_out {
2197 	uint8_t			cmd_status;
2198 	uint8_t			cmd_reserved0[3];
2199 	uint32_t		cmd_syndrome;
2200 	uint8_t			cmd_reserved1[8];
2201 } __packed __aligned(4);
2202 
2203 struct mcx_cmd_query_rq_mb_out {
2204 	uint8_t			cmd_reserved0[16];
2205 	struct mcx_rq_ctx	cmd_ctx;
2206 } __packed __aligned(4);
2207 
2208 struct mcx_cmd_query_sq_in {
2209 	uint16_t		cmd_opcode;
2210 	uint8_t			cmd_reserved0[4];
2211 	uint16_t		cmd_op_mod;
2212 	uint32_t		cmd_sqn;
2213 	uint8_t			cmd_reserved1[4];
2214 } __packed __aligned(4);
2215 
2216 struct mcx_cmd_query_sq_out {
2217 	uint8_t			cmd_status;
2218 	uint8_t			cmd_reserved0[3];
2219 	uint32_t		cmd_syndrome;
2220 	uint8_t			cmd_reserved1[8];
2221 } __packed __aligned(4);
2222 
2223 struct mcx_cmd_query_sq_mb_out {
2224 	uint8_t			cmd_reserved0[16];
2225 	struct mcx_sq_ctx	cmd_ctx;
2226 } __packed __aligned(4);
2227 
2228 struct mcx_cmd_alloc_flow_counter_out {
2229 	uint8_t			cmd_status;
2230 	uint8_t			cmd_reserved0[3];
2231 	uint32_t		cmd_syndrome;
2232 	uint8_t			cmd_reserved1[2];
2233 	uint16_t		cmd_flow_counter_id;
2234 	uint8_t			cmd_reserved2[4];
2235 } __packed __aligned(4);
2236 
2237 struct mcx_wq_doorbell {
2238 	uint32_t		 db_recv_counter;
2239 	uint32_t		 db_send_counter;
2240 } __packed __aligned(8);
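
/*
 * One of these per rq/sq pair lives in sc_doorbell_mem; the
 * rx_doorbell/tx_doorbell members of the queue structures below record
 * where in that region each queue's counters get written.
 */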
2241 
2242 struct mcx_dmamem {
2243 	bus_dmamap_t		 mxm_map;
2244 	bus_dma_segment_t	 mxm_seg;
2245 	int			 mxm_nsegs;
2246 	size_t			 mxm_size;
2247 	caddr_t			 mxm_kva;
2248 };
2249 #define MCX_DMA_MAP(_mxm)	((_mxm)->mxm_map)
2250 #define MCX_DMA_DVA(_mxm)	((_mxm)->mxm_map->dm_segs[0].ds_addr)
2251 #define MCX_DMA_KVA(_mxm)	((void *)(_mxm)->mxm_kva)
2252 #define MCX_DMA_OFF(_mxm, _off)	((void *)((_mxm)->mxm_kva + (_off)))
2253 #define MCX_DMA_LEN(_mxm)	((_mxm)->mxm_size)
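
/*
 * mcx_dmamem allocations are made as a single contiguous segment,
 * which is why MCX_DMA_DVA() can use dm_segs[0] unconditionally.
 * Typical use:
 *
 *	if (mcx_dmamem_alloc(sc, &mem, size, MCX_PAGE_SIZE) != 0)
 *		return (-1);
 *	memset(MCX_DMA_KVA(&mem), 0, MCX_DMA_LEN(&mem));
 */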
2254 
2255 struct mcx_hwmem {
2256 	bus_dmamap_t		 mhm_map;
2257 	bus_dma_segment_t	*mhm_segs;
2258 	unsigned int		 mhm_seg_count;
2259 	unsigned int		 mhm_npages;
2260 };
2261 
2262 struct mcx_slot {
2263 	bus_dmamap_t		 ms_map;
2264 	struct mbuf		*ms_m;
2265 };
2266 
2267 struct mcx_eq {
2268 	int			 eq_n;
2269 	uint32_t		 eq_cons;
2270 	struct mcx_dmamem	 eq_mem;
2271 };
2272 
2273 struct mcx_cq {
2274 	int			 cq_n;
2275 	struct mcx_dmamem	 cq_mem;
2276 	bus_addr_t		 cq_doorbell;
2277 	uint32_t		 cq_cons;
2278 	uint32_t		 cq_count;
2279 };
2280 
2281 struct mcx_calibration {
2282 	uint64_t		 c_timestamp;	/* previous mcx chip time */
2283 	uint64_t		 c_uptime;	/* previous kernel nanouptime */
2284 	uint64_t		 c_tbase;	/* mcx chip time */
2285 	uint64_t		 c_ubase;	/* kernel nanouptime */
2286 	uint64_t		 c_ratio;
2287 };
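
/*
 * Two generations of this state are kept (sc_calibration[2] in the
 * softc) so the rx path can use a stable snapshot while mcx_calibrate()
 * prepares the next one; sc_calibration_gen selects the current slot.
 */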
2288 
2289 #define MCX_CALIBRATE_FIRST    2
2290 #define MCX_CALIBRATE_NORMAL   32
2291 
2292 struct mcx_rx {
2293 	struct mcx_softc	*rx_softc;
2294 	struct ifiqueue		*rx_ifiq;
2295 
2296 	int			 rx_rqn;
2297 	struct mcx_dmamem	 rx_rq_mem;
2298 	struct mcx_slot		*rx_slots;
2299 	bus_addr_t		 rx_doorbell;
2300 
2301 	uint32_t		 rx_prod;
2302 	struct timeout		 rx_refill;
2303 	struct if_rxring	 rx_rxr;
2304 } __aligned(64);
2305 
2306 struct mcx_tx {
2307 	struct mcx_softc	*tx_softc;
2308 	struct ifqueue		*tx_ifq;
2309 
2310 	int			 tx_uar;
2311 	int			 tx_sqn;
2312 	struct mcx_dmamem	 tx_sq_mem;
2313 	struct mcx_slot		*tx_slots;
2314 	bus_addr_t		 tx_doorbell;
2315 	int			 tx_bf_offset;
2316 
2317 	uint32_t		 tx_cons;
2318 	uint32_t		 tx_prod;
2319 } __aligned(64);
2320 
2321 struct mcx_queues {
2322 	char			 q_name[16];
2323 	void			*q_ihc;
2324 	struct mcx_softc	*q_sc;
2325 	int			 q_uar;
2326 	int			 q_index;
2327 	struct mcx_rx		 q_rx;
2328 	struct mcx_tx		 q_tx;
2329 	struct mcx_cq		 q_cq;
2330 	struct mcx_eq		 q_eq;
2331 #if NKSTAT > 0
2332 	struct kstat		*q_kstat;
2333 #endif
2334 };
2335 
2336 struct mcx_flow_group {
2337 	int			 g_id;
2338 	int			 g_table;
2339 	int			 g_start;
2340 	int			 g_size;
2341 };
2342 
2343 #define MCX_FLOW_GROUP_PROMISC	 0
2344 #define MCX_FLOW_GROUP_ALLMULTI	 1
2345 #define MCX_FLOW_GROUP_MAC	 2
2346 #define MCX_FLOW_GROUP_RSS_L4	 3
2347 #define MCX_FLOW_GROUP_RSS_L3	 4
2348 #define MCX_FLOW_GROUP_RSS_NONE	 5
2349 #define MCX_NUM_FLOW_GROUPS	 6
2350 
2351 #define MCX_HASH_SEL_L3		(MCX_TIR_CTX_HASH_SEL_SRC_IP | \
2352 				MCX_TIR_CTX_HASH_SEL_DST_IP)
2353 #define MCX_HASH_SEL_L4		(MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_SPORT | \
2354 				MCX_TIR_CTX_HASH_SEL_DPORT)
2355 
2356 #define MCX_RSS_HASH_SEL_V4_TCP	(MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
2357 				MCX_TIR_CTX_HASH_SEL_IPV4)
2358 #define MCX_RSS_HASH_SEL_V6_TCP	(MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
2359 				MCX_TIR_CTX_HASH_SEL_IPV6)
2360 #define MCX_RSS_HASH_SEL_V4_UDP	(MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
2361 				MCX_TIR_CTX_HASH_SEL_IPV4)
2362 #define MCX_RSS_HASH_SEL_V6_UDP	(MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
2363 				MCX_TIR_CTX_HASH_SEL_IPV6)
2364 #define MCX_RSS_HASH_SEL_V4	(MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV4)
2365 #define MCX_RSS_HASH_SEL_V6	(MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV6)
2366 
2367 /*
2368  * There are a few different pieces involved in configuring RSS.
2369  * A Receive Queue Table (RQT) is the indirection table that maps packets to
2370  * different rx queues based on a hash value.  We only create one, because
2371  * we want to scatter any traffic we can apply RSS to across all our rx
2372  * queues.  Anything else will only be delivered to the first rx queue,
2373  * which doesn't require an RQT.
2374  *
2375  * A Transport Interface Receive (TIR) delivers packets to either a single rx
2376  * queue or an RQT, and in the latter case, specifies the set of fields
2377  * hashed, the hash function, and the hash key.  We need one of these for each
2378  * type of RSS traffic - v4 TCP, v6 TCP, v4 UDP, v6 UDP, other v4, other v6,
2379  * and one for non-RSS traffic.
2380  *
2381  * Flow tables hold flow table entries in sequence.  The first entry that
2382  * matches a packet is applied, sending the packet to either another flow
2383  * table or a TIR.  We use one flow table to select packets based on
2384  * destination MAC address, and a second to apply RSS.  The entries in the
2385  * first table send matching packets to the second, and the entries in the
2386  * RSS table send packets to RSS TIRs if possible, or the non-RSS TIR.
2387  *
2388  * The flow table entry that delivers packets to an RSS TIR must include match
2389  * criteria that ensure packets delivered to the TIR include all the fields
2390  * that the TIR hashes on - so for a v4 TCP TIR, the flow table entry must
2391  * only accept v4 TCP packets.  Accordingly, we need flow table entries for
2392  * each TIR.
2393  *
2394  * All of this is far more flexible than we need; most of what we do
2395  * need can be described by a simple array.
2396  *
2397  * An RSS config creates a TIR with hashing enabled on a set of fields,
2398  * pointing to either the first rx queue or the RQT containing all the rx
2399  * queues, and a flow table entry that matches on an ethertype and
2400  * optionally an IP protocol and delivers packets to the TIR.
2401  */
2402 static struct mcx_rss_rule {
2403 	int			hash_sel;
2404 	int			flow_group;
2405 	int			ethertype;
2406 	int			ip_proto;
2407 } mcx_rss_config[] = {
2408 	/* udp and tcp for v4/v6 */
2409 	{ MCX_RSS_HASH_SEL_V4_TCP, MCX_FLOW_GROUP_RSS_L4,
2410 	  ETHERTYPE_IP, IPPROTO_TCP },
2411 	{ MCX_RSS_HASH_SEL_V6_TCP, MCX_FLOW_GROUP_RSS_L4,
2412 	  ETHERTYPE_IPV6, IPPROTO_TCP },
2413 	{ MCX_RSS_HASH_SEL_V4_UDP, MCX_FLOW_GROUP_RSS_L4,
2414 	  ETHERTYPE_IP, IPPROTO_UDP },
2415 	{ MCX_RSS_HASH_SEL_V6_UDP, MCX_FLOW_GROUP_RSS_L4,
2416 	  ETHERTYPE_IPV6, IPPROTO_UDP },
2417 
2418 	/* other v4/v6 */
2419 	{ MCX_RSS_HASH_SEL_V4, MCX_FLOW_GROUP_RSS_L3,
2420 	  ETHERTYPE_IP, 0 },
2421 	{ MCX_RSS_HASH_SEL_V6, MCX_FLOW_GROUP_RSS_L3,
2422 	  ETHERTYPE_IPV6, 0 },
2423 
2424 	/* non v4/v6 */
2425 	{ 0, MCX_FLOW_GROUP_RSS_NONE, 0, 0 }
2426 };
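
/*
 * At interface bring-up each entry above becomes one TIR (kept in
 * sc_tir[], which is sized with nitems(mcx_rss_config)) plus a flow
 * table entry in the group named by flow_group; the all-zero final
 * entry is the catch-all that steers remaining traffic to the non-RSS
 * TIR.
 */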
2427 
2428 struct mcx_softc {
2429 	struct device		 sc_dev;
2430 	struct arpcom		 sc_ac;
2431 	struct ifmedia		 sc_media;
2432 	uint64_t		 sc_media_status;
2433 	uint64_t		 sc_media_active;
2434 
2435 	pci_chipset_tag_t	 sc_pc;
2436 	pci_intr_handle_t	 sc_ih;
2437 	void			*sc_ihc;
2438 	pcitag_t		 sc_tag;
2439 
2440 	bus_dma_tag_t		 sc_dmat;
2441 	bus_space_tag_t		 sc_memt;
2442 	bus_space_handle_t	 sc_memh;
2443 	bus_size_t		 sc_mems;
2444 
2445 	struct mcx_dmamem	 sc_cmdq_mem;
2446 	unsigned int		 sc_cmdq_mask;
2447 	unsigned int		 sc_cmdq_size;
2448 
2449 	unsigned int		 sc_cmdq_token;
2450 	struct mutex		 sc_cmdq_mtx;
2451 	struct rwlock		 sc_cmdq_kstat_lk;
2452 	struct rwlock		 sc_cmdq_ioctl_lk;
2453 
2454 	struct mcx_hwmem	 sc_boot_pages;
2455 	struct mcx_hwmem	 sc_init_pages;
2456 	struct mcx_hwmem	 sc_regular_pages;
2457 
2458 	int			 sc_uar;
2459 	int			 sc_pd;
2460 	int			 sc_tdomain;
2461 	uint32_t		 sc_lkey;
2462 	int			 sc_tis;
2463 	int			 sc_tir[nitems(mcx_rss_config)];
2464 	int			 sc_rqt;
2465 
2466 	struct mcx_dmamem	 sc_doorbell_mem;
2467 
2468 	struct mcx_eq		 sc_admin_eq;
2469 	struct mcx_eq		 sc_queue_eq;
2470 
2471 	int			 sc_hardmtu;
2472 	int			 sc_rxbufsz;
2473 
2474 	int			 sc_bf_size;
2475 	int			 sc_max_rqt_size;
2476 
2477 	struct task		 sc_port_change;
2478 
2479 	int			 sc_mac_flow_table_id;
2480 	int			 sc_rss_flow_table_id;
2481 	struct mcx_flow_group	 sc_flow_group[MCX_NUM_FLOW_GROUPS];
2482 	int			 sc_promisc_flow_enabled;
2483 	int			 sc_allmulti_flow_enabled;
2484 	int			 sc_mcast_flow_base;
2485 	int			 sc_extra_mcast;
2486 	uint8_t			 sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
2487 
2488 	struct mcx_calibration	 sc_calibration[2];
2489 	unsigned int		 sc_calibration_gen;
2490 	struct timeout		 sc_calibrate;
2491 	uint32_t		 sc_mhz;
2492 	uint32_t		 sc_khz;
2493 
2494 	struct intrmap		*sc_intrmap;
2495 	struct mcx_queues	*sc_queues;
2496 
2497 	int			 sc_mcam_reg;
2498 
2499 #if NKSTAT > 0
2500 	struct kstat		*sc_kstat_ieee8023;
2501 	struct kstat		*sc_kstat_rfc2863;
2502 	struct kstat		*sc_kstat_rfc2819;
2503 	struct kstat		*sc_kstat_rfc3635;
2504 	unsigned int		 sc_kstat_mtmp_count;
2505 	struct kstat		**sc_kstat_mtmp;
2506 #endif
2507 
2508 	struct timecounter	 sc_timecounter;
2509 };
2510 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
2511 
2512 static int	mcx_match(struct device *, void *, void *);
2513 static void	mcx_attach(struct device *, struct device *, void *);
2514 
2515 #if NKSTAT > 0
2516 static void	mcx_kstat_attach(struct mcx_softc *);
2517 #endif
2518 
2519 static void	mcx_timecounter_attach(struct mcx_softc *);
2520 
2521 static int	mcx_version(struct mcx_softc *);
2522 static int	mcx_init_wait(struct mcx_softc *);
2523 static int	mcx_enable_hca(struct mcx_softc *);
2524 static int	mcx_teardown_hca(struct mcx_softc *, uint16_t);
2525 static int	mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2526 		    int, enum mcx_cmdq_slot);
2527 static int	mcx_issi(struct mcx_softc *);
2528 static int	mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2529 static int	mcx_hca_max_caps(struct mcx_softc *);
2530 static int	mcx_hca_set_caps(struct mcx_softc *);
2531 static int	mcx_init_hca(struct mcx_softc *);
2532 static int	mcx_set_driver_version(struct mcx_softc *);
2533 static int	mcx_iff(struct mcx_softc *);
2534 static int	mcx_alloc_uar(struct mcx_softc *, int *);
2535 static int	mcx_alloc_pd(struct mcx_softc *);
2536 static int	mcx_alloc_tdomain(struct mcx_softc *);
2537 static int	mcx_create_eq(struct mcx_softc *, struct mcx_eq *, int,
2538 		    uint64_t, int);
2539 static int	mcx_query_nic_vport_context(struct mcx_softc *);
2540 static int	mcx_query_special_contexts(struct mcx_softc *);
2541 static int	mcx_set_port_mtu(struct mcx_softc *, int);
2542 static int	mcx_create_cq(struct mcx_softc *, struct mcx_cq *, int, int,
2543 		    int);
2544 static int	mcx_destroy_cq(struct mcx_softc *, struct mcx_cq *);
2545 static int	mcx_create_sq(struct mcx_softc *, struct mcx_tx *, int, int,
2546 		    int);
2547 static int	mcx_destroy_sq(struct mcx_softc *, struct mcx_tx *);
2548 static int	mcx_ready_sq(struct mcx_softc *, struct mcx_tx *);
2549 static int	mcx_create_rq(struct mcx_softc *, struct mcx_rx *, int, int);
2550 static int	mcx_destroy_rq(struct mcx_softc *, struct mcx_rx *);
2551 static int	mcx_ready_rq(struct mcx_softc *, struct mcx_rx *);
2552 static int	mcx_create_tir_direct(struct mcx_softc *, struct mcx_rx *,
2553 		    int *);
2554 static int	mcx_create_tir_indirect(struct mcx_softc *, int, uint32_t,
2555 		    int *);
2556 static int	mcx_destroy_tir(struct mcx_softc *, int);
2557 static int	mcx_create_tis(struct mcx_softc *, int *);
2558 static int	mcx_destroy_tis(struct mcx_softc *, int);
2559 static int	mcx_create_rqt(struct mcx_softc *, int, int *, int *);
2560 static int	mcx_destroy_rqt(struct mcx_softc *, int);
2561 static int	mcx_create_flow_table(struct mcx_softc *, int, int, int *);
2562 static int	mcx_set_flow_table_root(struct mcx_softc *, int);
2563 static int	mcx_destroy_flow_table(struct mcx_softc *, int);
2564 static int	mcx_create_flow_group(struct mcx_softc *, int, int, int,
2565 		    int, int, struct mcx_flow_match *);
2566 static int	mcx_destroy_flow_group(struct mcx_softc *, int);
2567 static int	mcx_set_flow_table_entry_mac(struct mcx_softc *, int, int,
2568 		    uint8_t *, uint32_t);
2569 static int	mcx_set_flow_table_entry_proto(struct mcx_softc *, int, int,
2570 		    int, int, uint32_t);
2571 static int	mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2572 
2573 #if NKSTAT > 0
2574 static int	mcx_query_rq(struct mcx_softc *, struct mcx_rx *, struct mcx_rq_ctx *);
2575 static int	mcx_query_sq(struct mcx_softc *, struct mcx_tx *, struct mcx_sq_ctx *);
2576 static int	mcx_query_cq(struct mcx_softc *, struct mcx_cq *, struct mcx_cq_ctx *);
2577 static int	mcx_query_eq(struct mcx_softc *, struct mcx_eq *, struct mcx_eq_ctx *);
2578 #endif
2579 
2580 #if 0
2581 static int	mcx_dump_flow_table(struct mcx_softc *, int);
2582 static int	mcx_dump_flow_table_entry(struct mcx_softc *, int, int);
2583 static int	mcx_dump_flow_group(struct mcx_softc *, int);
2584 #endif
2585 
2586 
2587 #if 0
2588 static void	mcx_cmdq_dump(const struct mcx_cmdq_entry *);
2589 static void	mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
2590 #endif
2591 static void	mcx_refill(void *);
2592 static int	mcx_process_rx(struct mcx_softc *, struct mcx_rx *,
2593 		    struct mcx_cq_entry *, struct mbuf_list *,
2594 		    const struct mcx_calibration *);
2595 static int	mcx_process_txeof(struct mcx_softc *, struct mcx_tx *,
2596 		    struct mcx_cq_entry *);
2597 static void	mcx_process_cq(struct mcx_softc *, struct mcx_queues *,
2598 		    struct mcx_cq *);
2599 
2600 static void	mcx_arm_cq(struct mcx_softc *, struct mcx_cq *, int);
2601 static void	mcx_arm_eq(struct mcx_softc *, struct mcx_eq *, int);
2602 static int	mcx_admin_intr(void *);
2603 static int	mcx_cq_intr(void *);
2604 
2605 static int	mcx_up(struct mcx_softc *);
2606 static void	mcx_down(struct mcx_softc *);
2607 static int	mcx_ioctl(struct ifnet *, u_long, caddr_t);
2608 static int	mcx_rxrinfo(struct mcx_softc *, struct if_rxrinfo *);
2609 static void	mcx_start(struct ifqueue *);
2610 static void	mcx_watchdog(struct ifnet *);
2611 static void	mcx_media_add_types(struct mcx_softc *);
2612 static void	mcx_media_status(struct ifnet *, struct ifmediareq *);
2613 static int	mcx_media_change(struct ifnet *);
2614 static int	mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2615 static void	mcx_port_change(void *);
2616 
2617 static void	mcx_calibrate_first(struct mcx_softc *);
2618 static void	mcx_calibrate(void *);
2619 
2620 static inline uint32_t
2621 		mcx_rd(struct mcx_softc *, bus_size_t);
2622 static inline void
2623 		mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2624 static inline void
2625 		mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2626 
2627 static uint64_t	mcx_timer(struct mcx_softc *);
2628 
2629 static int	mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2630 		    bus_size_t, u_int align);
2631 static void	mcx_dmamem_zero(struct mcx_dmamem *);
2632 static void	mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2633 
2634 static int	mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2635 		    unsigned int);
2636 static void	mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2637 
2638 struct cfdriver mcx_cd = {
2639 	NULL,
2640 	"mcx",
2641 	DV_IFNET,
2642 };
2643 
2644 const struct cfattach mcx_ca = {
2645 	sizeof(struct mcx_softc),
2646 	mcx_match,
2647 	mcx_attach,
2648 };
2649 
2650 static const struct pci_matchid mcx_devices[] = {
2651 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700 },
2652 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700VF },
2653 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710 },
2654 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710VF },
2655 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800 },
2656 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800VF },
2657 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800 },
2658 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800VF },
2659 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28908 },
2660 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT2892 },
2661 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT2894 },
2662 };
2663 
2664 struct mcx_eth_proto_capability {
2665 	uint64_t	cap_media;
2666 	uint64_t	cap_baudrate;
2667 };
2668 
2669 static const struct mcx_eth_proto_capability mcx_eth_cap_map[] = {
2670 	[MCX_ETHER_CAP_SGMII]		= { IFM_1000_SGMII,	IF_Gbps(1) },
2671 	[MCX_ETHER_CAP_1000_KX]		= { IFM_1000_KX,	IF_Gbps(1) },
2672 	[MCX_ETHER_CAP_10G_CX4]		= { IFM_10G_CX4,	IF_Gbps(10) },
2673 	[MCX_ETHER_CAP_10G_KX4]		= { IFM_10G_KX4,	IF_Gbps(10) },
2674 	[MCX_ETHER_CAP_10G_KR]		= { IFM_10G_KR,		IF_Gbps(10) },
2675 	[MCX_ETHER_CAP_40G_CR4]		= { IFM_40G_CR4,	IF_Gbps(40) },
2676 	[MCX_ETHER_CAP_40G_KR4]		= { IFM_40G_KR4,	IF_Gbps(40) },
2677 	[MCX_ETHER_CAP_10G_CR]		= { IFM_10G_SFP_CU,	IF_Gbps(10) },
2678 	[MCX_ETHER_CAP_10G_SR]		= { IFM_10G_SR,		IF_Gbps(10) },
2679 	[MCX_ETHER_CAP_10G_LR]		= { IFM_10G_LR,		IF_Gbps(10) },
2680 	[MCX_ETHER_CAP_40G_SR4]		= { IFM_40G_SR4,	IF_Gbps(40) },
2681 	[MCX_ETHER_CAP_40G_LR4]		= { IFM_40G_LR4,	IF_Gbps(40) },
2682 	[MCX_ETHER_CAP_50G_SR2]		= { 0 /*IFM_50G_SR2*/,	IF_Gbps(50) },
2683 	[MCX_ETHER_CAP_100G_CR4]	= { IFM_100G_CR4,	IF_Gbps(100) },
2684 	[MCX_ETHER_CAP_100G_SR4]	= { IFM_100G_SR4,	IF_Gbps(100) },
2685 	[MCX_ETHER_CAP_100G_KR4]	= { IFM_100G_KR4,	IF_Gbps(100) },
2686 	[MCX_ETHER_CAP_100G_LR4]	= { IFM_100G_LR4,	IF_Gbps(100) },
2687 	[MCX_ETHER_CAP_25G_CR]		= { IFM_25G_CR,		IF_Gbps(25) },
2688 	[MCX_ETHER_CAP_25G_KR]		= { IFM_25G_KR,		IF_Gbps(25) },
2689 	[MCX_ETHER_CAP_25G_SR]		= { IFM_25G_SR,		IF_Gbps(25) },
2690 	[MCX_ETHER_CAP_50G_CR2]		= { IFM_50G_CR2,	IF_Gbps(50) },
2691 	[MCX_ETHER_CAP_50G_KR2]		= { IFM_50G_KR2,	IF_Gbps(50) },
2692 };
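
/*
 * mcx_media_add_types() and mcx_media_status() translate the port's
 * advertised capability bits through this table, conceptually:
 *
 *	for (i = 0; i < nitems(mcx_eth_cap_map); i++)
 *		if (proto_cap & (1U << i))
 *			ifmedia_add(&sc->sc_media, IFM_ETHER |
 *			    mcx_eth_cap_map[i].cap_media, 0, NULL);
 *
 * where "proto_cap" stands in for the port capability word the real
 * code reads from the device.
 */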
2693 
2694 static int
2695 mcx_get_id(uint32_t val)
2696 {
2697 	return betoh32(val) & 0x00ffffff;
2698 }
2699 
2700 static int
2701 mcx_match(struct device *parent, void *match, void *aux)
2702 {
2703 	return (pci_matchbyid(aux, mcx_devices, nitems(mcx_devices)));
2704 }
2705 
2706 static void
2707 mcx_attach(struct device *parent, struct device *self, void *aux)
2708 {
2709 	struct mcx_softc *sc = (struct mcx_softc *)self;
2710 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2711 	struct pci_attach_args *pa = aux;
2712 	pcireg_t memtype;
2713 	uint32_t r;
2714 	unsigned int cq_stride;
2715 	unsigned int cq_size;
2716 	const char *intrstr;
2717 	int i, msix;
2718 
2719 	sc->sc_pc = pa->pa_pc;
2720 	sc->sc_tag = pa->pa_tag;
2721 	sc->sc_dmat = pa->pa_dmat;
2722 
2723 	/* Map the PCI memory space */
2724 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2725 	if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2726 	    BUS_SPACE_MAP_PREFETCHABLE, &sc->sc_memt, &sc->sc_memh,
2727 	    NULL, &sc->sc_mems, 0)) {
2728 		printf(": unable to map register memory\n");
2729 		return;
2730 	}
2731 
2732 	if (mcx_version(sc) != 0) {
2733 		/* error printed by mcx_version */
2734 		goto unmap;
2735 	}
2736 
2737 	r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2738 	cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2739 	cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2740 	if (cq_size > MCX_MAX_CQE) {
2741 		printf(", command queue size overflow %u\n", cq_size);
2742 		goto unmap;
2743 	}
2744 	if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2745 		printf(", command queue entry size underflow %u\n", cq_stride);
2746 		goto unmap;
2747 	}
2748 	if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2749 		printf(", command queue page overflow\n");
2750 		goto unmap;
2751 	}
2752 
2753 	if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_DOORBELL_AREA_SIZE,
2754 	    MCX_PAGE_SIZE) != 0) {
2755 		printf(", unable to allocate doorbell memory\n");
2756 		goto unmap;
2757 	}
2758 
2759 	if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2760 	    MCX_PAGE_SIZE) != 0) {
2761 		printf(", unable to allocate command queue\n");
2762 		goto dbfree;
2763 	}
2764 
2765 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2766 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t),
2767 	    BUS_SPACE_BARRIER_WRITE);
2768 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2769 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t),
2770 	    BUS_SPACE_BARRIER_WRITE);
2771 
2772 	if (mcx_init_wait(sc) != 0) {
2773 		printf(", timeout waiting for init\n");
2774 		goto cqfree;
2775 	}
2776 
2777 	sc->sc_cmdq_mask = cq_size - 1;
2778 	sc->sc_cmdq_size = cq_stride;
2779 	rw_init(&sc->sc_cmdq_kstat_lk, "mcxkstat");
2780 	rw_init(&sc->sc_cmdq_ioctl_lk, "mcxioctl");
2781 	mtx_init(&sc->sc_cmdq_mtx, IPL_NET);
2782 
2783 	if (mcx_enable_hca(sc) != 0) {
2784 		/* error printed by mcx_enable_hca */
2785 		goto cqfree;
2786 	}
2787 
2788 	if (mcx_issi(sc) != 0) {
2789 		/* error printed by mcx_issi */
2790 		goto teardown;
2791 	}
2792 
2793 	if (mcx_pages(sc, &sc->sc_boot_pages,
2794 	    htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2795 		/* error printed by mcx_pages */
2796 		goto teardown;
2797 	}
2798 
2799 	if (mcx_hca_max_caps(sc) != 0) {
2800 		/* error printed by mcx_hca_max_caps */
2801 		goto teardown;
2802 	}
2803 
2804 	if (mcx_hca_set_caps(sc) != 0) {
2805 		/* error printed by mcx_hca_set_caps */
2806 		goto teardown;
2807 	}
2808 
2809 	if (mcx_pages(sc, &sc->sc_init_pages,
2810 	    htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2811 		/* error printed by mcx_pages */
2812 		goto teardown;
2813 	}
2814 
2815 	if (mcx_init_hca(sc) != 0) {
2816 		/* error printed by mcx_init_hca */
2817 		goto teardown;
2818 	}
2819 
2820 	if (mcx_pages(sc, &sc->sc_regular_pages,
2821 	    htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2822 		/* error printed by mcx_pages */
2823 		goto teardown;
2824 	}
2825 
2826 	/* apparently not strictly necessary, but harmless to set */
2827 	if (mcx_set_driver_version(sc) != 0) {
2828 		/* error printed by mcx_set_driver_version */
2829 		goto teardown;
2830 	}
2831 
2832 	if (mcx_iff(sc) != 0) {	/* modify nic vport context */
2833 		/* error printed by mcx_iff */
2834 		goto teardown;
2835 	}
2836 
2837 	if (mcx_alloc_uar(sc, &sc->sc_uar) != 0) {
2838 		/* error printed by mcx_alloc_uar */
2839 		goto teardown;
2840 	}
2841 
2842 	if (mcx_alloc_pd(sc) != 0) {
2843 		/* error printed by mcx_alloc_pd */
2844 		goto teardown;
2845 	}
2846 
2847 	if (mcx_alloc_tdomain(sc) != 0) {
2848 		/* error printed by mcx_alloc_tdomain */
2849 		goto teardown;
2850 	}
2851 
2852 	msix = pci_intr_msix_count(pa);
2853 	if (msix < 2) {
2854 		printf(": not enough msi-x vectors\n");
2855 		goto teardown;
2856 	}
2857 
2858 	/*
2859 	 * PRM makes no mention of msi interrupts, just legacy and msi-x.
2860  * Mellanox support tells me legacy interrupts are not supported,
2861 	 * so we're stuck with just msi-x.
2862 	 */
2863 	if (pci_intr_map_msix(pa, 0, &sc->sc_ih) != 0) {
2864 		printf(": unable to map interrupt\n");
2865 		goto teardown;
2866 	}
2867 	intrstr = pci_intr_string(sc->sc_pc, sc->sc_ih);
2868 	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
2869 	    IPL_NET | IPL_MPSAFE, mcx_admin_intr, sc, DEVNAME(sc));
2870 	if (sc->sc_ihc == NULL) {
2871 		printf(": unable to establish interrupt");
2872 		if (intrstr != NULL)
2873 			printf(" at %s", intrstr);
2874 		printf("\n");
2875 		goto teardown;
2876 	}
2877 
2878 	if (mcx_create_eq(sc, &sc->sc_admin_eq, sc->sc_uar,
2879 	    (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
2880 	    (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
2881 	    (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
2882 	    (1ull << MCX_EVENT_TYPE_PAGE_REQUEST), 0) != 0) {
2883 		/* error printed by mcx_create_eq */
2884 		goto teardown;
2885 	}
2886 
2887 	if (mcx_query_nic_vport_context(sc) != 0) {
2888 		/* error printed by mcx_query_nic_vport_context */
2889 		goto teardown;
2890 	}
2891 
2892 	if (mcx_query_special_contexts(sc) != 0) {
2893 		/* error printed by mcx_query_special_contexts */
2894 		goto teardown;
2895 	}
2896 
2897 	if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2898 		/* error printed by mcx_set_port_mtu */
2899 		goto teardown;
2900 	}
2901 
2902 	printf(", %s, address %s\n", intrstr,
2903 	    ether_sprintf(sc->sc_ac.ac_enaddr));
2904 
2905 	msix--; /* admin ops took one */
2906 	sc->sc_intrmap = intrmap_create(&sc->sc_dev, msix, MCX_MAX_QUEUES,
2907 	    INTRMAP_POWEROF2);
2908 	if (sc->sc_intrmap == NULL) {
2909 		printf("%s: unable to create interrupt map\n", DEVNAME(sc));
2910 		goto teardown;
2911 	}
2912 	sc->sc_queues = mallocarray(intrmap_count(sc->sc_intrmap),
2913 	    sizeof(*sc->sc_queues), M_DEVBUF, M_WAITOK|M_ZERO);
2914 	if (sc->sc_queues == NULL) {
2915 		printf("%s: unable to create queues\n", DEVNAME(sc));
2916 		goto intrunmap;
2917 	}
2918 
2919 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2920 	ifp->if_softc = sc;
2921 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2922 	ifp->if_xflags = IFXF_MPSAFE;
2923 	ifp->if_ioctl = mcx_ioctl;
2924 	ifp->if_qstart = mcx_start;
2925 	ifp->if_watchdog = mcx_watchdog;
2926 	ifp->if_hardmtu = sc->sc_hardmtu;
2927 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
2928 	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_UDPv6 | IFCAP_CSUM_TCPv4 |
2929 	    IFCAP_CSUM_TCPv6;
2930 #if NVLAN > 0
2931 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2932 #endif
2933 	ifq_init_maxlen(&ifp->if_snd, 1024);
2934 
2935 	ifmedia_init(&sc->sc_media, IFM_IMASK, mcx_media_change,
2936 	    mcx_media_status);
2937 	mcx_media_add_types(sc);
2938 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2939 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2940 
2941 	if_attach(ifp);
2942 	ether_ifattach(ifp);
2943 
2944 	if_attach_iqueues(ifp, intrmap_count(sc->sc_intrmap));
2945 	if_attach_queues(ifp, intrmap_count(sc->sc_intrmap));
2946 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
2947 		struct ifiqueue *ifiq = ifp->if_iqs[i];
2948 		struct ifqueue *ifq = ifp->if_ifqs[i];
2949 		struct mcx_queues *q = &sc->sc_queues[i];
2950 		struct mcx_rx *rx = &q->q_rx;
2951 		struct mcx_tx *tx = &q->q_tx;
2952 		pci_intr_handle_t ih;
2953 		int vec;
2954 
2955 		vec = i + 1;
2956 		q->q_sc = sc;
2957 		q->q_index = i;
2958 
2959 		if (mcx_alloc_uar(sc, &q->q_uar) != 0) {
2960 			printf("%s: unable to alloc uar %d\n",
2961 			    DEVNAME(sc), i);
2962 			goto intrdisestablish;
2963 		}
2964 
2965 		if (mcx_create_eq(sc, &q->q_eq, q->q_uar, 0, vec) != 0) {
2966 			printf("%s: unable to create event queue %d\n",
2967 			    DEVNAME(sc), i);
2968 			goto intrdisestablish;
2969 		}
2970 
2971 		rx->rx_softc = sc;
2972 		rx->rx_ifiq = ifiq;
2973 		timeout_set(&rx->rx_refill, mcx_refill, rx);
2974 		ifiq->ifiq_softc = rx;
2975 
2976 		tx->tx_softc = sc;
2977 		tx->tx_ifq = ifq;
2978 		ifq->ifq_softc = tx;
2979 
2980 		if (pci_intr_map_msix(pa, vec, &ih) != 0) {
2981 			printf("%s: unable to map queue interrupt %d\n",
2982 			    DEVNAME(sc), i);
2983 			goto intrdisestablish;
2984 		}
2985 		snprintf(q->q_name, sizeof(q->q_name), "%s:%d",
2986 		    DEVNAME(sc), i);
2987 		q->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
2988 		    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
2989 		    mcx_cq_intr, q, q->q_name);
2990 		if (q->q_ihc == NULL) {
2991 			printf("%s: unable to establish interrupt %d\n",
2992 			    DEVNAME(sc), i);
2993 			goto intrdisestablish;
2994 		}
2995 	}
2996 
2997 	timeout_set(&sc->sc_calibrate, mcx_calibrate, sc);
2998 
2999 	task_set(&sc->sc_port_change, mcx_port_change, sc);
3000 	mcx_port_change(sc);
3001 
3002 	sc->sc_mac_flow_table_id = -1;
3003 	sc->sc_rss_flow_table_id = -1;
3004 	sc->sc_rqt = -1;
3005 	for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
3006 		struct mcx_flow_group *mfg = &sc->sc_flow_group[i];
3007 		mfg->g_id = -1;
3008 		mfg->g_table = -1;
3009 		mfg->g_size = 0;
3010 		mfg->g_start = 0;
3011 	}
3012 	sc->sc_extra_mcast = 0;
3013 	memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
3014 
3015 #if NKSTAT > 0
3016 	mcx_kstat_attach(sc);
3017 #endif
3018 	mcx_timecounter_attach(sc);
3019 	return;
3020 
3021 intrdisestablish:
3022 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
3023 		struct mcx_queues *q = &sc->sc_queues[i];
3024 		if (q->q_ihc == NULL)
3025 			continue;
3026 		pci_intr_disestablish(sc->sc_pc, q->q_ihc);
3027 		q->q_ihc = NULL;
3028 	}
3029 	free(sc->sc_queues, M_DEVBUF,
3030 	    intrmap_count(sc->sc_intrmap) * sizeof(*sc->sc_queues));
3031 intrunmap:
3032 	intrmap_destroy(sc->sc_intrmap);
3033 	sc->sc_intrmap = NULL;
3034 teardown:
3035 	mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
3036 	/* error printed by mcx_teardown_hca, and we're already unwinding */
3037 cqfree:
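	/*
	 * hand the command queue address back to the device with
	 * MCX_CMDQ_INTERFACE_DISABLED set so it stops looking at the
	 * queue memory before we free it
	 */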
3038 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
3039 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3040 	    BUS_SPACE_BARRIER_WRITE);
3041 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
3042 	    MCX_CMDQ_INTERFACE_DISABLED);
3043 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t),
3044 	    BUS_SPACE_BARRIER_WRITE);
3045 
3046 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
3047 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3048 	    BUS_SPACE_BARRIER_WRITE);
3049 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
3050 
3051 	mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
3052 dbfree:
3053 	mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
3054 unmap:
3055 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
3056 	sc->sc_mems = 0;
3057 }
3058 
3059 static int
3060 mcx_version(struct mcx_softc *sc)
3061 {
3062 	uint32_t fw0, fw1;
3063 	uint16_t cmdif;
3064 
3065 	fw0 = mcx_rd(sc, MCX_FW_VER);
3066 	fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
3067 
3068 	printf(": FW %u.%u.%04u", MCX_FW_VER_MAJOR(fw0),
3069 	    MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
3070 
3071 	cmdif = MCX_CMDIF(fw1);
3072 	if (cmdif != MCX_CMD_IF_SUPPORTED) {
3073 		printf(", unsupported command interface %u\n", cmdif);
3074 		return (-1);
3075 	}
3076 
3077 	return (0);
3078 }
3079 
3080 static int
3081 mcx_init_wait(struct mcx_softc *sc)
3082 {
3083 	unsigned int i;
3084 	uint32_t r;
3085 
3086 	for (i = 0; i < 2000; i++) {
3087 		r = mcx_rd(sc, MCX_STATE);
3088 		if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
3089 			return (0);
3090 
3091 		delay(1000);
3092 		mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
3093 		    BUS_SPACE_BARRIER_READ);
3094 	}
3095 
3096 	return (-1);
3097 }
3098 
3099 static uint8_t
3100 mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3101     unsigned int msec)
3102 {
3103 	unsigned int i;
3104 
3105 	for (i = 0; i < msec; i++) {
3106 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3107 		    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3108 
3109 		if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
3110 		    MCX_CQ_STATUS_OWN_SW)
3111 			return (0);
3112 
3113 		delay(1000);
3114 	}
3115 
3116 	return (ETIMEDOUT);
3117 }
3118 
3119 static uint32_t
3120 mcx_mix_u64(uint32_t xor, uint64_t u64)
3121 {
3122 	xor ^= u64 >> 32;
3123 	xor ^= u64;
3124 
3125 	return (xor);
3126 }
3127 
3128 static uint32_t
3129 mcx_mix_u32(uint32_t xor, uint32_t u32)
3130 {
3131 	xor ^= u32;
3132 
3133 	return (xor);
3134 }
3135 
3136 static uint32_t
3137 mcx_mix_u8(uint32_t xor, uint8_t u8)
3138 {
3139 	xor ^= u8;
3140 
3141 	return (xor);
3142 }
3143 
3144 static uint8_t
3145 mcx_mix_done(uint32_t xor)
3146 {
3147 	xor ^= xor >> 16;
3148 	xor ^= xor >> 8;
3149 
3150 	return (xor);
3151 }
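
/*
 * mcx_mix_done() folds the running 32 bit xor down to the single byte
 * used for the cq_signature/mb_signature fields: after the two shifts
 * the low byte is the xor of all four input bytes, and that is what
 * the uint8_t return truncates to.
 */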
3152 
3153 static uint8_t
3154 mcx_xor(const void *buf, size_t len)
3155 {
3156 	const uint32_t *dwords = buf;
3157 	uint32_t xor = 0xff;
3158 	size_t i;
3159 
3160 	len /= sizeof(*dwords);
3161 
3162 	for (i = 0; i < len; i++)
3163 		xor ^= dwords[i];
3164 
3165 	return (mcx_mix_done(xor));
3166 }
3167 
3168 static uint8_t
3169 mcx_cmdq_token(struct mcx_softc *sc)
3170 {
3171 	uint8_t token;
3172 
3173 	mtx_enter(&sc->sc_cmdq_mtx);
3174 	do {
3175 		token = ++sc->sc_cmdq_token;
3176 	} while (token == 0);
3177 	mtx_leave(&sc->sc_cmdq_mtx);
3178 
3179 	return (token);
3180 }
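
/*
 * Token 0 is skipped so a freshly zeroed entry or mailbox can never
 * look like it belongs to a live command; the same token is stamped
 * into every mailbox chained to a command (see mcx_cmdq_mboxes_alloc()).
 */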
3181 
3182 static struct mcx_cmdq_entry *
3183 mcx_get_cmdq_entry(struct mcx_softc *sc, enum mcx_cmdq_slot slot)
3184 {
3185 	struct mcx_cmdq_entry *cqe;
3186 
3187 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3188 	cqe += slot;
3189 
3190 	/* make sure the slot isn't running a command already */
3191 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3192 	    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3193 	if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) !=
3194 	    MCX_CQ_STATUS_OWN_SW)
3195 		cqe = NULL;
3196 
3197 	return (cqe);
3198 }
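
/*
 * Command queue slots are statically assigned to caller contexts (see
 * enum mcx_cmdq_slot), so a NULL return here means that particular
 * context still has a command outstanding, not that the queue is full.
 */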
3199 
3200 static void
3201 mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3202     uint32_t ilen, uint32_t olen, uint8_t token)
3203 {
3204 	memset(cqe, 0, sc->sc_cmdq_size);
3205 
3206 	cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
3207 	htobem32(&cqe->cq_input_length, ilen);
3208 	htobem32(&cqe->cq_output_length, olen);
3209 	cqe->cq_token = token;
3210 	cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
3211 }
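
/*
 * cq_status doubles as the ownership handshake: the entry is handed to
 * the device as MCX_CQ_STATUS_OWN_HW, and completion is detected when
 * the device flips it back to MCX_CQ_STATUS_OWN_SW (see mcx_cmdq_poll()
 * and mcx_cmdq_exec()).
 */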
3212 
3213 static void
3214 mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
3215 {
3216 	cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
3217 }
3218 
3219 static int
3220 mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
3221 {
3222 	/* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 :  0); */
3223 	return (0);
3224 }
3225 
3226 static void *
3227 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
3228 {
3229 	return (&cqe->cq_input_data);
3230 }
3231 
3232 static void *
3233 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
3234 {
3235 	return (&cqe->cq_output_data);
3236 }
3237 
3238 static void
3239 mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3240     unsigned int slot)
3241 {
3242 	mcx_cmdq_sign(cqe);
3243 
3244 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3245 	    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
3246 
3247 	mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
3248 	mcx_bar(sc, MCX_CMDQ_DOORBELL, sizeof(uint32_t),
3249 	    BUS_SPACE_BARRIER_WRITE);
3250 }
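
/*
 * Ringing doorbell bit (1U << slot) passes the entry to the device;
 * from here until the ownership bits read MCX_CQ_STATUS_OWN_SW again
 * the entry belongs to the hardware and must not be modified.
 */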
3251 
3252 static int
3253 mcx_cmdq_exec(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3254     unsigned int slot, unsigned int msec)
3255 {
3256 	int err;
3257 
3258 	if (slot == MCX_CMDQ_SLOT_POLL) {
3259 		mcx_cmdq_post(sc, cqe, slot);
3260 		return (mcx_cmdq_poll(sc, cqe, msec));
3261 	}
3262 
3263 	mtx_enter(&sc->sc_cmdq_mtx);
3264 	mcx_cmdq_post(sc, cqe, slot);
3265 
3266 	err = 0;
3267 	while (err == 0) {
3268 		err = msleep_nsec(&sc->sc_cmdq_token, &sc->sc_cmdq_mtx, 0,
3269 		    "mcxcmd", MSEC_TO_NSEC(msec));
3270 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem), 0,
3271 		    MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3272 		if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
3273 		    MCX_CQ_STATUS_OWN_SW) {
3274 			err = 0;
3275 			break;
3276 		}
3277 	}
3278 
3279 	mtx_leave(&sc->sc_cmdq_mtx);
3280 	return (err);
3281 }
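
/*
 * For the sleeping slots the command completion event is expected to
 * wakeup(9) on &sc->sc_cmdq_token; the loop above re-checks ownership
 * after every wakeup or timeout because a wakeup for any slot wakes
 * all sleepers on that channel.
 */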
3282 
3283 static int
3284 mcx_enable_hca(struct mcx_softc *sc)
3285 {
3286 	struct mcx_cmdq_entry *cqe;
3287 	struct mcx_cmd_enable_hca_in *in;
3288 	struct mcx_cmd_enable_hca_out *out;
3289 	int error;
3290 	uint8_t status;
3291 
3292 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3293 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3294 
3295 	in = mcx_cmdq_in(cqe);
3296 	in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
3297 	in->cmd_op_mod = htobe16(0);
3298 	in->cmd_function_id = htobe16(0);
3299 
3300 	mcx_cmdq_post(sc, cqe, 0);
3301 
3302 	error = mcx_cmdq_poll(sc, cqe, 1000);
3303 	if (error != 0) {
3304 		printf(", hca enable timeout\n");
3305 		return (-1);
3306 	}
3307 	if (mcx_cmdq_verify(cqe) != 0) {
3308 		printf(", hca enable command corrupt\n");
3309 		return (-1);
3310 	}
3311 
3312 	status = cqe->cq_output_data[0];
3313 	if (status != MCX_CQ_STATUS_OK) {
3314 		printf(", hca enable failed (%x)\n", status);
3315 		return (-1);
3316 	}
3317 
3318 	return (0);
3319 }
3320 
3321 static int
3322 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
3323 {
3324 	struct mcx_cmdq_entry *cqe;
3325 	struct mcx_cmd_teardown_hca_in *in;
3326 	struct mcx_cmd_teardown_hca_out *out;
3327 	int error;
3328 	uint8_t status;
3329 
3330 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3331 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3332 
3333 	in = mcx_cmdq_in(cqe);
3334 	in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
3335 	in->cmd_op_mod = htobe16(0);
3336 	in->cmd_profile = profile;
3337 
3338 	mcx_cmdq_post(sc, cqe, 0);
3339 
3340 	error = mcx_cmdq_poll(sc, cqe, 1000);
3341 	if (error != 0) {
3342 		printf(", hca teardown timeout\n");
3343 		return (-1);
3344 	}
3345 	if (mcx_cmdq_verify(cqe) != 0) {
3346 		printf(", hca teardown command corrupt\n");
3347 		return (-1);
3348 	}
3349 
3350 	status = cqe->cq_output_data[0];
3351 	if (status != MCX_CQ_STATUS_OK) {
3352 		printf(", hca teardown failed (%x)\n", status);
3353 		return (-1);
3354 	}
3355 
3356 	return (0);
3357 }
3358 
3359 static int
3360 mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
3361     unsigned int nmb, uint64_t *ptr, uint8_t token)
3362 {
3363 	caddr_t kva;
3364 	uint64_t dva;
3365 	int i;
3366 	int error;
3367 
3368 	error = mcx_dmamem_alloc(sc, mxm,
3369 	    nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
3370 	if (error != 0)
3371 		return (error);
3372 
3373 	mcx_dmamem_zero(mxm);
3374 
3375 	dva = MCX_DMA_DVA(mxm);
3376 	kva = MCX_DMA_KVA(mxm);
3377 	for (i = 0; i < nmb; i++) {
3378 		struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
3379 
3380 		/* patch the cqe or mbox pointing at this one */
3381 		htobem64(ptr, dva);
3382 
3383 		/* fill in this mbox */
3384 		htobem32(&mbox->mb_block_number, i);
3385 		mbox->mb_token = token;
3386 
3387 		/* move to the next one */
3388 		ptr = &mbox->mb_next_ptr;
3389 
3390 		dva += MCX_CMDQ_MAILBOX_SIZE;
3391 		kva += MCX_CMDQ_MAILBOX_SIZE;
3392 	}
3393 
3394 	return (0);
3395 }
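
/*
 * The mailboxes end up chained: "ptr" initially points at the cqe's
 * input or output pointer, so the first pass patches the cqe itself
 * and each later pass patches the previous mailbox's mb_next_ptr.
 * The canonical calling sequence, as in mcx_access_hca_reg() below:
 *
 *	nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
 *	mcx_cmdq_mboxes_alloc(sc, &mxm, nmb, &cqe->cq_output_ptr, token);
 *	mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
 *	mcx_cmdq_mboxes_sign(&mxm, nmb);
 *	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
 */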
3396 
3397 static uint32_t
3398 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
3399 {
3400 	uint32_t xor = 0xff;
3401 
3402 	/* only 3 fields get set, so mix them directly */
3403 	xor = mcx_mix_u64(xor, mb->mb_next_ptr);
3404 	xor = mcx_mix_u32(xor, mb->mb_block_number);
3405 	xor = mcx_mix_u8(xor, mb->mb_token);
3406 
3407 	return (mcx_mix_done(xor));
3408 }
3409 
3410 static void
3411 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
3412 {
3413 	caddr_t kva;
3414 	int i;
3415 
3416 	kva = MCX_DMA_KVA(mxm);
3417 
3418 	for (i = 0; i < nmb; i++) {
3419 		struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
3420 		uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
3421 		mb->mb_ctrl_signature = sig;
3422 		mb->mb_signature = sig ^
3423 		    mcx_xor(mb->mb_data, sizeof(mb->mb_data));
3424 
3425 		kva += MCX_CMDQ_MAILBOX_SIZE;
3426 	}
3427 }
3428 
3429 static void
3430 mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
3431 {
3432 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
3433 	    0, MCX_DMA_LEN(mxm), ops);
3434 }
3435 
3436 static struct mcx_cmdq_mailbox *
3437 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
3438 {
3439 	caddr_t kva;
3440 
3441 	kva = MCX_DMA_KVA(mxm);
3442 	kva += i * MCX_CMDQ_MAILBOX_SIZE;
3443 
3444 	return ((struct mcx_cmdq_mailbox *)kva);
3445 }
3446 
3447 static inline void *
3448 mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
3449 {
3450 	return (&mb->mb_data);
3451 }
3452 
3453 static void
3454 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
3455     void *b, size_t len)
3456 {
3457 	caddr_t buf = b;
3458 	struct mcx_cmdq_mailbox *mb;
3459 	int i;
3460 
3461 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3462 	for (i = 0; i < nmb; i++) {
3464 		memcpy(mb->mb_data, buf, min(sizeof(mb->mb_data), len));
3465 
3466 		if (sizeof(mb->mb_data) >= len)
3467 			break;
3468 
3469 		buf += sizeof(mb->mb_data);
3470 		len -= sizeof(mb->mb_data);
3471 		mb++;
3472 	}
3473 }
3474 
3475 static void
3476 mcx_cmdq_mboxes_pas(struct mcx_dmamem *mxm, int offset, int npages,
3477     struct mcx_dmamem *buf)
3478 {
3479 	uint64_t *pas;
3480 	int mbox, mbox_pages, i;
3481 
3482 	mbox = offset / MCX_CMDQ_MAILBOX_DATASIZE;
3483 	offset %= MCX_CMDQ_MAILBOX_DATASIZE;
3484 
3485 	pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3486 	pas += (offset / sizeof(*pas));
3487 	mbox_pages = (MCX_CMDQ_MAILBOX_DATASIZE - offset) / sizeof(*pas);
3488 	for (i = 0; i < npages; i++) {
3489 		if (i == mbox_pages) {
3490 			mbox++;
3491 			pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3492 			mbox_pages += MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas);
3493 		}
3494 		*pas = htobe64(MCX_DMA_DVA(buf) + (i * MCX_PAGE_SIZE));
3495 		pas++;
3496 	}
3497 }
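
/*
 * This writes the big-endian bus address of each MCX_PAGE_SIZE page of
 * "buf" into a physical address array that may straddle mailboxes:
 * "mbox_pages" counts how many entries fit up to the end of the
 * current mailbox, and the walk hops to the next mailbox exactly when
 * i catches up to it.
 */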
3498 
3499 static void
3500 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
3501 {
3502 	caddr_t buf = b;
3503 	struct mcx_cmdq_mailbox *mb;
3504 	int i;
3505 
3506 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3507 	for (i = 0; i < nmb; i++) {
3508 		memcpy(buf, mb->mb_data, min(sizeof(mb->mb_data), len));
3509 
3510 		if (sizeof(mb->mb_data) >= len)
3511 			break;
3512 
3513 		buf += sizeof(mb->mb_data);
3514 		len -= sizeof(mb->mb_data);
3515 		mb++;
3516 	}
3517 }
3518 
3519 static void
3520 mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
3521 {
3522 	mcx_dmamem_free(sc, mxm);
3523 }
3524 
3525 #if 0
3526 static void
3527 mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
3528 {
3529 	unsigned int i;
3530 
3531 	printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
3532 	    bemtoh32(&cqe->cq_input_length), bemtoh64(&cqe->cq_input_ptr));
3533 
3534 	printf(", idata ");
3535 	for (i = 0; i < sizeof(cqe->cq_input_data); i++)
3536 		printf("%02x", cqe->cq_input_data[i]);
3537 
3538 	printf(", odata ");
3539 	for (i = 0; i < sizeof(cqe->cq_output_data); i++)
3540 		printf("%02x", cqe->cq_output_data[i]);
3541 
3542 	printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
3543 	    bemtoh64(&cqe->cq_output_ptr), bemtoh32(&cqe->cq_output_length),
3544 	    cqe->cq_token, cqe->cq_signature, cqe->cq_status);
3545 }
3546 
3547 static void
3548 mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
3549 {
3550 	int i, j;
3551 	uint8_t *d;
3552 
3553 	for (i = 0; i < num; i++) {
3554 		struct mcx_cmdq_mailbox *mbox;
3555 		mbox = mcx_cq_mbox(mboxes, i);
3556 
3557 		d = mcx_cq_mbox_data(mbox);
3558 		for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
3559 			if (j != 0 && (j % 16 == 0))
3560 				printf("\n");
3561 			printf("%.2x ", d[j]);
3562 		}
3563 	}
3564 }
3565 #endif
3566 
3567 static int
3568 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
3569     int len, enum mcx_cmdq_slot slot)
3570 {
3571 	struct mcx_dmamem mxm;
3572 	struct mcx_cmdq_entry *cqe;
3573 	struct mcx_cmd_access_reg_in *in;
3574 	struct mcx_cmd_access_reg_out *out;
3575 	uint8_t token = mcx_cmdq_token(sc);
3576 	int error, nmb;
3577 
3578 	cqe = mcx_get_cmdq_entry(sc, slot);
3579 	if (cqe == NULL)
3580 		return (-1);
3581 
3582 	mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
3583 	    token);
3584 
3585 	in = mcx_cmdq_in(cqe);
3586 	in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
3587 	in->cmd_op_mod = htobe16(op);
3588 	in->cmd_register_id = htobe16(reg);
3589 
3590 	nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
3591 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3592 	    &cqe->cq_output_ptr, token) != 0) {
3593 		printf(", unable to allocate access reg mailboxen\n");
3594 		return (-1);
3595 	}
3596 	cqe->cq_input_ptr = cqe->cq_output_ptr;
3597 	mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
3598 	mcx_cmdq_mboxes_sign(&mxm, nmb);
3599 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3600 
3601 	error = mcx_cmdq_exec(sc, cqe, slot, 1000);
3602 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3603 
3604 	if (error != 0) {
3605 		printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
3606 		    (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
3607 		goto free;
3608 	}
3609 	error = mcx_cmdq_verify(cqe);
3610 	if (error != 0) {
3611 		printf("%s: access reg (%s %x) reply corrupt\n",
3612 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3613 		    reg);
3614 		goto free;
3615 	}
3616 
3617 	out = mcx_cmdq_out(cqe);
3618 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3619 		printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
3620 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3621 		    reg, out->cmd_status, betoh32(out->cmd_syndrome));
3622 		error = -1;
3623 		goto free;
3624 	}
3625 
3626 	mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
3627 free:
3628 	mcx_dmamem_free(sc, &mxm);
3629 
3630 	return (error);
3631 }
3632 
3633 static int
3634 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3635     unsigned int slot)
3636 {
3637 	struct mcx_cmd_set_issi_in *in;
3638 	struct mcx_cmd_set_issi_out *out;
3639 	uint8_t status;
3640 
3641 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3642 
3643 	in = mcx_cmdq_in(cqe);
3644 	in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3645 	in->cmd_op_mod = htobe16(0);
3646 	in->cmd_current_issi = htobe16(MCX_ISSI);
3647 
3648 	mcx_cmdq_post(sc, cqe, slot);
3649 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
3650 		return (-1);
3651 	if (mcx_cmdq_verify(cqe) != 0)
3652 		return (-1);
3653 
3654 	status = cqe->cq_output_data[0];
3655 	if (status != MCX_CQ_STATUS_OK)
3656 		return (-1);
3657 
3658 	return (0);
3659 }
3660 
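/*
 * Negotiate the ISSI level with the firmware: query which levels the
 * device supports and switch to ISSI 1 with SET_ISSI if it is offered.
 * Older devices answer QUERY_ISSI with BAD_OPCODE, in which case we
 * stay on ISSI 0.
 */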
3661 static int
3662 mcx_issi(struct mcx_softc *sc)
3663 {
3664 	struct mcx_dmamem mxm;
3665 	struct mcx_cmdq_entry *cqe;
3666 	struct mcx_cmd_query_issi_in *in;
3667 	struct mcx_cmd_query_issi_il_out *out;
3668 	struct mcx_cmd_query_issi_mb_out *mb;
3669 	uint8_t token = mcx_cmdq_token(sc);
3670 	uint8_t status;
3671 	int error;
3672 
3673 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3674 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
3675 
3676 	in = mcx_cmdq_in(cqe);
3677 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
3678 	in->cmd_op_mod = htobe16(0);
3679 
3680 	CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
3681 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3682 	    &cqe->cq_output_ptr, token) != 0) {
3683 		printf(", unable to allocate query issi mailbox\n");
3684 		return (-1);
3685 	}
3686 	mcx_cmdq_mboxes_sign(&mxm, 1);
3687 
3688 	mcx_cmdq_post(sc, cqe, 0);
3689 	error = mcx_cmdq_poll(sc, cqe, 1000);
3690 	if (error != 0) {
3691 		printf(", query issi timeout\n");
3692 		goto free;
3693 	}
3694 	error = mcx_cmdq_verify(cqe);
3695 	if (error != 0) {
3696 		printf(", query issi reply corrupt\n");
3697 		goto free;
3698 	}
3699 
3700 	status = cqe->cq_output_data[0];
3701 	switch (status) {
3702 	case MCX_CQ_STATUS_OK:
3703 		break;
3704 	case MCX_CQ_STATUS_BAD_OPCODE:
3705 		/* use ISSI 0 */
3706 		goto free;
3707 	default:
3708 		printf(", query issi failed (%x)\n", status);
3709 		error = -1;
3710 		goto free;
3711 	}
3712 
3713 	out = mcx_cmdq_out(cqe);
3714 	if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
3715 		/* use ISSI 1 */
3716 		goto free;
3717 	}
3718 
3719 	/* don't need to read cqe anymore, can be used for SET ISSI */
3720 
3721 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3722 	CTASSERT(MCX_ISSI < NBBY);
3723 	/* the issi bitmask is big-endian: bit MCX_ISSI lives in the last byte */
3724 	if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
3725 		/* use ISSI 0 */
3726 		goto free;
3727 	}
3728 
3729 	if (mcx_set_issi(sc, cqe, 0) != 0) {
3730 		/* ignore the error, just use ISSI 0 */
3731 	} else {
3732 		/* use ISSI 1 */
3733 	}
3734 
3735 free:
3736 	mcx_cq_mboxes_free(sc, &mxm);
3737 	return (error);
3738 }
3739 
3740 static int
3741 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3742     int32_t *npages, uint16_t *func_id)
3743 {
3744 	struct mcx_cmdq_entry *cqe;
3745 	struct mcx_cmd_query_pages_in *in;
3746 	struct mcx_cmd_query_pages_out *out;
3747 
3748 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3749 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3750 
3751 	in = mcx_cmdq_in(cqe);
3752 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3753 	in->cmd_op_mod = type;
3754 
3755 	mcx_cmdq_post(sc, cqe, 0);
3756 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3757 		printf(", query pages timeout\n");
3758 		return (-1);
3759 	}
3760 	if (mcx_cmdq_verify(cqe) != 0) {
3761 		printf(", query pages reply corrupt\n");
3762 		return (-1);
3763 	}
3764 
3765 	out = mcx_cmdq_out(cqe);
3766 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3767 		printf(", query pages failed (%x)\n", out->cmd_status);
3768 		return (-1);
3769 	}
3770 
3771 	*func_id = out->cmd_func_id;
3772 	*npages = bemtoh32(&out->cmd_num_pages);
3773 
3774 	return (0);
3775 }
3776 
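/*
 * Helper for walking the physical segments of a loaded dmamap a fixed
 * number of bytes at a time, used to hand pages of hardware memory to
 * the device.
 */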
3777 struct bus_dma_iter {
3778 	bus_dmamap_t		i_map;
3779 	bus_size_t		i_offset;
3780 	unsigned int		i_index;
3781 };
3782 
3783 static void
3784 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3785 {
3786 	i->i_map = map;
3787 	i->i_offset = 0;
3788 	i->i_index = 0;
3789 }
3790 
3791 static bus_addr_t
3792 bus_dma_iter_addr(struct bus_dma_iter *i)
3793 {
3794 	return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3795 }
3796 
3797 static void
3798 bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
3799 {
3800 	bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
3801 	bus_size_t diff;
3802 
3803 	do {
3804 		diff = seg->ds_len - i->i_offset;
3805 		if (size < diff)
3806 			break;
3807 
3808 		size -= diff;
3809 
3810 		seg++;
3811 
3812 		i->i_offset = 0;
3813 		i->i_index++;
3814 	} while (size > 0);
3815 
3816 	i->i_offset += size;
3817 }
3818 
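/*
 * A minimal sketch, not part of the driver, of how the iterator above
 * is driven: collect the physical address of each MCX_PAGE_SIZE page
 * of a loaded map, much as mcx_add_pages() does below.
 */
#if 0
static void
bus_dma_iter_example(bus_dmamap_t map, uint64_t *pas, unsigned int npages)
{
	struct bus_dma_iter i;
	unsigned int n;

	bus_dma_iter_init(&i, map);
	for (n = 0; n < npages; n++) {
		/* record this page's address, then step one page forward */
		pas[n] = htobe64(bus_dma_iter_addr(&i));
		bus_dma_iter_add(&i, MCX_PAGE_SIZE);
	}
}
#endif
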
3819 static int
3820 mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
3821 {
3822 	struct mcx_dmamem mxm;
3823 	struct mcx_cmdq_entry *cqe;
3824 	struct mcx_cmd_manage_pages_in *in;
3825 	struct mcx_cmd_manage_pages_out *out;
3826 	unsigned int paslen, nmb, i, j, npages;
3827 	struct bus_dma_iter iter;
3828 	uint64_t *pas;
3829 	uint8_t status;
3830 	uint8_t token = mcx_cmdq_token(sc);
3831 	int error;
3832 
3833 	npages = mhm->mhm_npages;
3834 
3835 	paslen = sizeof(*pas) * npages;
3836 	nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
3837 
3838 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3839 	mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
3840 
3841 	in = mcx_cmdq_in(cqe);
3842 	in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
3843 	in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
3844 	in->cmd_func_id = func_id;
3845 	htobem32(&in->cmd_input_num_entries, npages);
3846 
3847 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3848 	    &cqe->cq_input_ptr, token) != 0) {
3849 		printf(", unable to allocate manage pages mailboxen\n");
3850 		return (-1);
3851 	}
3852 
3853 	bus_dma_iter_init(&iter, mhm->mhm_map);
3854 	for (i = 0; i < nmb; i++) {
3855 		unsigned int lim;
3856 
3857 		pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
3858 		lim = min(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
3859 
3860 		for (j = 0; j < lim; j++) {
3861 			htobem64(&pas[j], bus_dma_iter_addr(&iter));
3862 			bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
3863 		}
3864 
3865 		npages -= lim;
3866 	}
3867 
3868 	mcx_cmdq_mboxes_sign(&mxm, nmb);
3869 
3870 	mcx_cmdq_post(sc, cqe, 0);
3871 	error = mcx_cmdq_poll(sc, cqe, 1000);
3872 	if (error != 0) {
3873 		printf(", manage pages timeout\n");
3874 		goto free;
3875 	}
3876 	error = mcx_cmdq_verify(cqe);
3877 	if (error != 0) {
3878 		printf(", manage pages reply corrupt\n");
3879 		goto free;
3880 	}
3881 
3882 	status = cqe->cq_output_data[0];
3883 	if (status != MCX_CQ_STATUS_OK) {
3884 		printf(", manage pages failed (%x)\n", status);
3885 		error = -1;
3886 		goto free;
3887 	}
3888 
3889 free:
3890 	mcx_dmamem_free(sc, &mxm);
3891 
3892 	return (error);
3893 }
3894 
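/*
 * Give the firmware the pages it asks for: QUERY_PAGES reports how
 * many pages of a given type the device wants, then the pages are
 * allocated and handed over with MANAGE_PAGES.  Nothing is done if the
 * reported count is zero or negative.
 */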
3895 static int
3896 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3897 {
3898 	int32_t npages;
3899 	uint16_t func_id;
3900 
3901 	if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3902 		/* error printed by mcx_query_pages */
3903 		return (-1);
3904 	}
3905 
3906 	if (npages < 1)
3907 		return (0);
3908 
3909 	if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3910 		printf(", unable to allocate hwmem\n");
3911 		return (-1);
3912 	}
3913 
3914 	if (mcx_add_pages(sc, mhm, func_id) != 0) {
3915 		printf(", unable to add hwmem\n");
3916 		goto free;
3917 	}
3918 
3919 	return (0);
3920 
3921 free:
3922 	mcx_hwmem_free(sc, mhm);
3923 
3924 	return (-1);
3925 }
3926 
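/*
 * Query the device's maximum capabilities and check that it is usable:
 * the port must be in ethernet mode, and the device's minimum page
 * shift must not exceed the system's.  The blueflame buffer size,
 * maximum RQ table size and device clock frequencies are recorded for
 * later use.
 */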
3927 static int
3928 mcx_hca_max_caps(struct mcx_softc *sc)
3929 {
3930 	struct mcx_dmamem mxm;
3931 	struct mcx_cmdq_entry *cqe;
3932 	struct mcx_cmd_query_hca_cap_in *in;
3933 	struct mcx_cmd_query_hca_cap_out *out;
3934 	struct mcx_cmdq_mailbox *mb;
3935 	struct mcx_cap_device *hca;
3936 	uint8_t status;
3937 	uint8_t token = mcx_cmdq_token(sc);
3938 	int error;
3939 
3940 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3941 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3942 	    token);
3943 
3944 	in = mcx_cmdq_in(cqe);
3945 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3946 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
3947 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
3948 
3949 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3950 	    &cqe->cq_output_ptr, token) != 0) {
3951 		printf(", unable to allocate query hca caps mailboxen\n");
3952 		return (-1);
3953 	}
3954 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3955 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3956 
3957 	mcx_cmdq_post(sc, cqe, 0);
3958 	error = mcx_cmdq_poll(sc, cqe, 1000);
3959 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3960 
3961 	if (error != 0) {
3962 		printf(", query hca caps timeout\n");
3963 		goto free;
3964 	}
3965 	error = mcx_cmdq_verify(cqe);
3966 	if (error != 0) {
3967 		printf(", query hca caps reply corrupt\n");
3968 		goto free;
3969 	}
3970 
3971 	status = cqe->cq_output_data[0];
3972 	if (status != MCX_CQ_STATUS_OK) {
3973 		printf(", query hca caps failed (%x)\n", status);
3974 		error = -1;
3975 		goto free;
3976 	}
3977 
3978 	mb = mcx_cq_mbox(&mxm, 0);
3979 	hca = mcx_cq_mbox_data(mb);
3980 
3981 	if ((hca->port_type & MCX_CAP_DEVICE_PORT_TYPE)
3982 	    != MCX_CAP_DEVICE_PORT_TYPE_ETH) {
3983 		printf(", not in ethernet mode\n");
3984 		error = -1;
3985 		goto free;
3986 	}
3987 	if (hca->log_pg_sz > PAGE_SHIFT) {
3988 		printf(", device minimum page shift %u is too large\n",
3989 		    hca->log_pg_sz);
3990 		error = -1;
3991 		goto free;
3992 	}
3993 	/*
3994 	 * blueflame register is split into two buffers, and we must alternate
3995 	 * between the two of them.
3996 	 */
3997 	sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;
3998 	sc->sc_max_rqt_size = (1 << hca->log_max_rqt_size);
3999 
4000 	if (hca->local_ca_ack_delay & MCX_CAP_DEVICE_MCAM_REG)
4001 		sc->sc_mcam_reg = 1;
4002 
4003 	sc->sc_mhz = bemtoh32(&hca->device_frequency_mhz);
4004 	sc->sc_khz = bemtoh32(&hca->device_frequency_khz);
4005 
4006 free:
4007 	mcx_dmamem_free(sc, &mxm);
4008 
4009 	return (error);
4010 }
4011 
4012 static int
4013 mcx_hca_set_caps(struct mcx_softc *sc)
4014 {
4015 	struct mcx_dmamem mxm;
4016 	struct mcx_cmdq_entry *cqe;
4017 	struct mcx_cmd_query_hca_cap_in *in;
4018 	struct mcx_cmd_query_hca_cap_out *out;
4019 	struct mcx_cmdq_mailbox *mb;
4020 	struct mcx_cap_device *hca;
4021 	uint8_t status;
4022 	uint8_t token = mcx_cmdq_token(sc);
4023 	int error;
4024 
4025 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4026 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
4027 	    token);
4028 
4029 	in = mcx_cmdq_in(cqe);
4030 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
4031 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
4032 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
4033 
4034 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
4035 	    &cqe->cq_output_ptr, token) != 0) {
4036 		printf(", unable to allocate query hca caps mailboxen\n");
4037 		return (-1);
4038 	}
4039 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
4040 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
4041 
4042 	mcx_cmdq_post(sc, cqe, 0);
4043 	error = mcx_cmdq_poll(sc, cqe, 1000);
4044 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
4045 
4046 	if (error != 0) {
4047 		printf(", query hca caps timeout\n");
4048 		goto free;
4049 	}
4050 	error = mcx_cmdq_verify(cqe);
4051 	if (error != 0) {
4052 		printf(", query hca caps reply corrupt\n");
4053 		goto free;
4054 	}
4055 
4056 	status = cqe->cq_output_data[0];
4057 	if (status != MCX_CQ_STATUS_OK) {
4058 		printf(", query hca caps failed (%x)\n", status);
4059 		error = -1;
4060 		goto free;
4061 	}
4062 
4063 	mb = mcx_cq_mbox(&mxm, 0);
4064 	hca = mcx_cq_mbox_data(mb);
4065 
4066 	hca->log_pg_sz = PAGE_SHIFT;
4067 
4068 free:
4069 	mcx_dmamem_free(sc, &mxm);
4070 
4071 	return (error);
4072 }
4073 
4075 static int
4076 mcx_init_hca(struct mcx_softc *sc)
4077 {
4078 	struct mcx_cmdq_entry *cqe;
4079 	struct mcx_cmd_init_hca_in *in;
4080 	struct mcx_cmd_init_hca_out *out;
4081 	int error;
4082 	uint8_t status;
4083 
4084 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4085 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4086 
4087 	in = mcx_cmdq_in(cqe);
4088 	in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
4089 	in->cmd_op_mod = htobe16(0);
4090 
4091 	mcx_cmdq_post(sc, cqe, 0);
4092 
4093 	error = mcx_cmdq_poll(sc, cqe, 1000);
4094 	if (error != 0) {
4095 		printf(", hca init timeout\n");
4096 		return (-1);
4097 	}
4098 	if (mcx_cmdq_verify(cqe) != 0) {
4099 		printf(", hca init command corrupt\n");
4100 		return (-1);
4101 	}
4102 
4103 	status = cqe->cq_output_data[0];
4104 	if (status != MCX_CQ_STATUS_OK) {
4105 		printf(", hca init failed (%x)\n", status);
4106 		return (-1);
4107 	}
4108 
4109 	return (0);
4110 }
4111 
4112 static int
4113 mcx_set_driver_version(struct mcx_softc *sc)
4114 {
4115 	struct mcx_dmamem mxm;
4116 	struct mcx_cmdq_entry *cqe;
4117 	struct mcx_cmd_set_driver_version_in *in;
4118 	struct mcx_cmd_set_driver_version_out *out;
4119 	int error;
4120 	int token;
4121 	uint8_t status;
4122 
4123 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4124 	token = mcx_cmdq_token(sc);
4125 	mcx_cmdq_init(sc, cqe, sizeof(*in) +
4126 	    sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
4127 
4128 	in = mcx_cmdq_in(cqe);
4129 	in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
4130 	in->cmd_op_mod = htobe16(0);
4131 
4132 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4133 	    &cqe->cq_input_ptr, token) != 0) {
4134 		printf(", unable to allocate set driver version mailboxen\n");
4135 		return (-1);
4136 	}
4137 	strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
4138 	    "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
4139 
4140 	mcx_cmdq_mboxes_sign(&mxm, 1);
4141 	mcx_cmdq_post(sc, cqe, 0);
4142 
4143 	error = mcx_cmdq_poll(sc, cqe, 1000);
4144 	if (error != 0) {
4145 		printf(", set driver version timeout\n");
4146 		goto free;
4147 	}
4148 	if (mcx_cmdq_verify(cqe) != 0) {
4149 		printf(", set driver version command corrupt\n");
4150 		goto free;
4151 	}
4152 
4153 	status = cqe->cq_output_data[0];
4154 	if (status != MCX_CQ_STATUS_OK) {
4155 		printf(", set driver version failed (%x)\n", status);
4156 		error = -1;
4157 		goto free;
4158 	}
4159 
4160 free:
4161 	mcx_dmamem_free(sc, &mxm);
4162 
4163 	return (error);
4164 }
4165 
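/*
 * Apply the interface's rx filter state: insert or remove the promisc
 * and all-multicast flow table entries to match the interface flags,
 * then update the NIC vport context with the current MTU.  The vport
 * context is written 240 bytes into the mailbox data.
 */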
4166 static int
4167 mcx_iff(struct mcx_softc *sc)
4168 {
4169 	struct ifnet *ifp = &sc->sc_ac.ac_if;
4170 	struct mcx_dmamem mxm;
4171 	struct mcx_cmdq_entry *cqe;
4172 	struct mcx_cmd_modify_nic_vport_context_in *in;
4173 	struct mcx_cmd_modify_nic_vport_context_out *out;
4174 	struct mcx_nic_vport_ctx *ctx;
4175 	int error;
4176 	int token;
4177 	int insize;
4178 	uint32_t dest;
4179 
4180 	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
4181 	    sc->sc_rss_flow_table_id;
4182 
4183 	/* enable or disable the promisc flow */
4184 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
4185 		if (sc->sc_promisc_flow_enabled == 0) {
4186 			mcx_set_flow_table_entry_mac(sc,
4187 			    MCX_FLOW_GROUP_PROMISC, 0, NULL, dest);
4188 			sc->sc_promisc_flow_enabled = 1;
4189 		}
4190 	} else if (sc->sc_promisc_flow_enabled != 0) {
4191 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
4192 		sc->sc_promisc_flow_enabled = 0;
4193 	}
4194 
4195 	/* enable or disable the all-multicast flow */
4196 	if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
4197 		if (sc->sc_allmulti_flow_enabled == 0) {
4198 			uint8_t mcast[ETHER_ADDR_LEN];
4199 
4200 			memset(mcast, 0, sizeof(mcast));
4201 			mcast[0] = 0x01;
4202 			mcx_set_flow_table_entry_mac(sc,
4203 			    MCX_FLOW_GROUP_ALLMULTI, 0, mcast, dest);
4204 			sc->sc_allmulti_flow_enabled = 1;
4205 		}
4206 	} else if (sc->sc_allmulti_flow_enabled != 0) {
4207 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
4208 		sc->sc_allmulti_flow_enabled = 0;
4209 	}
4210 
4211 	insize = sizeof(struct mcx_nic_vport_ctx) + 240;
4212 
4213 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4214 	token = mcx_cmdq_token(sc);
4215 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4216 
4217 	in = mcx_cmdq_in(cqe);
4218 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
4219 	in->cmd_op_mod = htobe16(0);
4220 	in->cmd_field_select = htobe32(
4221 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
4222 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
4223 
4224 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4225 		printf(", unable to allocate modify "
4226 		    "nic vport context mailboxen\n");
4227 		return (-1);
4228 	}
4229 	ctx = (struct mcx_nic_vport_ctx *)
4230 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
4231 	ctx->vp_mtu = htobe32(sc->sc_hardmtu);
4232 	/*
4233 	 * always leave promisc-all enabled on the vport since we
4234 	 * can't give it a vlan list, and we're already doing multicast
4235 	 * filtering in the flow table.
4236 	 */
4237 	ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
4238 
4239 	mcx_cmdq_mboxes_sign(&mxm, 1);
4240 	mcx_cmdq_post(sc, cqe, 0);
4241 
4242 	error = mcx_cmdq_poll(sc, cqe, 1000);
4243 	if (error != 0) {
4244 		printf(", modify nic vport context timeout\n");
4245 		goto free;
4246 	}
4247 	if (mcx_cmdq_verify(cqe) != 0) {
4248 		printf(", modify nic vport context command corrupt\n");
4249 		goto free;
4250 	}
4251 
4252 	out = mcx_cmdq_out(cqe);
4253 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4254 		printf(", modify nic vport context failed (%x, %x)\n",
4255 		    out->cmd_status, betoh32(out->cmd_syndrome));
4256 		error = -1;
4257 		goto free;
4258 	}
4259 
4260 free:
4261 	mcx_dmamem_free(sc, &mxm);
4262 
4263 	return (error);
4264 }
4265 
4266 static int
4267 mcx_alloc_uar(struct mcx_softc *sc, int *uar)
4268 {
4269 	struct mcx_cmdq_entry *cqe;
4270 	struct mcx_cmd_alloc_uar_in *in;
4271 	struct mcx_cmd_alloc_uar_out *out;
4272 	int error;
4273 
4274 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4275 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4276 
4277 	in = mcx_cmdq_in(cqe);
4278 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
4279 	in->cmd_op_mod = htobe16(0);
4280 
4281 	mcx_cmdq_post(sc, cqe, 0);
4282 
4283 	error = mcx_cmdq_poll(sc, cqe, 1000);
4284 	if (error != 0) {
4285 		printf(", alloc uar timeout\n");
4286 		return (-1);
4287 	}
4288 	if (mcx_cmdq_verify(cqe) != 0) {
4289 		printf(", alloc uar command corrupt\n");
4290 		return (-1);
4291 	}
4292 
4293 	out = mcx_cmdq_out(cqe);
4294 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4295 		printf(", alloc uar failed (%x)\n", out->cmd_status);
4296 		return (-1);
4297 	}
4298 
4299 	*uar = mcx_get_id(out->cmd_uar);
4300 	return (0);
4301 }
4302 
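/*
 * Create an event queue: allocate dma memory for the ring, initialise
 * each entry's owner bit so entries written by the hardware can be
 * told apart, then pass the queue context and the page addresses to
 * the firmware with CREATE_EQ.
 */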
4303 static int
4304 mcx_create_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar,
4305     uint64_t events, int vector)
4306 {
4307 	struct mcx_cmdq_entry *cqe;
4308 	struct mcx_dmamem mxm;
4309 	struct mcx_cmd_create_eq_in *in;
4310 	struct mcx_cmd_create_eq_mb_in *mbin;
4311 	struct mcx_cmd_create_eq_out *out;
4312 	struct mcx_eq_entry *eqe;
4313 	int error;
4314 	uint64_t *pas;
4315 	int insize, npages, paslen, i, token;
4316 
4317 	eq->eq_cons = 0;
4318 
4319 	npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
4320 	    MCX_PAGE_SIZE);
4321 	paslen = npages * sizeof(*pas);
4322 	insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
4323 
4324 	if (mcx_dmamem_alloc(sc, &eq->eq_mem, npages * MCX_PAGE_SIZE,
4325 	    MCX_PAGE_SIZE) != 0) {
4326 		printf(", unable to allocate event queue memory\n");
4327 		return (-1);
4328 	}
4329 
4330 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
4331 	for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
4332 		eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
4333 	}
4334 
4335 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4336 	token = mcx_cmdq_token(sc);
4337 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4338 
4339 	in = mcx_cmdq_in(cqe);
4340 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
4341 	in->cmd_op_mod = htobe16(0);
4342 
4343 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4344 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4345 	    &cqe->cq_input_ptr, token) != 0) {
4346 		printf(", unable to allocate create eq mailboxen\n");
4347 		goto free_eq;
4348 	}
4349 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4350 	mbin->cmd_eq_ctx.eq_uar_size = htobe32(
4351 	    (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | uar);
4352 	mbin->cmd_eq_ctx.eq_intr = vector;
4353 	mbin->cmd_event_bitmask = htobe64(events);
4354 
4355 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4356 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
4357 
4358 	/* physical addresses follow the mailbox in data */
4359 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &eq->eq_mem);
4360 	mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
4361 	mcx_cmdq_post(sc, cqe, 0);
4362 
4363 	error = mcx_cmdq_poll(sc, cqe, 1000);
4364 	if (error != 0) {
4365 		printf(", create eq timeout\n");
4366 		goto free_mxm;
4367 	}
4368 	if (mcx_cmdq_verify(cqe) != 0) {
4369 		printf(", create eq command corrupt\n");
4370 		goto free_mxm;
4371 	}
4372 
4373 	out = mcx_cmdq_out(cqe);
4374 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4375 		printf(", create eq failed (%x, %x)\n", out->cmd_status,
4376 		    betoh32(out->cmd_syndrome));
4377 		goto free_mxm;
4378 	}
4379 
4380 	eq->eq_n = mcx_get_id(out->cmd_eqn);
4381 
4382 	mcx_dmamem_free(sc, &mxm);
4383 
4384 	mcx_arm_eq(sc, eq, uar);
4385 
4386 	return (0);
4387 
4388 free_mxm:
4389 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4390 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
4391 	mcx_dmamem_free(sc, &mxm);
4392 free_eq:
4393 	mcx_dmamem_free(sc, &eq->eq_mem);
4394 	return (-1);
4395 }
4396 
4397 static int
4398 mcx_alloc_pd(struct mcx_softc *sc)
4399 {
4400 	struct mcx_cmdq_entry *cqe;
4401 	struct mcx_cmd_alloc_pd_in *in;
4402 	struct mcx_cmd_alloc_pd_out *out;
4403 	int error;
4404 
4405 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4406 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4407 
4408 	in = mcx_cmdq_in(cqe);
4409 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
4410 	in->cmd_op_mod = htobe16(0);
4411 
4412 	mcx_cmdq_post(sc, cqe, 0);
4413 
4414 	error = mcx_cmdq_poll(sc, cqe, 1000);
4415 	if (error != 0) {
4416 		printf(", alloc pd timeout\n");
4417 		return (-1);
4418 	}
4419 	if (mcx_cmdq_verify(cqe) != 0) {
4420 		printf(", alloc pd command corrupt\n");
4421 		return (-1);
4422 	}
4423 
4424 	out = mcx_cmdq_out(cqe);
4425 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4426 		printf(", alloc pd failed (%x)\n", out->cmd_status);
4427 		return (-1);
4428 	}
4429 
4430 	sc->sc_pd = mcx_get_id(out->cmd_pd);
4431 	return (0);
4432 }
4433 
4434 static int
4435 mcx_alloc_tdomain(struct mcx_softc *sc)
4436 {
4437 	struct mcx_cmdq_entry *cqe;
4438 	struct mcx_cmd_alloc_td_in *in;
4439 	struct mcx_cmd_alloc_td_out *out;
4440 	int error;
4441 
4442 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4443 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4444 
4445 	in = mcx_cmdq_in(cqe);
4446 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
4447 	in->cmd_op_mod = htobe16(0);
4448 
4449 	mcx_cmdq_post(sc, cqe, 0);
4450 
4451 	error = mcx_cmdq_poll(sc, cqe, 1000);
4452 	if (error != 0) {
4453 		printf(", alloc transport domain timeout\n");
4454 		return (-1);
4455 	}
4456 	if (mcx_cmdq_verify(cqe) != 0) {
4457 		printf(", alloc transport domain command corrupt\n");
4458 		return (-1);
4459 	}
4460 
4461 	out = mcx_cmdq_out(cqe);
4462 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4463 		printf(", alloc transport domain failed (%x)\n",
4464 		    out->cmd_status);
4465 		return (-1);
4466 	}
4467 
4468 	sc->sc_tdomain = mcx_get_id(out->cmd_tdomain);
4469 	return (0);
4470 }
4471 
4472 static int
4473 mcx_query_nic_vport_context(struct mcx_softc *sc)
4474 {
4475 	struct mcx_dmamem mxm;
4476 	struct mcx_cmdq_entry *cqe;
4477 	struct mcx_cmd_query_nic_vport_context_in *in;
4478 	struct mcx_cmd_query_nic_vport_context_out *out;
4479 	struct mcx_nic_vport_ctx *ctx;
4480 	uint8_t *addr;
4481 	int error, token, i;
4482 
4483 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4484 	token = mcx_cmdq_token(sc);
4485 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
4486 
4487 	in = mcx_cmdq_in(cqe);
4488 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
4489 	in->cmd_op_mod = htobe16(0);
4490 	in->cmd_allowed_list_type = 0;
4491 
4492 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4493 	    &cqe->cq_output_ptr, token) != 0) {
4494 		printf(", unable to allocate "
4495 		    "query nic vport context mailboxen\n");
4496 		return (-1);
4497 	}
4498 	mcx_cmdq_mboxes_sign(&mxm, 1);
4499 	mcx_cmdq_post(sc, cqe, 0);
4500 
4501 	error = mcx_cmdq_poll(sc, cqe, 1000);
4502 	if (error != 0) {
4503 		printf(", query nic vport context timeout\n");
4504 		goto free;
4505 	}
4506 	if (mcx_cmdq_verify(cqe) != 0) {
4507 		printf(", query nic vport context command corrupt\n");
4508 		goto free;
4509 	}
4510 
4511 	out = mcx_cmdq_out(cqe);
4512 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4513 		printf(", query nic vport context failed (%x, %x)\n",
4514 		    out->cmd_status, betoh32(out->cmd_syndrome));
4515 		error = -1;
4516 		goto free;
4517 	}
4518 
4519 	ctx = (struct mcx_nic_vport_ctx *)
4520 	    mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4521 	addr = (uint8_t *)&ctx->vp_perm_addr;
4522 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
4523 		sc->sc_ac.ac_enaddr[i] = addr[i + 2];
4524 	}
4525 free:
4526 	mcx_dmamem_free(sc, &mxm);
4527 
4528 	return (error);
4529 }
4530 
4531 static int
4532 mcx_query_special_contexts(struct mcx_softc *sc)
4533 {
4534 	struct mcx_cmdq_entry *cqe;
4535 	struct mcx_cmd_query_special_ctx_in *in;
4536 	struct mcx_cmd_query_special_ctx_out *out;
4537 	int error;
4538 
4539 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4540 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4541 
4542 	in = mcx_cmdq_in(cqe);
4543 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
4544 	in->cmd_op_mod = htobe16(0);
4545 
4546 	mcx_cmdq_post(sc, cqe, 0);
4547 
4548 	error = mcx_cmdq_poll(sc, cqe, 1000);
4549 	if (error != 0) {
4550 		printf(", query special contexts timeout\n");
4551 		return (-1);
4552 	}
4553 	if (mcx_cmdq_verify(cqe) != 0) {
4554 		printf(", query special contexts command corrupt\n");
4555 		return (-1);
4556 	}
4557 
4558 	out = mcx_cmdq_out(cqe);
4559 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4560 		printf(", query special contexts failed (%x)\n",
4561 		    out->cmd_status);
4562 		return (-1);
4563 	}
4564 
4565 	sc->sc_lkey = betoh32(out->cmd_resd_lkey);
4566 	return (0);
4567 }
4568 
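/*
 * Set the port MTU via the PMTU register: read the register to learn
 * the largest MTU the port supports, clamp the requested MTU to that,
 * and write the result back as the admin MTU.
 */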
4569 static int
4570 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
4571 {
4572 	struct mcx_reg_pmtu pmtu;
4573 	int error;
4574 
4575 	/* read max mtu */
4576 	memset(&pmtu, 0, sizeof(pmtu));
4577 	pmtu.rp_local_port = 1;
4578 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
4579 	    sizeof(pmtu), MCX_CMDQ_SLOT_POLL);
4580 	if (error != 0) {
4581 		printf(", unable to get port MTU\n");
4582 		return error;
4583 	}
4584 
4585 	mtu = min(mtu, betoh16(pmtu.rp_max_mtu));
4586 	pmtu.rp_admin_mtu = htobe16(mtu);
4587 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
4588 	    sizeof(pmtu), MCX_CMDQ_SLOT_POLL);
4589 	if (error != 0) {
4590 		printf(", unable to set port MTU\n");
4591 		return error;
4592 	}
4593 
4594 	sc->sc_hardmtu = mtu;
4595 	sc->sc_rxbufsz = roundup(mtu + ETHER_ALIGN, sizeof(long));
4596 	return 0;
4597 }
4598 
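/*
 * Create a completion queue.  Each CQ gets its own doorbell slot in
 * the shared doorbell page, and its entries start out with the owner
 * flag set so stale entries can be detected; the event queue to notify
 * and the moderation parameters are set in the queue context.
 */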
4599 static int
4600 mcx_create_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar, int db, int eqn)
4601 {
4602 	struct mcx_cmdq_entry *cmde;
4603 	struct mcx_cq_entry *cqe;
4604 	struct mcx_dmamem mxm;
4605 	struct mcx_cmd_create_cq_in *in;
4606 	struct mcx_cmd_create_cq_mb_in *mbin;
4607 	struct mcx_cmd_create_cq_out *out;
4608 	int error;
4609 	uint64_t *pas;
4610 	int insize, npages, paslen, i, token;
4611 
4612 	cq->cq_doorbell = MCX_CQ_DOORBELL_BASE + (MCX_CQ_DOORBELL_STRIDE * db);
4613 
4614 	npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
4615 	    MCX_PAGE_SIZE);
4616 	paslen = npages * sizeof(*pas);
4617 	insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
4618 
4619 	if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
4620 	    MCX_PAGE_SIZE) != 0) {
4621 		printf("%s: unable to allocate completion queue memory\n",
4622 		    DEVNAME(sc));
4623 		return (-1);
4624 	}
4625 	cqe = MCX_DMA_KVA(&cq->cq_mem);
4626 	for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
4627 		cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
4628 	}
4629 
4630 	cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4631 	token = mcx_cmdq_token(sc);
4632 	mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
4633 
4634 	in = mcx_cmdq_in(cmde);
4635 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
4636 	in->cmd_op_mod = htobe16(0);
4637 
4638 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4639 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4640 	    &cmde->cq_input_ptr, token) != 0) {
4641 		printf("%s: unable to allocate create cq mailboxen\n",
4642 		    DEVNAME(sc));
4643 		goto free_cq;
4644 	}
4645 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4646 	mbin->cmd_cq_ctx.cq_uar_size = htobe32(
4647 	    (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | uar);
4648 	mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
4649 	mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
4650 	    (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
4651 	    MCX_CQ_MOD_COUNTER);
4652 	mbin->cmd_cq_ctx.cq_doorbell = htobe64(
4653 	    MCX_DMA_DVA(&sc->sc_doorbell_mem) + cq->cq_doorbell);
4654 
4655 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4656 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
4657 
4658 	/* physical addresses follow the mailbox in data */
4659 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &cq->cq_mem);
4660 	mcx_cmdq_post(sc, cmde, 0);
4661 
4662 	error = mcx_cmdq_poll(sc, cmde, 1000);
4663 	if (error != 0) {
4664 		printf("%s: create cq timeout\n", DEVNAME(sc));
4665 		goto free_mxm;
4666 	}
4667 	if (mcx_cmdq_verify(cmde) != 0) {
4668 		printf("%s: create cq command corrupt\n", DEVNAME(sc));
4669 		goto free_mxm;
4670 	}
4671 
4672 	out = mcx_cmdq_out(cmde);
4673 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4674 		printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4675 		    out->cmd_status, betoh32(out->cmd_syndrome));
4676 		goto free_mxm;
4677 	}
4678 
4679 	cq->cq_n = mcx_get_id(out->cmd_cqn);
4680 	cq->cq_cons = 0;
4681 	cq->cq_count = 0;
4682 
4683 	mcx_dmamem_free(sc, &mxm);
4684 
4685 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4686 	    cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4687 	    BUS_DMASYNC_PREWRITE);
4688 
4689 	mcx_arm_cq(sc, cq, uar);
4690 
4691 	return (0);
4692 
4693 free_mxm:
4694 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4695 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4696 	mcx_dmamem_free(sc, &mxm);
4697 free_cq:
4698 	mcx_dmamem_free(sc, &cq->cq_mem);
4699 	return (-1);
4700 }
4701 
4702 static int
4703 mcx_destroy_cq(struct mcx_softc *sc, struct mcx_cq *cq)
4704 {
4705 	struct mcx_cmdq_entry *cqe;
4706 	struct mcx_cmd_destroy_cq_in *in;
4707 	struct mcx_cmd_destroy_cq_out *out;
4708 	int error;
4709 	int token;
4710 
4711 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4712 	token = mcx_cmdq_token(sc);
4713 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4714 
4715 	in = mcx_cmdq_in(cqe);
4716 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4717 	in->cmd_op_mod = htobe16(0);
4718 	in->cmd_cqn = htobe32(cq->cq_n);
4719 
4720 	mcx_cmdq_post(sc, cqe, 0);
4721 	error = mcx_cmdq_poll(sc, cqe, 1000);
4722 	if (error != 0) {
4723 		printf("%s: destroy cq timeout\n", DEVNAME(sc));
4724 		return error;
4725 	}
4726 	if (mcx_cmdq_verify(cqe) != 0) {
4727 		printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
4728 		return -1;
4729 	}
4730 
4731 	out = mcx_cmdq_out(cqe);
4732 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4733 		printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4734 		    out->cmd_status, betoh32(out->cmd_syndrome));
4735 		return -1;
4736 	}
4737 
4738 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4739 	    cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4740 	    BUS_DMASYNC_POSTWRITE);
4741 
4742 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4743 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4744 	mcx_dmamem_free(sc, &cq->cq_mem);
4745 
4746 	cq->cq_n = 0;
4747 	cq->cq_cons = 0;
4748 	cq->cq_count = 0;
4749 	return 0;
4750 }
4751 
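/*
 * Create a receive queue on top of a cyclic work queue.  VLAN
 * stripping is left enabled only if vlan(4) is configured into the
 * kernel; the queue context and the page addresses of the ring are
 * passed in via mailboxes.
 */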
4752 static int
4753 mcx_create_rq(struct mcx_softc *sc, struct mcx_rx *rx, int db, int cqn)
4754 {
4755 	struct mcx_cmdq_entry *cqe;
4756 	struct mcx_dmamem mxm;
4757 	struct mcx_cmd_create_rq_in *in;
4758 	struct mcx_cmd_create_rq_out *out;
4759 	struct mcx_rq_ctx *mbin;
4760 	int error;
4761 	uint64_t *pas;
4762 	uint32_t rq_flags;
4763 	int insize, npages, paslen, token;
4764 
4765 	rx->rx_doorbell = MCX_WQ_DOORBELL_BASE +
4766 	    (db * MCX_WQ_DOORBELL_STRIDE);
4767 
4768 	npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4769 	    MCX_PAGE_SIZE);
4770 	paslen = npages * sizeof(*pas);
4771 	insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4772 
4773 	if (mcx_dmamem_alloc(sc, &rx->rx_rq_mem, npages * MCX_PAGE_SIZE,
4774 	    MCX_PAGE_SIZE) != 0) {
4775 		printf("%s: unable to allocate receive queue memory\n",
4776 		    DEVNAME(sc));
4777 		return (-1);
4778 	}
4779 
4780 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4781 	token = mcx_cmdq_token(sc);
4782 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4783 
4784 	in = mcx_cmdq_in(cqe);
4785 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4786 	in->cmd_op_mod = htobe16(0);
4787 
4788 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4789 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4790 	    &cqe->cq_input_ptr, token) != 0) {
4791 		printf("%s: unable to allocate create rq mailboxen\n",
4792 		    DEVNAME(sc));
4793 		goto free_rq;
4794 	}
4795 	mbin = (struct mcx_rq_ctx *)
4796 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4797 	rq_flags = MCX_RQ_CTX_RLKEY;
4798 #if NVLAN == 0
4799 	rq_flags |= MCX_RQ_CTX_VLAN_STRIP_DIS;
4800 #endif
4801 	mbin->rq_flags = htobe32(rq_flags);
4802 	mbin->rq_cqn = htobe32(cqn);
4803 	mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4804 	mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4805 	mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4806 	    rx->rx_doorbell);
4807 	mbin->rq_wq.wq_log_stride = htobe16(4);
4808 	mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4809 
4810 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4811 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
4812 
4813 	/* physical addresses follow the mailbox in data */
4814 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &rx->rx_rq_mem);
4815 	mcx_cmdq_post(sc, cqe, 0);
4816 
4817 	error = mcx_cmdq_poll(sc, cqe, 1000);
4818 	if (error != 0) {
4819 		printf("%s: create rq timeout\n", DEVNAME(sc));
4820 		goto free_mxm;
4821 	}
4822 	if (mcx_cmdq_verify(cqe) != 0) {
4823 		printf("%s: create rq command corrupt\n", DEVNAME(sc));
4824 		goto free_mxm;
4825 	}
4826 
4827 	out = mcx_cmdq_out(cqe);
4828 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4829 		printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4830 		    out->cmd_status, betoh32(out->cmd_syndrome));
4831 		goto free_mxm;
4832 	}
4833 
4834 	rx->rx_rqn = mcx_get_id(out->cmd_rqn);
4835 
4836 	mcx_dmamem_free(sc, &mxm);
4837 
4838 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4839 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
4840 
4841 	return (0);
4842 
4843 free_mxm:
4844 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4845 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
4846 	mcx_dmamem_free(sc, &mxm);
4847 free_rq:
4848 	mcx_dmamem_free(sc, &rx->rx_rq_mem);
4849 	return (-1);
4850 }
4851 
4852 static int
4853 mcx_ready_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4854 {
4855 	struct mcx_cmdq_entry *cqe;
4856 	struct mcx_dmamem mxm;
4857 	struct mcx_cmd_modify_rq_in *in;
4858 	struct mcx_cmd_modify_rq_mb_in *mbin;
4859 	struct mcx_cmd_modify_rq_out *out;
4860 	int error;
4861 	int token;
4862 
4863 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4864 	token = mcx_cmdq_token(sc);
4865 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
4866 	    sizeof(*out), token);
4867 
4868 	in = mcx_cmdq_in(cqe);
4869 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4870 	in->cmd_op_mod = htobe16(0);
4871 	in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | rx->rx_rqn);
4872 
4873 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4874 	    &cqe->cq_input_ptr, token) != 0) {
4875 		printf("%s: unable to allocate modify rq mailbox\n",
4876 		    DEVNAME(sc));
4877 		return (-1);
4878 	}
4879 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4880 	mbin->cmd_rq_ctx.rq_flags = htobe32(
4881 	    MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4882 
4883 	mcx_cmdq_mboxes_sign(&mxm, 1);
4884 	mcx_cmdq_post(sc, cqe, 0);
4885 	error = mcx_cmdq_poll(sc, cqe, 1000);
4886 	if (error != 0) {
4887 		printf("%s: modify rq timeout\n", DEVNAME(sc));
4888 		goto free;
4889 	}
4890 	if (mcx_cmdq_verify(cqe) != 0) {
4891 		printf("%s: modify rq command corrupt\n", DEVNAME(sc));
4892 		goto free;
4893 	}
4894 
4895 	out = mcx_cmdq_out(cqe);
4896 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4897 		printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4898 		    out->cmd_status, betoh32(out->cmd_syndrome));
4899 		error = -1;
4900 		goto free;
4901 	}
4902 
4903 free:
4904 	mcx_dmamem_free(sc, &mxm);
4905 	return (error);
4906 }
4907 
4908 static int
4909 mcx_destroy_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4910 {
4911 	struct mcx_cmdq_entry *cqe;
4912 	struct mcx_cmd_destroy_rq_in *in;
4913 	struct mcx_cmd_destroy_rq_out *out;
4914 	int error;
4915 	int token;
4916 
4917 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4918 	token = mcx_cmdq_token(sc);
4919 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4920 
4921 	in = mcx_cmdq_in(cqe);
4922 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4923 	in->cmd_op_mod = htobe16(0);
4924 	in->cmd_rqn = htobe32(rx->rx_rqn);
4925 
4926 	mcx_cmdq_post(sc, cqe, 0);
4927 	error = mcx_cmdq_poll(sc, cqe, 1000);
4928 	if (error != 0) {
4929 		printf("%s: destroy rq timeout\n", DEVNAME(sc));
4930 		return error;
4931 	}
4932 	if (mcx_cmdq_verify(cqe) != 0) {
4933 		printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
4934 		return -1;
4935 	}
4936 
4937 	out = mcx_cmdq_out(cqe);
4938 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4939 		printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4940 		    out->cmd_status, betoh32(out->cmd_syndrome));
4941 		return -1;
4942 	}
4943 
4944 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4945 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
4946 
4947 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4948 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
4949 	mcx_dmamem_free(sc, &rx->rx_rq_mem);
4950 
4951 	rx->rx_rqn = 0;
4952 	return 0;
4953 }
4954 
4955 static int
4956 mcx_create_tir_direct(struct mcx_softc *sc, struct mcx_rx *rx, int *tirn)
4957 {
4958 	struct mcx_cmdq_entry *cqe;
4959 	struct mcx_dmamem mxm;
4960 	struct mcx_cmd_create_tir_in *in;
4961 	struct mcx_cmd_create_tir_mb_in *mbin;
4962 	struct mcx_cmd_create_tir_out *out;
4963 	int error;
4964 	int token;
4965 
4966 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4967 	token = mcx_cmdq_token(sc);
4968 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
4969 	    sizeof(*out), token);
4970 
4971 	in = mcx_cmdq_in(cqe);
4972 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
4973 	in->cmd_op_mod = htobe16(0);
4974 
4975 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4976 	    &cqe->cq_input_ptr, token) != 0) {
4977 		printf("%s: unable to allocate create tir mailbox\n",
4978 		    DEVNAME(sc));
4979 		return (-1);
4980 	}
4981 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4982 	/* leave disp_type = 0, so packets get sent to the inline rqn */
4983 	mbin->cmd_inline_rqn = htobe32(rx->rx_rqn);
4984 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
4985 
4986 	mcx_cmdq_post(sc, cqe, 0);
4987 	error = mcx_cmdq_poll(sc, cqe, 1000);
4988 	if (error != 0) {
4989 		printf("%s: create tir timeout\n", DEVNAME(sc));
4990 		goto free;
4991 	}
4992 	if (mcx_cmdq_verify(cqe) != 0) {
4993 		printf("%s: create tir command corrupt\n", DEVNAME(sc));
4994 		goto free;
4995 	}
4996 
4997 	out = mcx_cmdq_out(cqe);
4998 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4999 		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
5000 		    out->cmd_status, betoh32(out->cmd_syndrome));
5001 		error = -1;
5002 		goto free;
5003 	}
5004 
5005 	*tirn = mcx_get_id(out->cmd_tirn);
5006 free:
5007 	mcx_dmamem_free(sc, &mxm);
5008 	return (error);
5009 }
5010 
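/*
 * Create a TIR that hashes packets across an RQ table for RSS, using
 * the kernel's shared Toeplitz key so the hash agrees with what the
 * stack expects.  hash_sel selects the header fields that feed the
 * hash.
 */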
5011 static int
5012 mcx_create_tir_indirect(struct mcx_softc *sc, int rqtn, uint32_t hash_sel,
5013     int *tirn)
5014 {
5015 	struct mcx_cmdq_entry *cqe;
5016 	struct mcx_dmamem mxm;
5017 	struct mcx_cmd_create_tir_in *in;
5018 	struct mcx_cmd_create_tir_mb_in *mbin;
5019 	struct mcx_cmd_create_tir_out *out;
5020 	int error;
5021 	int token;
5022 
5023 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5024 	token = mcx_cmdq_token(sc);
5025 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5026 	    sizeof(*out), token);
5027 
5028 	in = mcx_cmdq_in(cqe);
5029 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
5030 	in->cmd_op_mod = htobe16(0);
5031 
5032 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5033 	    &cqe->cq_input_ptr, token) != 0) {
5034 		printf("%s: unable to allocate create tir mailbox\n",
5035 		    DEVNAME(sc));
5036 		return (-1);
5037 	}
5038 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5039 	mbin->cmd_disp_type = htobe32(MCX_TIR_CTX_DISP_TYPE_INDIRECT
5040 	    << MCX_TIR_CTX_DISP_TYPE_SHIFT);
5041 	mbin->cmd_indir_table = htobe32(rqtn);
5042 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain |
5043 	    MCX_TIR_CTX_HASH_TOEPLITZ << MCX_TIR_CTX_HASH_SHIFT);
5044 	mbin->cmd_rx_hash_sel_outer = htobe32(hash_sel);
5045 	stoeplitz_to_key(&mbin->cmd_rx_hash_key,
5046 	    sizeof(mbin->cmd_rx_hash_key));
5047 
5048 	mcx_cmdq_post(sc, cqe, 0);
5049 	error = mcx_cmdq_poll(sc, cqe, 1000);
5050 	if (error != 0) {
5051 		printf("%s: create tir timeout\n", DEVNAME(sc));
5052 		goto free;
5053 	}
5054 	if (mcx_cmdq_verify(cqe) != 0) {
5055 		printf("%s: create tir command corrupt\n", DEVNAME(sc));
5056 		goto free;
5057 	}
5058 
5059 	out = mcx_cmdq_out(cqe);
5060 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5061 		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
5062 		    out->cmd_status, betoh32(out->cmd_syndrome));
5063 		error = -1;
5064 		goto free;
5065 	}
5066 
5067 	*tirn = mcx_get_id(out->cmd_tirn);
5068 free:
5069 	mcx_dmamem_free(sc, &mxm);
5070 	return (error);
5071 }
5072 
5073 static int
5074 mcx_destroy_tir(struct mcx_softc *sc, int tirn)
5075 {
5076 	struct mcx_cmdq_entry *cqe;
5077 	struct mcx_cmd_destroy_tir_in *in;
5078 	struct mcx_cmd_destroy_tir_out *out;
5079 	int error;
5080 	int token;
5081 
5082 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5083 	token = mcx_cmdq_token(sc);
5084 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5085 
5086 	in = mcx_cmdq_in(cqe);
5087 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
5088 	in->cmd_op_mod = htobe16(0);
5089 	in->cmd_tirn = htobe32(tirn);
5090 
5091 	mcx_cmdq_post(sc, cqe, 0);
5092 	error = mcx_cmdq_poll(sc, cqe, 1000);
5093 	if (error != 0) {
5094 		printf("%s: destroy tir timeout\n", DEVNAME(sc));
5095 		return error;
5096 	}
5097 	if (mcx_cmdq_verify(cqe) != 0) {
5098 		printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
5099 		return -1;
5100 	}
5101 
5102 	out = mcx_cmdq_out(cqe);
5103 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5104 		printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
5105 		    out->cmd_status, betoh32(out->cmd_syndrome));
5106 		return -1;
5107 	}
5108 
5109 	return (0);
5110 }
5111 
5112 static int
5113 mcx_create_sq(struct mcx_softc *sc, struct mcx_tx *tx, int uar, int db,
5114     int cqn)
5115 {
5116 	struct mcx_cmdq_entry *cqe;
5117 	struct mcx_dmamem mxm;
5118 	struct mcx_cmd_create_sq_in *in;
5119 	struct mcx_sq_ctx *mbin;
5120 	struct mcx_cmd_create_sq_out *out;
5121 	int error;
5122 	uint64_t *pas;
5123 	int insize, npages, paslen, token;
5124 
5125 	tx->tx_doorbell = MCX_WQ_DOORBELL_BASE +
5126 	    (db * MCX_WQ_DOORBELL_STRIDE) + 4;
5127 
5128 	npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
5129 	    MCX_PAGE_SIZE);
5130 	paslen = npages * sizeof(*pas);
5131 	insize = sizeof(struct mcx_sq_ctx) + paslen;
5132 
5133 	if (mcx_dmamem_alloc(sc, &tx->tx_sq_mem, npages * MCX_PAGE_SIZE,
5134 	    MCX_PAGE_SIZE) != 0) {
5135 		printf("%s: unable to allocate send queue memory\n",
5136 		    DEVNAME(sc));
5137 		return (-1);
5138 	}
5139 
5140 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5141 	token = mcx_cmdq_token(sc);
5142 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out),
5143 	    token);
5144 
5145 	in = mcx_cmdq_in(cqe);
5146 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
5147 	in->cmd_op_mod = htobe16(0);
5148 
5149 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
5150 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
5151 	    &cqe->cq_input_ptr, token) != 0) {
5152 		printf("%s: unable to allocate create sq mailboxen\n",
5153 		    DEVNAME(sc));
5154 		goto free_sq;
5155 	}
5156 	mbin = (struct mcx_sq_ctx *)
5157 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
5158 	mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
5159 	    (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
5160 	mbin->sq_cqn = htobe32(cqn);
5161 	mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
5162 	mbin->sq_tis_num = htobe32(sc->sc_tis);
5163 	mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
5164 	mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
5165 	mbin->sq_wq.wq_uar_page = htobe32(uar);
5166 	mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
5167 	    tx->tx_doorbell);
5168 	mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
5169 	mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
5170 
5171 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5172 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
5173 
5174 	/* physical addresses follow the mailbox in data */
5175 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10,
5176 	    npages, &tx->tx_sq_mem);
5177 	mcx_cmdq_post(sc, cqe, 0);
5178 
5179 	error = mcx_cmdq_poll(sc, cqe, 1000);
5180 	if (error != 0) {
5181 		printf("%s: create sq timeout\n", DEVNAME(sc));
5182 		goto free_mxm;
5183 	}
5184 	if (mcx_cmdq_verify(cqe) != 0) {
5185 		printf("%s: create sq command corrupt\n", DEVNAME(sc));
5186 		goto free_mxm;
5187 	}
5188 
5189 	out = mcx_cmdq_out(cqe);
5190 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5191 		printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
5192 		    out->cmd_status, betoh32(out->cmd_syndrome));
5193 		goto free_mxm;
5194 	}
5195 
5196 	tx->tx_uar = uar;
5197 	tx->tx_sqn = mcx_get_id(out->cmd_sqn);
5198 
5199 	mcx_dmamem_free(sc, &mxm);
5200 
5201 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5202 	    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
5203 
5204 	return (0);
5205 
5206 free_mxm:
5207 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5208 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5209 	mcx_dmamem_free(sc, &mxm);
5210 free_sq:
5211 	mcx_dmamem_free(sc, &tx->tx_sq_mem);
5212 	return (-1);
5213 }
5214 
5215 static int
5216 mcx_destroy_sq(struct mcx_softc *sc, struct mcx_tx *tx)
5217 {
5218 	struct mcx_cmdq_entry *cqe;
5219 	struct mcx_cmd_destroy_sq_in *in;
5220 	struct mcx_cmd_destroy_sq_out *out;
5221 	int error;
5222 	int token;
5223 
5224 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5225 	token = mcx_cmdq_token(sc);
5226 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5227 
5228 	in = mcx_cmdq_in(cqe);
5229 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
5230 	in->cmd_op_mod = htobe16(0);
5231 	in->cmd_sqn = htobe32(tx->tx_sqn);
5232 
5233 	mcx_cmdq_post(sc, cqe, 0);
5234 	error = mcx_cmdq_poll(sc, cqe, 1000);
5235 	if (error != 0) {
5236 		printf("%s: destroy sq timeout\n", DEVNAME(sc));
5237 		return error;
5238 	}
5239 	if (mcx_cmdq_verify(cqe) != 0) {
5240 		printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
5241 		return -1;
5242 	}
5243 
5244 	out = mcx_cmdq_out(cqe);
5245 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5246 		printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
5247 		    out->cmd_status, betoh32(out->cmd_syndrome));
5248 		return -1;
5249 	}
5250 
5251 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5252 	    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
5253 
5254 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5255 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5256 	mcx_dmamem_free(sc, &tx->tx_sq_mem);
5257 
5258 	tx->tx_sqn = 0;
5259 	return 0;
5260 }
5261 
5262 static int
5263 mcx_ready_sq(struct mcx_softc *sc, struct mcx_tx *tx)
5264 {
5265 	struct mcx_cmdq_entry *cqe;
5266 	struct mcx_dmamem mxm;
5267 	struct mcx_cmd_modify_sq_in *in;
5268 	struct mcx_cmd_modify_sq_mb_in *mbin;
5269 	struct mcx_cmd_modify_sq_out *out;
5270 	int error;
5271 	int token;
5272 
5273 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5274 	token = mcx_cmdq_token(sc);
5275 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5276 	    sizeof(*out), token);
5277 
5278 	in = mcx_cmdq_in(cqe);
5279 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
5280 	in->cmd_op_mod = htobe16(0);
5281 	in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | tx->tx_sqn);
5282 
5283 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5284 	    &cqe->cq_input_ptr, token) != 0) {
5285 		printf("%s: unable to allocate modify sq mailbox\n",
5286 		    DEVNAME(sc));
5287 		return (-1);
5288 	}
5289 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5290 	mbin->cmd_sq_ctx.sq_flags = htobe32(
5291 	    MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
5292 
5293 	mcx_cmdq_mboxes_sign(&mxm, 1);
5294 	mcx_cmdq_post(sc, cqe, 0);
5295 	error = mcx_cmdq_poll(sc, cqe, 1000);
5296 	if (error != 0) {
5297 		printf("%s: modify sq timeout\n", DEVNAME(sc));
5298 		goto free;
5299 	}
5300 	if (mcx_cmdq_verify(cqe) != 0) {
5301 		printf("%s: modify sq command corrupt\n", DEVNAME(sc));
5302 		goto free;
5303 	}
5304 
5305 	out = mcx_cmdq_out(cqe);
5306 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5307 		printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
5308 		    out->cmd_status, betoh32(out->cmd_syndrome));
5309 		error = -1;
5310 		goto free;
5311 	}
5312 
5313 free:
5314 	mcx_dmamem_free(sc, &mxm);
5315 	return (error);
5316 }
5317 
5318 static int
5319 mcx_create_tis(struct mcx_softc *sc, int *tis)
5320 {
5321 	struct mcx_cmdq_entry *cqe;
5322 	struct mcx_dmamem mxm;
5323 	struct mcx_cmd_create_tis_in *in;
5324 	struct mcx_cmd_create_tis_mb_in *mbin;
5325 	struct mcx_cmd_create_tis_out *out;
5326 	int error;
5327 	int token;
5328 
5329 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5330 	token = mcx_cmdq_token(sc);
5331 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5332 	    sizeof(*out), token);
5333 
5334 	in = mcx_cmdq_in(cqe);
5335 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
5336 	in->cmd_op_mod = htobe16(0);
5337 
5338 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5339 	    &cqe->cq_input_ptr, token) != 0) {
5340 		printf("%s: unable to allocate create tis mailbox\n",
5341 		    DEVNAME(sc));
5342 		return (-1);
5343 	}
5344 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5345 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
5346 
5347 	mcx_cmdq_mboxes_sign(&mxm, 1);
5348 	mcx_cmdq_post(sc, cqe, 0);
5349 	error = mcx_cmdq_poll(sc, cqe, 1000);
5350 	if (error != 0) {
5351 		printf("%s: create tis timeout\n", DEVNAME(sc));
5352 		goto free;
5353 	}
5354 	if (mcx_cmdq_verify(cqe) != 0) {
5355 		printf("%s: create tis command corrupt\n", DEVNAME(sc));
5356 		goto free;
5357 	}
5358 
5359 	out = mcx_cmdq_out(cqe);
5360 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5361 		printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
5362 		    out->cmd_status, betoh32(out->cmd_syndrome));
5363 		error = -1;
5364 		goto free;
5365 	}
5366 
5367 	*tis = mcx_get_id(out->cmd_tisn);
5368 free:
5369 	mcx_dmamem_free(sc, &mxm);
5370 	return (error);
5371 }
5372 
5373 static int
5374 mcx_destroy_tis(struct mcx_softc *sc, int tis)
5375 {
5376 	struct mcx_cmdq_entry *cqe;
5377 	struct mcx_cmd_destroy_tis_in *in;
5378 	struct mcx_cmd_destroy_tis_out *out;
5379 	int error;
5380 	int token;
5381 
5382 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5383 	token = mcx_cmdq_token(sc);
5384 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5385 
5386 	in = mcx_cmdq_in(cqe);
5387 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
5388 	in->cmd_op_mod = htobe16(0);
5389 	in->cmd_tisn = htobe32(tis);
5390 
5391 	mcx_cmdq_post(sc, cqe, 0);
5392 	error = mcx_cmdq_poll(sc, cqe, 1000);
5393 	if (error != 0) {
5394 		printf("%s: destroy tis timeout\n", DEVNAME(sc));
5395 		return error;
5396 	}
5397 	if (mcx_cmdq_verify(cqe) != 0) {
5398 		printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
5399 		return -1;
5400 	}
5401 
5402 	out = mcx_cmdq_out(cqe);
5403 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5404 		printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
5405 		    out->cmd_status, betoh32(out->cmd_syndrome));
5406 		return -1;
5407 	}
5408 
5409 	return 0;
5410 }
5411 
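/*
 * Create an RQ table, the indirection table an indirect TIR hashes
 * into.  The table context is followed immediately by `size' RQ
 * numbers, each a big-endian 32bit value.
 */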
5412 static int
5413 mcx_create_rqt(struct mcx_softc *sc, int size, int *rqns, int *rqt)
5414 {
5415 	struct mcx_cmdq_entry *cqe;
5416 	struct mcx_dmamem mxm;
5417 	struct mcx_cmd_create_rqt_in *in;
5418 	struct mcx_cmd_create_rqt_mb_in *mbin;
5419 	struct mcx_cmd_create_rqt_out *out;
5420 	struct mcx_rqt_ctx *rqt_ctx;
5421 	int *rqtn;
5422 	int error;
5423 	int token;
5424 	int i;
5425 
5426 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5427 	token = mcx_cmdq_token(sc);
5428 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) +
5429 	    (size * sizeof(int)), sizeof(*out), token);
5430 
5431 	in = mcx_cmdq_in(cqe);
5432 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQT);
5433 	in->cmd_op_mod = htobe16(0);
5434 
5435 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5436 	    &cqe->cq_input_ptr, token) != 0) {
5437 		printf("%s: unable to allocate create rqt mailbox\n",
5438 		    DEVNAME(sc));
5439 		return (-1);
5440 	}
5441 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5442 	rqt_ctx = &mbin->cmd_rqt;
5443 	rqt_ctx->cmd_rqt_max_size = htobe16(sc->sc_max_rqt_size);
5444 	rqt_ctx->cmd_rqt_actual_size = htobe16(size);
5445 
5446 	/* rqt list follows the rqt context */
5447 	rqtn = (int *)(rqt_ctx + 1);
5448 	for (i = 0; i < size; i++) {
5449 		rqtn[i] = htobe32(rqns[i]);
5450 	}
5451 
5452 	mcx_cmdq_mboxes_sign(&mxm, 1);
5453 	mcx_cmdq_post(sc, cqe, 0);
5454 	error = mcx_cmdq_poll(sc, cqe, 1000);
5455 	if (error != 0) {
5456 		printf("%s: create rqt timeout\n", DEVNAME(sc));
5457 		goto free;
5458 	}
5459 	if (mcx_cmdq_verify(cqe) != 0) {
5460 		printf("%s: create rqt command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
5462 	}
5463 
5464 	out = mcx_cmdq_out(cqe);
5465 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5466 		printf("%s: create rqt failed (%x, %x)\n", DEVNAME(sc),
5467 		    out->cmd_status, betoh32(out->cmd_syndrome));
5468 		error = -1;
5469 		goto free;
5470 	}
5471 
	*rqt = mcx_get_id(out->cmd_rqtn);
5474 free:
5475 	mcx_dmamem_free(sc, &mxm);
5476 	return (error);
5477 }
5478 
5479 static int
5480 mcx_destroy_rqt(struct mcx_softc *sc, int rqt)
5481 {
5482 	struct mcx_cmdq_entry *cqe;
5483 	struct mcx_cmd_destroy_rqt_in *in;
5484 	struct mcx_cmd_destroy_rqt_out *out;
5485 	int error;
5486 	int token;
5487 
5488 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5489 	token = mcx_cmdq_token(sc);
5490 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5491 
5492 	in = mcx_cmdq_in(cqe);
5493 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQT);
5494 	in->cmd_op_mod = htobe16(0);
5495 	in->cmd_rqtn = htobe32(rqt);
5496 
5497 	mcx_cmdq_post(sc, cqe, 0);
5498 	error = mcx_cmdq_poll(sc, cqe, 1000);
5499 	if (error != 0) {
5500 		printf("%s: destroy rqt timeout\n", DEVNAME(sc));
5501 		return error;
5502 	}
5503 	if (mcx_cmdq_verify(cqe) != 0) {
5504 		printf("%s: destroy rqt command corrupt\n", DEVNAME(sc));
		return -1;
5506 	}
5507 
5508 	out = mcx_cmdq_out(cqe);
5509 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5510 		printf("%s: destroy rqt failed (%x, %x)\n", DEVNAME(sc),
5511 		    out->cmd_status, betoh32(out->cmd_syndrome));
5512 		return -1;
5513 	}
5514 
5515 	return 0;
5516 }
5517 
5518 #if 0
5519 static int
5520 mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
5521 {
5522 	struct mcx_cmdq_entry *cqe;
5523 	struct mcx_cmd_alloc_flow_counter_in *in;
5524 	struct mcx_cmd_alloc_flow_counter_out *out;
5525 	int error;
5526 
5527 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5528 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
5529 
5530 	in = mcx_cmdq_in(cqe);
5531 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
5532 	in->cmd_op_mod = htobe16(0);
5533 
5534 	mcx_cmdq_post(sc, cqe, 0);
5535 
5536 	error = mcx_cmdq_poll(sc, cqe, 1000);
5537 	if (error != 0) {
5538 		printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
5539 		return (-1);
5540 	}
5541 	if (mcx_cmdq_verify(cqe) != 0) {
5542 		printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
5543 		return (-1);
5544 	}
5545 
5546 	out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
5547 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5548 		printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
5549 		    out->cmd_status);
5550 		return (-1);
5551 	}
5552 
	sc->sc_flow_counter_id[i] = betoh16(out->cmd_flow_counter_id);
5554 	printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);
5555 
5556 	return (0);
5557 }
5558 #endif
5559 
5560 static int
5561 mcx_create_flow_table(struct mcx_softc *sc, int log_size, int level,
5562     int *flow_table_id)
5563 {
5564 	struct mcx_cmdq_entry *cqe;
5565 	struct mcx_dmamem mxm;
5566 	struct mcx_cmd_create_flow_table_in *in;
5567 	struct mcx_cmd_create_flow_table_mb_in *mbin;
5568 	struct mcx_cmd_create_flow_table_out *out;
5569 	int error;
5570 	int token;
5571 
5572 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5573 	token = mcx_cmdq_token(sc);
5574 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5575 	    sizeof(*out), token);
5576 
5577 	in = mcx_cmdq_in(cqe);
5578 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
5579 	in->cmd_op_mod = htobe16(0);
5580 
5581 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5582 	    &cqe->cq_input_ptr, token) != 0) {
5583 		printf("%s: unable to allocate create flow table mailbox\n",
5584 		    DEVNAME(sc));
5585 		return (-1);
5586 	}
5587 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5588 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5589 	mbin->cmd_ctx.ft_log_size = log_size;
5590 	mbin->cmd_ctx.ft_level = level;
5591 
5592 	mcx_cmdq_mboxes_sign(&mxm, 1);
5593 	mcx_cmdq_post(sc, cqe, 0);
5594 	error = mcx_cmdq_poll(sc, cqe, 1000);
5595 	if (error != 0) {
5596 		printf("%s: create flow table timeout\n", DEVNAME(sc));
5597 		goto free;
5598 	}
5599 	if (mcx_cmdq_verify(cqe) != 0) {
5600 		printf("%s: create flow table command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
5602 	}
5603 
5604 	out = mcx_cmdq_out(cqe);
5605 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5606 		printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
5607 		    out->cmd_status, betoh32(out->cmd_syndrome));
5608 		error = -1;
5609 		goto free;
5610 	}
5611 
5612 	*flow_table_id = mcx_get_id(out->cmd_table_id);
5613 free:
5614 	mcx_dmamem_free(sc, &mxm);
5615 	return (error);
5616 }
5617 
5618 static int
5619 mcx_set_flow_table_root(struct mcx_softc *sc, int flow_table_id)
5620 {
5621 	struct mcx_cmdq_entry *cqe;
5622 	struct mcx_dmamem mxm;
5623 	struct mcx_cmd_set_flow_table_root_in *in;
5624 	struct mcx_cmd_set_flow_table_root_mb_in *mbin;
5625 	struct mcx_cmd_set_flow_table_root_out *out;
5626 	int error;
5627 	int token;
5628 
5629 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5630 	token = mcx_cmdq_token(sc);
5631 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5632 	    sizeof(*out), token);
5633 
5634 	in = mcx_cmdq_in(cqe);
5635 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
5636 	in->cmd_op_mod = htobe16(0);
5637 
5638 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5639 	    &cqe->cq_input_ptr, token) != 0) {
5640 		printf("%s: unable to allocate set flow table root mailbox\n",
5641 		    DEVNAME(sc));
5642 		return (-1);
5643 	}
5644 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5645 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5646 	mbin->cmd_table_id = htobe32(flow_table_id);
5647 
5648 	mcx_cmdq_mboxes_sign(&mxm, 1);
5649 	mcx_cmdq_post(sc, cqe, 0);
5650 	error = mcx_cmdq_poll(sc, cqe, 1000);
5651 	if (error != 0) {
5652 		printf("%s: set flow table root timeout\n", DEVNAME(sc));
5653 		goto free;
5654 	}
5655 	if (mcx_cmdq_verify(cqe) != 0) {
5656 		printf("%s: set flow table root command corrupt\n",
5657 		    DEVNAME(sc));
		error = -1;
		goto free;
5659 	}
5660 
5661 	out = mcx_cmdq_out(cqe);
5662 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5663 		printf("%s: set flow table root failed (%x, %x)\n",
5664 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
5665 		error = -1;
5666 		goto free;
5667 	}
5668 
5669 free:
5670 	mcx_dmamem_free(sc, &mxm);
5671 	return (error);
5672 }
5673 
5674 static int
5675 mcx_destroy_flow_table(struct mcx_softc *sc, int flow_table_id)
5676 {
5677 	struct mcx_cmdq_entry *cqe;
5678 	struct mcx_dmamem mxm;
5679 	struct mcx_cmd_destroy_flow_table_in *in;
5680 	struct mcx_cmd_destroy_flow_table_mb_in *mb;
5681 	struct mcx_cmd_destroy_flow_table_out *out;
5682 	int error;
5683 	int token;
5684 
5685 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5686 	token = mcx_cmdq_token(sc);
5687 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
5688 
5689 	in = mcx_cmdq_in(cqe);
5690 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
5691 	in->cmd_op_mod = htobe16(0);
5692 
5693 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5694 	    &cqe->cq_input_ptr, token) != 0) {
5695 		printf("%s: unable to allocate destroy flow table mailbox\n",
5696 		    DEVNAME(sc));
5697 		return (-1);
5698 	}
5699 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5700 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5701 	mb->cmd_table_id = htobe32(flow_table_id);
5702 
5703 	mcx_cmdq_mboxes_sign(&mxm, 1);
5704 	mcx_cmdq_post(sc, cqe, 0);
5705 	error = mcx_cmdq_poll(sc, cqe, 1000);
5706 	if (error != 0) {
5707 		printf("%s: destroy flow table timeout\n", DEVNAME(sc));
5708 		goto free;
5709 	}
5710 	if (mcx_cmdq_verify(cqe) != 0) {
5711 		printf("%s: destroy flow table command corrupt\n",
5712 		    DEVNAME(sc));
		error = -1;
		goto free;
5714 	}
5715 
5716 	out = mcx_cmdq_out(cqe);
5717 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5718 		printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
5719 		    out->cmd_status, betoh32(out->cmd_syndrome));
5720 		error = -1;
5721 		goto free;
5722 	}
5723 
5724 free:
5725 	mcx_dmamem_free(sc, &mxm);
5726 	return (error);
5727 }
5728 
5730 static int
5731 mcx_create_flow_group(struct mcx_softc *sc, int flow_table_id, int group,
5732     int start, int size, int match_enable, struct mcx_flow_match *match)
5733 {
5734 	struct mcx_cmdq_entry *cqe;
5735 	struct mcx_dmamem mxm;
5736 	struct mcx_cmd_create_flow_group_in *in;
5737 	struct mcx_cmd_create_flow_group_mb_in *mbin;
5738 	struct mcx_cmd_create_flow_group_out *out;
5739 	struct mcx_flow_group *mfg;
5740 	int error;
5741 	int token;
5742 
5743 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5744 	token = mcx_cmdq_token(sc);
5745 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5746 	    token);
5747 
5748 	in = mcx_cmdq_in(cqe);
5749 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
5750 	in->cmd_op_mod = htobe16(0);
5751 
5752 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5753 	    != 0) {
5754 		printf("%s: unable to allocate create flow group mailbox\n",
5755 		    DEVNAME(sc));
5756 		return (-1);
5757 	}
5758 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5759 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5760 	mbin->cmd_table_id = htobe32(flow_table_id);
5761 	mbin->cmd_start_flow_index = htobe32(start);
5762 	mbin->cmd_end_flow_index = htobe32(start + (size - 1));
5763 
5764 	mbin->cmd_match_criteria_enable = match_enable;
5765 	memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
5766 
5767 	mcx_cmdq_mboxes_sign(&mxm, 2);
5768 	mcx_cmdq_post(sc, cqe, 0);
5769 	error = mcx_cmdq_poll(sc, cqe, 1000);
5770 	if (error != 0) {
5771 		printf("%s: create flow group timeout\n", DEVNAME(sc));
5772 		goto free;
5773 	}
5774 	if (mcx_cmdq_verify(cqe) != 0) {
5775 		printf("%s: create flow group command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
5777 	}
5778 
5779 	out = mcx_cmdq_out(cqe);
5780 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5781 		printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
5782 		    out->cmd_status, betoh32(out->cmd_syndrome));
5783 		error = -1;
5784 		goto free;
5785 	}
5786 
5787 	mfg = &sc->sc_flow_group[group];
5788 	mfg->g_id = mcx_get_id(out->cmd_group_id);
5789 	mfg->g_table = flow_table_id;
5790 	mfg->g_start = start;
5791 	mfg->g_size = size;
5792 
5793 free:
5794 	mcx_dmamem_free(sc, &mxm);
5795 	return (error);
5796 }
5797 
5798 static int
5799 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
5800 {
5801 	struct mcx_cmdq_entry *cqe;
5802 	struct mcx_dmamem mxm;
5803 	struct mcx_cmd_destroy_flow_group_in *in;
5804 	struct mcx_cmd_destroy_flow_group_mb_in *mb;
5805 	struct mcx_cmd_destroy_flow_group_out *out;
5806 	struct mcx_flow_group *mfg;
5807 	int error;
5808 	int token;
5809 
5810 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5811 	token = mcx_cmdq_token(sc);
5812 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
5813 
5814 	in = mcx_cmdq_in(cqe);
5815 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
5816 	in->cmd_op_mod = htobe16(0);
5817 
5818 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5819 	    &cqe->cq_input_ptr, token) != 0) {
5820 		printf("%s: unable to allocate destroy flow group mailbox\n",
5821 		    DEVNAME(sc));
5822 		return (-1);
5823 	}
5824 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5825 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5826 	mfg = &sc->sc_flow_group[group];
5827 	mb->cmd_table_id = htobe32(mfg->g_table);
5828 	mb->cmd_group_id = htobe32(mfg->g_id);
5829 
5830 	mcx_cmdq_mboxes_sign(&mxm, 2);
5831 	mcx_cmdq_post(sc, cqe, 0);
5832 	error = mcx_cmdq_poll(sc, cqe, 1000);
5833 	if (error != 0) {
5834 		printf("%s: destroy flow group timeout\n", DEVNAME(sc));
5835 		goto free;
5836 	}
5837 	if (mcx_cmdq_verify(cqe) != 0) {
5838 		printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
5840 	}
5841 
5842 	out = mcx_cmdq_out(cqe);
5843 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5844 		printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
5845 		    out->cmd_status, betoh32(out->cmd_syndrome));
5846 		error = -1;
5847 		goto free;
5848 	}
5849 
5850 	mfg->g_id = -1;
5851 	mfg->g_table = -1;
5852 	mfg->g_size = 0;
5853 	mfg->g_start = 0;
5854 free:
5855 	mcx_dmamem_free(sc, &mxm);
5856 	return (error);
5857 }
5858 
5859 static int
5860 mcx_set_flow_table_entry_mac(struct mcx_softc *sc, int group, int index,
5861     uint8_t *macaddr, uint32_t dest)
5862 {
5863 	struct mcx_cmdq_entry *cqe;
5864 	struct mcx_dmamem mxm;
5865 	struct mcx_cmd_set_flow_table_entry_in *in;
5866 	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
5867 	struct mcx_cmd_set_flow_table_entry_out *out;
5868 	struct mcx_flow_group *mfg;
5869 	uint32_t *pdest;
5870 	int error;
5871 	int token;
5872 
5873 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5874 	token = mcx_cmdq_token(sc);
5875 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
5876 	    sizeof(*out), token);
5877 
5878 	in = mcx_cmdq_in(cqe);
5879 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
5880 	in->cmd_op_mod = htobe16(0);
5881 
5882 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5883 	    != 0) {
5884 		printf("%s: unable to allocate set flow table entry mailbox\n",
5885 		    DEVNAME(sc));
5886 		return (-1);
5887 	}
5888 
5889 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5890 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5891 
5892 	mfg = &sc->sc_flow_group[group];
5893 	mbin->cmd_table_id = htobe32(mfg->g_table);
5894 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
5895 	mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
5896 
5897 	/* flow context ends at offset 0x330, 0x130 into the second mbox */
5898 	pdest = (uint32_t *)
5899 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
5900 	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
5901 	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
5902 	*pdest = htobe32(dest);
5903 
5904 	/* the only thing we match on at the moment is the dest mac address */
5905 	if (macaddr != NULL) {
5906 		memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
5907 		    ETHER_ADDR_LEN);
5908 	}
5909 
5910 	mcx_cmdq_mboxes_sign(&mxm, 2);
5911 	mcx_cmdq_post(sc, cqe, 0);
5912 	error = mcx_cmdq_poll(sc, cqe, 1000);
5913 	if (error != 0) {
5914 		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5915 		goto free;
5916 	}
5917 	if (mcx_cmdq_verify(cqe) != 0) {
5918 		printf("%s: set flow table entry command corrupt\n",
5919 		    DEVNAME(sc));
		error = -1;
		goto free;
5921 	}
5922 
5923 	out = mcx_cmdq_out(cqe);
5924 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5925 		printf("%s: set flow table entry failed (%x, %x)\n",
5926 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
5927 		error = -1;
5928 		goto free;
5929 	}
5930 
5931 free:
5932 	mcx_dmamem_free(sc, &mxm);
5933 	return (error);
5934 }
5935 
5936 static int
5937 mcx_set_flow_table_entry_proto(struct mcx_softc *sc, int group, int index,
5938     int ethertype, int ip_proto, uint32_t dest)
5939 {
5940 	struct mcx_cmdq_entry *cqe;
5941 	struct mcx_dmamem mxm;
5942 	struct mcx_cmd_set_flow_table_entry_in *in;
5943 	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
5944 	struct mcx_cmd_set_flow_table_entry_out *out;
5945 	struct mcx_flow_group *mfg;
5946 	uint32_t *pdest;
5947 	int error;
5948 	int token;
5949 
5950 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5951 	token = mcx_cmdq_token(sc);
5952 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
5953 	    sizeof(*out), token);
5954 
5955 	in = mcx_cmdq_in(cqe);
5956 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
5957 	in->cmd_op_mod = htobe16(0);
5958 
5959 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5960 	    != 0) {
5961 		printf("%s: unable to allocate set flow table entry mailbox\n",
5962 		    DEVNAME(sc));
5963 		return (-1);
5964 	}
5965 
5966 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5967 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5968 
5969 	mfg = &sc->sc_flow_group[group];
5970 	mbin->cmd_table_id = htobe32(mfg->g_table);
5971 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
5972 	mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
5973 
5974 	/* flow context ends at offset 0x330, 0x130 into the second mbox */
5975 	pdest = (uint32_t *)
5976 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
5977 	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
5978 	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
5979 	*pdest = htobe32(dest);
5980 
5981 	mbin->cmd_flow_ctx.fc_match_value.mc_ethertype = htobe16(ethertype);
5982 	mbin->cmd_flow_ctx.fc_match_value.mc_ip_proto = ip_proto;
5983 
5984 	mcx_cmdq_mboxes_sign(&mxm, 2);
5985 	mcx_cmdq_post(sc, cqe, 0);
5986 	error = mcx_cmdq_poll(sc, cqe, 1000);
5987 	if (error != 0) {
5988 		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5989 		goto free;
5990 	}
5991 	if (mcx_cmdq_verify(cqe) != 0) {
5992 		printf("%s: set flow table entry command corrupt\n",
5993 		    DEVNAME(sc));
		error = -1;
		goto free;
5995 	}
5996 
5997 	out = mcx_cmdq_out(cqe);
5998 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5999 		printf("%s: set flow table entry failed (%x, %x)\n",
6000 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
6001 		error = -1;
6002 		goto free;
6003 	}
6004 
6005 free:
6006 	mcx_dmamem_free(sc, &mxm);
6007 	return (error);
6008 }
6009 
6010 static int
6011 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
6012 {
6013 	struct mcx_cmdq_entry *cqe;
6014 	struct mcx_dmamem mxm;
6015 	struct mcx_cmd_delete_flow_table_entry_in *in;
6016 	struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
6017 	struct mcx_cmd_delete_flow_table_entry_out *out;
6018 	struct mcx_flow_group *mfg;
6019 	int error;
6020 	int token;
6021 
6022 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6023 	token = mcx_cmdq_token(sc);
6024 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
6025 	    token);
6026 
6027 	in = mcx_cmdq_in(cqe);
6028 	in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
6029 	in->cmd_op_mod = htobe16(0);
6030 
6031 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6032 	    &cqe->cq_input_ptr, token) != 0) {
6033 		printf("%s: unable to allocate "
6034 		    "delete flow table entry mailbox\n", DEVNAME(sc));
6035 		return (-1);
6036 	}
6037 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6038 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
6039 
6040 	mfg = &sc->sc_flow_group[group];
6041 	mbin->cmd_table_id = htobe32(mfg->g_table);
6042 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
6043 
6044 	mcx_cmdq_mboxes_sign(&mxm, 2);
6045 	mcx_cmdq_post(sc, cqe, 0);
6046 	error = mcx_cmdq_poll(sc, cqe, 1000);
6047 	if (error != 0) {
6048 		printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
6049 		goto free;
6050 	}
6051 	if (mcx_cmdq_verify(cqe) != 0) {
6052 		printf("%s: delete flow table entry command corrupt\n",
6053 		    DEVNAME(sc));
		error = -1;
		goto free;
6055 	}
6056 
6057 	out = mcx_cmdq_out(cqe);
6058 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6059 		printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
6060 		    DEVNAME(sc), group, index, out->cmd_status,
6061 		    betoh32(out->cmd_syndrome));
6062 		error = -1;
6063 		goto free;
6064 	}
6065 
6066 free:
6067 	mcx_dmamem_free(sc, &mxm);
6068 	return (error);
6069 }
6070 
6071 #if 0
6072 int
6073 mcx_dump_flow_table(struct mcx_softc *sc, int flow_table_id)
6074 {
6075 	struct mcx_dmamem mxm;
6076 	struct mcx_cmdq_entry *cqe;
6077 	struct mcx_cmd_query_flow_table_in *in;
6078 	struct mcx_cmd_query_flow_table_mb_in *mbin;
6079 	struct mcx_cmd_query_flow_table_out *out;
6080 	struct mcx_cmd_query_flow_table_mb_out *mbout;
6081 	uint8_t token = mcx_cmdq_token(sc);
6082 	int error;
6083 	int i;
6084 	uint8_t *dump;
6085 
6086 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6087 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6088 	    sizeof(*out) + sizeof(*mbout) + 16, token);
6089 
6090 	in = mcx_cmdq_in(cqe);
6091 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
6092 	in->cmd_op_mod = htobe16(0);
6093 
6094 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6095 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
6096 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6097 	    &cqe->cq_output_ptr, token) != 0) {
6098 		printf(", unable to allocate query flow table mailboxes\n");
6099 		return (-1);
6100 	}
6101 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6102 
6103 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6104 	mbin->cmd_table_type = 0;
6105 	mbin->cmd_table_id = htobe32(flow_table_id);
6106 
6107 	mcx_cmdq_mboxes_sign(&mxm, 1);
6108 
6109 	mcx_cmdq_post(sc, cqe, 0);
6110 	error = mcx_cmdq_poll(sc, cqe, 1000);
6111 	if (error != 0) {
6112 		printf("%s: query flow table timeout\n", DEVNAME(sc));
6113 		goto free;
6114 	}
6115 	error = mcx_cmdq_verify(cqe);
6116 	if (error != 0) {
6117 		printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
6118 		goto free;
6119 	}
6120 
6121 	out = mcx_cmdq_out(cqe);
6122 	switch (out->cmd_status) {
6123 	case MCX_CQ_STATUS_OK:
6124 		break;
6125 	default:
6126 		printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
6127 		    out->cmd_status, betoh32(out->cmd_syndrome));
6128 		error = -1;
6129 		goto free;
6130 	}
6131 
	mbout = (struct mcx_cmd_query_flow_table_mb_out *)
6133 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6134 	dump = (uint8_t *)mbout + 8;
6135 	for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
6136 		printf("%.2x ", dump[i]);
6137 		if (i % 16 == 15)
6138 			printf("\n");
6139 	}
6140 free:
6141 	mcx_cq_mboxes_free(sc, &mxm);
6142 	return (error);
6143 }

int
6145 mcx_dump_flow_table_entry(struct mcx_softc *sc, int flow_table_id, int index)
6146 {
6147 	struct mcx_dmamem mxm;
6148 	struct mcx_cmdq_entry *cqe;
6149 	struct mcx_cmd_query_flow_table_entry_in *in;
6150 	struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
6151 	struct mcx_cmd_query_flow_table_entry_out *out;
6152 	struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
6153 	uint8_t token = mcx_cmdq_token(sc);
6154 	int error;
6155 	int i;
6156 	uint8_t *dump;
6157 
6158 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6159 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6160 	    sizeof(*out) + sizeof(*mbout) + 16, token);
6161 
6162 	in = mcx_cmdq_in(cqe);
6163 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
6164 	in->cmd_op_mod = htobe16(0);
6165 
6166 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6167 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6168 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6169 	    &cqe->cq_output_ptr, token) != 0) {
6170 		printf(", unable to allocate "
6171 		    "query flow table entry mailboxes\n");
6172 		return (-1);
6173 	}
6174 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6175 
6176 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6177 	mbin->cmd_table_type = 0;
6178 	mbin->cmd_table_id = htobe32(flow_table_id);
6179 	mbin->cmd_flow_index = htobe32(index);
6180 
6181 	mcx_cmdq_mboxes_sign(&mxm, 1);
6182 
6183 	mcx_cmdq_post(sc, cqe, 0);
6184 	error = mcx_cmdq_poll(sc, cqe, 1000);
6185 	if (error != 0) {
6186 		printf("%s: query flow table entry timeout\n", DEVNAME(sc));
6187 		goto free;
6188 	}
6189 	error = mcx_cmdq_verify(cqe);
6190 	if (error != 0) {
6191 		printf("%s: query flow table entry reply corrupt\n",
6192 		    DEVNAME(sc));
6193 		goto free;
6194 	}
6195 
6196 	out = mcx_cmdq_out(cqe);
6197 	switch (out->cmd_status) {
6198 	case MCX_CQ_STATUS_OK:
6199 		break;
6200 	default:
6201 		printf("%s: query flow table entry failed (%x/%x)\n",
6202 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
6203 		error = -1;
6204 		goto free;
6205 	}
6206 
	mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
6208 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6209 	dump = (uint8_t *)mbout;
6210 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6211 		printf("%.2x ", dump[i]);
6212 		if (i % 16 == 15)
6213 			printf("\n");
6214 	}
6215 
6216 free:
6217 	mcx_cq_mboxes_free(sc, &mxm);
6218 	return (error);
6219 }
6220 
6221 int
6222 mcx_dump_flow_group(struct mcx_softc *sc, int flow_table_id)
6223 {
6224 	struct mcx_dmamem mxm;
6225 	struct mcx_cmdq_entry *cqe;
6226 	struct mcx_cmd_query_flow_group_in *in;
6227 	struct mcx_cmd_query_flow_group_mb_in *mbin;
6228 	struct mcx_cmd_query_flow_group_out *out;
6229 	struct mcx_cmd_query_flow_group_mb_out *mbout;
6230 	uint8_t token = mcx_cmdq_token(sc);
6231 	int error;
6232 	int i;
6233 	uint8_t *dump;
6234 
6235 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6236 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6237 	    sizeof(*out) + sizeof(*mbout) + 16, token);
6238 
6239 	in = mcx_cmdq_in(cqe);
6240 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
6241 	in->cmd_op_mod = htobe16(0);
6242 
6243 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6244 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6245 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6246 	    &cqe->cq_output_ptr, token) != 0) {
6247 		printf(", unable to allocate query flow group mailboxes\n");
6248 		return (-1);
6249 	}
6250 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6251 
6252 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6253 	mbin->cmd_table_type = 0;
6254 	mbin->cmd_table_id = htobe32(flow_table_id);
6255 	mbin->cmd_group_id = htobe32(sc->sc_flow_group_id);
6256 
6257 	mcx_cmdq_mboxes_sign(&mxm, 1);
6258 
6259 	mcx_cmdq_post(sc, cqe, 0);
6260 	error = mcx_cmdq_poll(sc, cqe, 1000);
6261 	if (error != 0) {
6262 		printf("%s: query flow group timeout\n", DEVNAME(sc));
6263 		goto free;
6264 	}
6265 	error = mcx_cmdq_verify(cqe);
6266 	if (error != 0) {
6267 		printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
6268 		goto free;
6269 	}
6270 
6271 	out = mcx_cmdq_out(cqe);
6272 	switch (out->cmd_status) {
6273 	case MCX_CQ_STATUS_OK:
6274 		break;
6275 	default:
6276 		printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
6277 		    out->cmd_status, betoh32(out->cmd_syndrome));
6278 		error = -1;
6279 		goto free;
6280 	}
6281 
	mbout = (struct mcx_cmd_query_flow_group_mb_out *)
6283 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6284 	dump = (uint8_t *)mbout;
6285 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6286 		printf("%.2x ", dump[i]);
6287 		if (i % 16 == 15)
6288 			printf("\n");
6289 	}
6290 	dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
6291 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6292 		printf("%.2x ", dump[i]);
6293 		if (i % 16 == 15)
6294 			printf("\n");
6295 	}
6296 
6297 free:
6298 	mcx_cq_mboxes_free(sc, &mxm);
6299 	return (error);
6300 }
6301 
6302 static int
6303 mcx_dump_counters(struct mcx_softc *sc)
6304 {
6305 	struct mcx_dmamem mxm;
6306 	struct mcx_cmdq_entry *cqe;
6307 	struct mcx_cmd_query_vport_counters_in *in;
6308 	struct mcx_cmd_query_vport_counters_mb_in *mbin;
6309 	struct mcx_cmd_query_vport_counters_out *out;
6310 	struct mcx_nic_vport_counters *counters;
6311 	int error, token;
6312 
6313 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6314 	token = mcx_cmdq_token(sc);
6315 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6316 	    sizeof(*out) + sizeof(*counters), token);
6317 
6318 	in = mcx_cmdq_in(cqe);
6319 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
6320 	in->cmd_op_mod = htobe16(0);
6321 
6322 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6323 	    &cqe->cq_output_ptr, token) != 0) {
6324 		printf(", unable to allocate "
6325 		    "query nic vport counters mailboxen\n");
6326 		return (-1);
6327 	}
6328 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6329 
6330 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6331 	mbin->cmd_clear = 0x80;
6332 
6333 	mcx_cmdq_mboxes_sign(&mxm, 1);
6334 	mcx_cmdq_post(sc, cqe, 0);
6335 
6336 	error = mcx_cmdq_poll(sc, cqe, 1000);
6337 	if (error != 0) {
6338 		printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
6339 		goto free;
6340 	}
6341 	if (mcx_cmdq_verify(cqe) != 0) {
6342 		printf("%s: query nic vport counters command corrupt\n",
6343 		    DEVNAME(sc));
		error = -1;
		goto free;
6345 	}
6346 
6347 	out = mcx_cmdq_out(cqe);
6348 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6349 		printf("%s: query nic vport counters failed (%x, %x)\n",
6350 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
6351 		error = -1;
6352 		goto free;
6353 	}
6354 
6355 	counters = (struct mcx_nic_vport_counters *)
6356 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6357 	if (counters->rx_bcast.packets + counters->tx_bcast.packets +
6358 	    counters->rx_ucast.packets + counters->tx_ucast.packets +
6359 	    counters->rx_err.packets + counters->tx_err.packets)
6360 		printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
6361 		    DEVNAME(sc),
6362 		    betoh64(counters->tx_err.packets),
6363 		    betoh64(counters->rx_err.packets),
6364 		    betoh64(counters->tx_ucast.packets),
6365 		    betoh64(counters->rx_ucast.packets),
6366 		    betoh64(counters->tx_bcast.packets),
6367 		    betoh64(counters->rx_bcast.packets));
6368 free:
6369 	mcx_dmamem_free(sc, &mxm);
6370 
6371 	return (error);
6372 }
6373 
6374 static int
6375 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
6376 {
6377 	struct mcx_dmamem mxm;
6378 	struct mcx_cmdq_entry *cqe;
6379 	struct mcx_cmd_query_flow_counter_in *in;
6380 	struct mcx_cmd_query_flow_counter_mb_in *mbin;
6381 	struct mcx_cmd_query_flow_counter_out *out;
6382 	struct mcx_counter *counters;
6383 	int error, token;
6384 
6385 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6386 	token = mcx_cmdq_token(sc);
6387 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
6388 	    sizeof(*counters), token);
6389 
6390 	in = mcx_cmdq_in(cqe);
6391 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
6392 	in->cmd_op_mod = htobe16(0);
6393 
6394 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6395 	    &cqe->cq_output_ptr, token) != 0) {
6396 		printf(", unable to allocate query flow counter mailboxen\n");
6397 		return (-1);
6398 	}
6399 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6400 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6401 	mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
6402 	mbin->cmd_clear = 0x80;
6403 
6404 	mcx_cmdq_mboxes_sign(&mxm, 1);
6405 	mcx_cmdq_post(sc, cqe, 0);
6406 
6407 	error = mcx_cmdq_poll(sc, cqe, 1000);
6408 	if (error != 0) {
6409 		printf("%s: query flow counter timeout\n", DEVNAME(sc));
6410 		goto free;
6411 	}
6412 	if (mcx_cmdq_verify(cqe) != 0) {
6413 		printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
		error = -1;
		goto free;
6415 	}
6416 
6417 	out = mcx_cmdq_out(cqe);
6418 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6419 		printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
6420 		    out->cmd_status, betoh32(out->cmd_syndrome));
6421 		error = -1;
6422 		goto free;
6423 	}
6424 
6425 	counters = (struct mcx_counter *)
6426 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6427 	if (counters->packets)
6428 		printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
6429 		    betoh64(counters->packets));
6430 free:
6431 	mcx_dmamem_free(sc, &mxm);
6432 
6433 	return (error);
6434 }
6435 
6436 #endif
6437 
6438 #if NKSTAT > 0
6439 
6440 int
6441 mcx_query_rq(struct mcx_softc *sc, struct mcx_rx *rx, struct mcx_rq_ctx *rq_ctx)
6442 {
6443 	struct mcx_dmamem mxm;
6444 	struct mcx_cmdq_entry *cqe;
6445 	struct mcx_cmd_query_rq_in *in;
6446 	struct mcx_cmd_query_rq_out *out;
6447 	struct mcx_cmd_query_rq_mb_out *mbout;
6448 	uint8_t token = mcx_cmdq_token(sc);
6449 	int error;
6450 
6451 	cqe = mcx_get_cmdq_entry(sc, MCX_CMDQ_SLOT_KSTAT);
6452 	if (cqe == NULL)
6453 		return (-1);
6454 
6455 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6456 	    token);
6457 
6458 	in = mcx_cmdq_in(cqe);
6459 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
6460 	in->cmd_op_mod = htobe16(0);
6461 	in->cmd_rqn = htobe32(rx->rx_rqn);
6462 
6463 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6464 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6465 	    &cqe->cq_output_ptr, token) != 0) {
		printf("%s: unable to allocate query rq mailboxes\n",
		    DEVNAME(sc));
6467 		return (-1);
6468 	}
6469 
6470 	mcx_cmdq_mboxes_sign(&mxm, 1);
6471 
6472 	error = mcx_cmdq_exec(sc, cqe, MCX_CMDQ_SLOT_KSTAT, 1000);
6473 	if (error != 0) {
6474 		printf("%s: query rq timeout\n", DEVNAME(sc));
6475 		goto free;
6476 	}
6477 	error = mcx_cmdq_verify(cqe);
6478 	if (error != 0) {
6479 		printf("%s: query rq reply corrupt\n", DEVNAME(sc));
6480 		goto free;
6481 	}
6482 
6483 	out = mcx_cmdq_out(cqe);
6484 	switch (out->cmd_status) {
6485 	case MCX_CQ_STATUS_OK:
6486 		break;
6487 	default:
6488 		printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
6489 		    out->cmd_status, betoh32(out->cmd_syndrome));
6490 		error = -1;
6491 		goto free;
6492 	}
6493 
	mbout = (struct mcx_cmd_query_rq_mb_out *)
6495 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6496 	memcpy(rq_ctx, &mbout->cmd_ctx, sizeof(*rq_ctx));
6497 
6498 free:
6499 	mcx_cq_mboxes_free(sc, &mxm);
6500 	return (error);
6501 }
6502 
6503 int
6504 mcx_query_sq(struct mcx_softc *sc, struct mcx_tx *tx, struct mcx_sq_ctx *sq_ctx)
6505 {
6506 	struct mcx_dmamem mxm;
6507 	struct mcx_cmdq_entry *cqe;
6508 	struct mcx_cmd_query_sq_in *in;
6509 	struct mcx_cmd_query_sq_out *out;
6510 	struct mcx_cmd_query_sq_mb_out *mbout;
6511 	uint8_t token = mcx_cmdq_token(sc);
6512 	int error;
6513 
6514 	cqe = mcx_get_cmdq_entry(sc, MCX_CMDQ_SLOT_KSTAT);
6515 	if (cqe == NULL)
6516 		return (-1);
6517 
6518 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6519 	    token);
6520 
6521 	in = mcx_cmdq_in(cqe);
6522 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
6523 	in->cmd_op_mod = htobe16(0);
6524 	in->cmd_sqn = htobe32(tx->tx_sqn);
6525 
6526 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6527 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6528 	    &cqe->cq_output_ptr, token) != 0) {
		printf("%s: unable to allocate query sq mailboxes\n",
		    DEVNAME(sc));
6530 		return (-1);
6531 	}
6532 
6533 	mcx_cmdq_mboxes_sign(&mxm, 1);
6534 
6535 	error = mcx_cmdq_exec(sc, cqe, MCX_CMDQ_SLOT_KSTAT, 1000);
6536 	if (error != 0) {
6537 		printf("%s: query sq timeout\n", DEVNAME(sc));
6538 		goto free;
6539 	}
6540 	error = mcx_cmdq_verify(cqe);
6541 	if (error != 0) {
6542 		printf("%s: query sq reply corrupt\n", DEVNAME(sc));
6543 		goto free;
6544 	}
6545 
6546 	out = mcx_cmdq_out(cqe);
6547 	switch (out->cmd_status) {
6548 	case MCX_CQ_STATUS_OK:
6549 		break;
6550 	default:
6551 		printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
6552 		    out->cmd_status, betoh32(out->cmd_syndrome));
6553 		error = -1;
6554 		goto free;
6555 	}
6556 
	mbout = (struct mcx_cmd_query_sq_mb_out *)
6558 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6559 	memcpy(sq_ctx, &mbout->cmd_ctx, sizeof(*sq_ctx));
6560 
6561 free:
6562 	mcx_cq_mboxes_free(sc, &mxm);
6563 	return (error);
6564 }
6565 
6566 int
6567 mcx_query_cq(struct mcx_softc *sc, struct mcx_cq *cq, struct mcx_cq_ctx *cq_ctx)
6568 {
6569 	struct mcx_dmamem mxm;
6570 	struct mcx_cmdq_entry *cqe;
6571 	struct mcx_cmd_query_cq_in *in;
6572 	struct mcx_cmd_query_cq_out *out;
6573 	struct mcx_cq_ctx *ctx;
6574 	uint8_t token = mcx_cmdq_token(sc);
6575 	int error;
6576 
6577 	cqe = mcx_get_cmdq_entry(sc, MCX_CMDQ_SLOT_KSTAT);
6578 	if (cqe == NULL)
6579 		return (-1);
6580 
6581 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
6582 	    token);
6583 
6584 	in = mcx_cmdq_in(cqe);
6585 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_CQ);
6586 	in->cmd_op_mod = htobe16(0);
6587 	in->cmd_cqn = htobe32(cq->cq_n);
6588 
6589 	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6590 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6591 	    &cqe->cq_output_ptr, token) != 0) {
		printf("%s: unable to allocate query cq mailboxes\n",
		    DEVNAME(sc));
6593 		return (-1);
6594 	}
6595 
6596 	mcx_cmdq_mboxes_sign(&mxm, 1);
6597 
6598 	error = mcx_cmdq_exec(sc, cqe, MCX_CMDQ_SLOT_KSTAT, 1000);
6599 	if (error != 0) {
6600 		printf("%s: query cq timeout\n", DEVNAME(sc));
6601 		goto free;
6602 	}
6603 	error = mcx_cmdq_verify(cqe);
6604 	if (error != 0) {
6605 		printf("%s: query cq reply corrupt\n", DEVNAME(sc));
6606 		goto free;
6607 	}
6608 
6609 	out = mcx_cmdq_out(cqe);
6610 	switch (out->cmd_status) {
6611 	case MCX_CQ_STATUS_OK:
6612 		break;
6613 	default:
6614 		printf("%s: query cq failed (%x/%x)\n", DEVNAME(sc),
6615 		    out->cmd_status, betoh32(out->cmd_syndrome));
6616 		error = -1;
6617 		goto free;
6618 	}
6619 
	ctx = (struct mcx_cq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6621 	memcpy(cq_ctx, ctx, sizeof(*cq_ctx));
6622 free:
6623 	mcx_cq_mboxes_free(sc, &mxm);
6624 	return (error);
6625 }
6626 
6627 int
6628 mcx_query_eq(struct mcx_softc *sc, struct mcx_eq *eq, struct mcx_eq_ctx *eq_ctx)
6629 {
6630 	struct mcx_dmamem mxm;
6631 	struct mcx_cmdq_entry *cqe;
6632 	struct mcx_cmd_query_eq_in *in;
6633 	struct mcx_cmd_query_eq_out *out;
6634 	struct mcx_eq_ctx *ctx;
6635 	uint8_t token = mcx_cmdq_token(sc);
6636 	int error;
6637 
6638 	cqe = mcx_get_cmdq_entry(sc, MCX_CMDQ_SLOT_KSTAT);
6639 	if (cqe == NULL)
6640 		return (-1);
6641 
6642 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
6643 	    token);
6644 
6645 	in = mcx_cmdq_in(cqe);
6646 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_EQ);
6647 	in->cmd_op_mod = htobe16(0);
6648 	in->cmd_eqn = htobe32(eq->eq_n);
6649 
6650 	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6651 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6652 	    &cqe->cq_output_ptr, token) != 0) {
		printf("%s: unable to allocate query eq mailboxes\n",
		    DEVNAME(sc));
6654 		return (-1);
6655 	}
6656 
6657 	mcx_cmdq_mboxes_sign(&mxm, 1);
6658 
6659 	error = mcx_cmdq_exec(sc, cqe, MCX_CMDQ_SLOT_KSTAT, 1000);
6660 	if (error != 0) {
6661 		printf("%s: query eq timeout\n", DEVNAME(sc));
6662 		goto free;
6663 	}
6664 	error = mcx_cmdq_verify(cqe);
6665 	if (error != 0) {
6666 		printf("%s: query eq reply corrupt\n", DEVNAME(sc));
6667 		goto free;
6668 	}
6669 
6670 	out = mcx_cmdq_out(cqe);
6671 	switch (out->cmd_status) {
6672 	case MCX_CQ_STATUS_OK:
6673 		break;
6674 	default:
6675 		printf("%s: query eq failed (%x/%x)\n", DEVNAME(sc),
6676 		    out->cmd_status, betoh32(out->cmd_syndrome));
6677 		error = -1;
6678 		goto free;
6679 	}
6680 
	ctx = (struct mcx_eq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6682 	memcpy(eq_ctx, ctx, sizeof(*eq_ctx));
6683 free:
6684 	mcx_cq_mboxes_free(sc, &mxm);
6685 	return (error);
6686 }
6687 
6688 #endif /* NKSTAT > 0 */
6689 
6690 static inline unsigned int
6691 mcx_rx_fill_slots(struct mcx_softc *sc, struct mcx_rx *rx, uint nslots)
6692 {
6693 	struct mcx_rq_entry *ring, *rqe;
6694 	struct mcx_slot *ms;
6695 	struct mbuf *m;
6696 	uint slot, p, fills;
6697 
6698 	ring = MCX_DMA_KVA(&rx->rx_rq_mem);
6699 	p = rx->rx_prod;
6700 
6701 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
6702 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
6703 
6704 	for (fills = 0; fills < nslots; fills++) {
6705 		slot = p % (1 << MCX_LOG_RQ_SIZE);
6706 
6707 		ms = &rx->rx_slots[slot];
6708 		rqe = &ring[slot];
6709 
6710 		m = MCLGETL(NULL, M_DONTWAIT, sc->sc_rxbufsz);
6711 		if (m == NULL)
6712 			break;
6713 
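		/*
		 * the cluster can be bigger than sc_rxbufsz; move
		 * m_data up so exactly sc_rxbufsz bytes remain, then
		 * shift by ETHER_ALIGN so the payload after the
		 * ethernet header lands on a 4 byte boundary.
		 */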
6714 		m->m_data += (m->m_ext.ext_size - sc->sc_rxbufsz);
6715 		m->m_data += ETHER_ALIGN;
6716 		m->m_len = m->m_pkthdr.len = sc->sc_hardmtu;
6717 
6718 		if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6719 		    BUS_DMA_NOWAIT) != 0) {
6720 			m_freem(m);
6721 			break;
6722 		}
6723 		ms->ms_m = m;
6724 
6725 		htobem32(&rqe->rqe_byte_count, ms->ms_map->dm_segs[0].ds_len);
6726 		htobem64(&rqe->rqe_addr, ms->ms_map->dm_segs[0].ds_addr);
6727 		htobem32(&rqe->rqe_lkey, sc->sc_lkey);
6728 
6729 		p++;
6730 	}
6731 
6732 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
6733 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
6734 
6735 	rx->rx_prod = p;
6736 
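	/*
	 * publish the new producer counter through the doorbell
	 * record so the chip can see how far the rq has been filled.
	 */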
6737 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6738 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
6739 	htobem32(MCX_DMA_OFF(&sc->sc_doorbell_mem, rx->rx_doorbell),
6740 	    p & MCX_WQ_DOORBELL_MASK);
6741 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6742 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
6743 
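	/* tell the caller how many slots we could not fill */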
6744 	return (nslots - fills);
6745 }
6746 
6747 int
6748 mcx_rx_fill(struct mcx_softc *sc, struct mcx_rx *rx)
6749 {
6750 	u_int slots;
6751 
6752 	slots = if_rxr_get(&rx->rx_rxr, (1 << MCX_LOG_RQ_SIZE));
6753 	if (slots == 0)
6754 		return (1);
6755 
6756 	slots = mcx_rx_fill_slots(sc, rx, slots);
6757 	if_rxr_put(&rx->rx_rxr, slots);
6758 	return (0);
6759 }
6760 
6761 void
6762 mcx_refill(void *xrx)
6763 {
6764 	struct mcx_rx *rx = xrx;
6765 	struct mcx_softc *sc = rx->rx_softc;
6766 
6767 	mcx_rx_fill(sc, rx);
6768 
6769 	if (if_rxr_inuse(&rx->rx_rxr) == 0)
6770 		timeout_add(&rx->rx_refill, 1);
6771 }
6772 
6773 static int
6774 mcx_process_txeof(struct mcx_softc *sc, struct mcx_tx *tx,
6775     struct mcx_cq_entry *cqe)
6776 {
6777 	struct mcx_slot *ms;
6778 	bus_dmamap_t map;
6779 	int slot, slots;
6780 
6781 	slot = betoh16(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);
6782 
6783 	ms = &tx->tx_slots[slot];
6784 	map = ms->ms_map;
6785 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
6786 	    BUS_DMASYNC_POSTWRITE);
6787 
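	/*
	 * the first sq entry holds the ctrl and eth segments plus a
	 * single data segment; each further slot carries up to
	 * MCX_SQ_SEGS_PER_SLOT more segments, which is what the
	 * (nsegs + 2) / MCX_SQ_SEGS_PER_SLOT below accounts for.
	 */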
6788 	slots = 1;
6789 	if (map->dm_nsegs > 1)
		slots += (map->dm_nsegs + 2) / MCX_SQ_SEGS_PER_SLOT;
6791 
6792 	bus_dmamap_unload(sc->sc_dmat, map);
6793 	m_freem(ms->ms_m);
6794 	ms->ms_m = NULL;
6795 
6796 	return (slots);
6797 }
6798 
6799 static void
6800 mcx_calibrate_first(struct mcx_softc *sc)
6801 {
6802 	struct mcx_calibration *c = &sc->sc_calibration[0];
6803 	int s;
6804 
6805 	sc->sc_calibration_gen = 0;
6806 
6807 	s = splhigh(); /* crit_enter? */
6808 	c->c_ubase = nsecuptime();
6809 	c->c_tbase = mcx_timer(sc);
6810 	splx(s);
6811 	c->c_ratio = 0;
6812 
6813 #ifdef notyet
6814 	timeout_add_sec(&sc->sc_calibrate, MCX_CALIBRATE_FIRST);
6815 #endif
6816 }
6817 
6818 #define MCX_TIMESTAMP_SHIFT 24
6819 
6820 static void
6821 mcx_calibrate(void *arg)
6822 {
6823 	struct mcx_softc *sc = arg;
6824 	struct mcx_calibration *nc, *pc;
6825 	uint64_t udiff, tdiff;
6826 	unsigned int gen;
6827 	int s;
6828 
6829 	if (!ISSET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING))
6830 		return;
6831 
6832 	timeout_add_sec(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL);
6833 
6834 	gen = sc->sc_calibration_gen;
6835 	pc = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];
6836 	gen++;
6837 	nc = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];
6838 
6839 	nc->c_uptime = pc->c_ubase;
6840 	nc->c_timestamp = pc->c_tbase;
6841 
6842 	s = splhigh(); /* crit_enter? */
6843 	nc->c_ubase = nsecuptime();
6844 	nc->c_tbase = mcx_timer(sc);
6845 	splx(s);
6846 
6847 	udiff = nc->c_ubase - nc->c_uptime;
6848 	tdiff = nc->c_tbase - nc->c_timestamp;
6849 
6850 	/*
6851 	 * udiff is the wall clock time between calibration ticks,
6852 	 * which should be 32 seconds or 32 billion nanoseconds. if
6853 	 * we squint, 1 billion nanoseconds is kind of like a 32 bit
6854 	 * number, so 32 billion should still have a lot of high bits
6855 	 * spare. we use this space by shifting the nanoseconds up
6856 	 * 24 bits so we have a nice big number to divide by the
6857 	 * number of mcx timer ticks.
6858 	 */
6859 	nc->c_ratio = (udiff << MCX_TIMESTAMP_SHIFT) / tdiff;
6860 
6861 	membar_producer();
6862 	sc->sc_calibration_gen = gen;
6863 }
6864 
6865 static int
6866 mcx_process_rx(struct mcx_softc *sc, struct mcx_rx *rx,
6867     struct mcx_cq_entry *cqe, struct mbuf_list *ml,
6868     const struct mcx_calibration *c)
6869 {
6870 	struct mcx_slot *ms;
6871 	struct mbuf *m;
6872 	uint32_t flags, len;
6873 	int slot;
6874 
6875 	len = bemtoh32(&cqe->cq_byte_cnt);
6876 	slot = betoh16(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);
6877 
6878 	ms = &rx->rx_slots[slot];
6879 	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, len, BUS_DMASYNC_POSTREAD);
6880 	bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
6881 
6882 	m = ms->ms_m;
6883 	ms->ms_m = NULL;
6884 
6885 	m->m_pkthdr.len = m->m_len = len;
6886 
6887 	if (cqe->cq_rx_hash_type) {
6888 		m->m_pkthdr.ph_flowid = betoh32(cqe->cq_rx_hash);
6889 		m->m_pkthdr.csum_flags |= M_FLOWID;
6890 	}
6891 
6892 	flags = bemtoh32(&cqe->cq_flags);
6893 	if (flags & MCX_CQ_ENTRY_FLAGS_L3_OK)
6894 		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
6895 	if (flags & MCX_CQ_ENTRY_FLAGS_L4_OK)
6896 		m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
6897 		    M_UDP_CSUM_IN_OK;
6898 #if NVLAN > 0
6899 	if (flags & MCX_CQ_ENTRY_FLAGS_CV) {
6900 		m->m_pkthdr.ether_vtag = (flags &
6901 		    MCX_CQ_ENTRY_FLAGS_VLAN_MASK);
6902 		m->m_flags |= M_VLANTAG;
6903 	}
6904 #endif
6905 
6906 #ifdef notyet
6907 	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_LINK0) && c->c_ratio) {
6908 		uint64_t t = bemtoh64(&cqe->cq_timestamp);
6909 		t -= c->c_timestamp;
6910 		t *= c->c_ratio;
6911 		t >>= MCX_TIMESTAMP_SHIFT;
6912 		t += c->c_uptime;
6913 
6914 		m->m_pkthdr.ph_timestamp = t;
6915 		SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
6916 	}
6917 #endif
6918 
6919 	ml_enqueue(ml, m);
6920 
6921 	return (1);
6922 }
6923 
6924 static struct mcx_cq_entry *
6925 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
6926 {
6927 	struct mcx_cq_entry *cqe;
6928 	int next;
6929 
6930 	cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
6931 	next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
6932 
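	/*
	 * the chip flips the ownership bit in each entry every time
	 * it wraps around the ring, so an entry is ours when its
	 * owner bit matches the parity of the number of laps we've
	 * made.
	 */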
6933 	if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
6934 	    ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
6935 		return (&cqe[next]);
6936 	}
6937 
6938 	return (NULL);
6939 }
6940 
6941 static void
6942 mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar)
6943 {
6944 	struct mcx_cq_doorbell *db;
6945 	bus_size_t offset;
6946 	uint32_t val;
6947 	uint64_t uval;
6948 
6949 	val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
6950 	val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
6951 
6952 	db = MCX_DMA_OFF(&sc->sc_doorbell_mem, cq->cq_doorbell);
6953 
6954 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6955 	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_POSTWRITE);
6956 
6957 	htobem32(&db->db_update_ci, cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
6958 	htobem32(&db->db_arm_ci, val);
6959 
6960 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6961 	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_PREWRITE);
6962 
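	/*
	 * ring the cq doorbell in the uar page as well; the high
	 * word repeats the arm command and consumer index, the low
	 * word carries the cq number.
	 */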
6963 	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_CQ_DOORBELL;
6964 
6965 	uval = (uint64_t)val << 32;
6966 	uval |= cq->cq_n;
6967 
6968 	bus_space_write_raw_8(sc->sc_memt, sc->sc_memh, offset, htobe64(uval));
6969 	mcx_bar(sc, offset, sizeof(uval), BUS_SPACE_BARRIER_WRITE);
6970 }
6971 
6972 void
6973 mcx_process_cq(struct mcx_softc *sc, struct mcx_queues *q, struct mcx_cq *cq)
6974 {
6975 	struct mcx_rx *rx = &q->q_rx;
6976 	struct mcx_tx *tx = &q->q_tx;
6977 	const struct mcx_calibration *c;
6978 	unsigned int gen;
6979 	struct mcx_cq_entry *cqe;
6980 	uint8_t *cqp;
6981 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
6982 	int rxfree, txfree;
6983 
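	/*
	 * mcx_calibrate fills in the next calibration slot and then
	 * publishes it by bumping the generation after a
	 * membar_producer, so read the generation first and pair it
	 * with membar_consumer before using the data.
	 */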
6984 	gen = sc->sc_calibration_gen;
6985 	membar_consumer();
6986 	c = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];
6987 
6988 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
6989 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
6990 
6991 	rxfree = 0;
6992 	txfree = 0;
6993 	while ((cqe = mcx_next_cq_entry(sc, cq))) {
6994 		uint8_t opcode;
6995 		opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
6996 		switch (opcode) {
6997 		case MCX_CQ_ENTRY_OPCODE_REQ:
6998 			txfree += mcx_process_txeof(sc, tx, cqe);
6999 			break;
7000 		case MCX_CQ_ENTRY_OPCODE_SEND:
7001 			rxfree += mcx_process_rx(sc, rx, cqe, &ml, c);
7002 			break;
7003 		case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
7004 		case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
7005 			cqp = (uint8_t *)cqe;
7006 			/* printf("%s: cq completion error: %x\n",
7007 			    DEVNAME(sc), cqp[0x37]); */
7008 			break;
7009 
7010 		default:
7011 			/* printf("%s: cq completion opcode %x??\n",
7012 			    DEVNAME(sc), opcode); */
7013 			break;
7014 		}
7015 
7016 		cq->cq_cons++;
7017 	}
7018 
7019 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
7020 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
7021 
7022 	if (rxfree > 0) {
7023 		if_rxr_put(&rx->rx_rxr, rxfree);
7024 		if (ifiq_input(rx->rx_ifiq, &ml))
7025 			if_rxr_livelocked(&rx->rx_rxr);
7026 
7027 		mcx_rx_fill(sc, rx);
7028 		if (if_rxr_inuse(&rx->rx_rxr) == 0)
7029 			timeout_add(&rx->rx_refill, 1);
7030 	}
7031 
7032 	cq->cq_count++;
7033 	mcx_arm_cq(sc, cq, q->q_uar);
7034 
7035 	if (txfree > 0) {
7036 		tx->tx_cons += txfree;
7037 		if (ifq_is_oactive(tx->tx_ifq))
7038 			ifq_restart(tx->tx_ifq);
7039 	}
7040 }
7041 
7043 static void
7044 mcx_arm_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar)
7045 {
7046 	bus_size_t offset;
7047 	uint32_t val;
7048 
7049 	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_EQ_DOORBELL_ARM;
7050 	val = (eq->eq_n << 24) | (eq->eq_cons & 0xffffff);
7051 
7052 	mcx_wr(sc, offset, val);
7053 	mcx_bar(sc, offset, sizeof(val), BUS_SPACE_BARRIER_WRITE);
7054 }
7055 
7056 static struct mcx_eq_entry *
7057 mcx_next_eq_entry(struct mcx_softc *sc, struct mcx_eq *eq)
7058 {
7059 	struct mcx_eq_entry *eqe;
7060 	int next;
7061 
7062 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
7063 	next = eq->eq_cons % (1 << MCX_LOG_EQ_SIZE);
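	/* same ownership trick as the cq: the owner bit flips every lap */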
7064 	if ((eqe[next].eq_owner & 1) ==
7065 	    ((eq->eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
7066 		eq->eq_cons++;
7067 		return (&eqe[next]);
7068 	}
7069 	return (NULL);
7070 }
7071 
7072 int
7073 mcx_admin_intr(void *xsc)
7074 {
7075 	struct mcx_softc *sc = (struct mcx_softc *)xsc;
7076 	struct mcx_eq *eq = &sc->sc_admin_eq;
7077 	struct mcx_eq_entry *eqe;
7078 
7079 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7080 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
7081 
7082 	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
7083 		switch (eqe->eq_event_type) {
7084 		case MCX_EVENT_TYPE_LAST_WQE:
7085 			/* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
7086 			break;
7087 
7088 		case MCX_EVENT_TYPE_CQ_ERROR:
7089 			/* printf("%s: cq error\n", DEVNAME(sc)); */
7090 			break;
7091 
7092 		case MCX_EVENT_TYPE_CMD_COMPLETION:
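			/* wake up whoever is waiting on a command */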
7093 			mtx_enter(&sc->sc_cmdq_mtx);
7094 			wakeup(&sc->sc_cmdq_token);
7095 			mtx_leave(&sc->sc_cmdq_mtx);
7096 			break;
7097 
7098 		case MCX_EVENT_TYPE_PORT_CHANGE:
7099 			task_add(systq, &sc->sc_port_change);
7100 			break;
7101 
7102 		default:
7103 			/* printf("%s: something happened\n", DEVNAME(sc)); */
7104 			break;
7105 		}
7106 	}
7107 
7108 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7109 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
7110 
7111 	mcx_arm_eq(sc, eq, sc->sc_uar);
7112 
7113 	return (1);
7114 }
7115 
7116 int
7117 mcx_cq_intr(void *xq)
7118 {
7119 	struct mcx_queues *q = (struct mcx_queues *)xq;
7120 	struct mcx_softc *sc = q->q_sc;
7121 	struct mcx_eq *eq = &q->q_eq;
7122 	struct mcx_eq_entry *eqe;
7123 	int cqn;
7124 
7125 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7126 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
7127 
7128 	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
7129 		switch (eqe->eq_event_type) {
7130 		case MCX_EVENT_TYPE_COMPLETION:
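			/* completion events carry the cqn in the event data */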
7131 			cqn = betoh32(eqe->eq_event_data[6]);
7132 			if (cqn == q->q_cq.cq_n)
7133 				mcx_process_cq(sc, q, &q->q_cq);
7134 			break;
7135 		}
7136 	}
7137 
7138 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7139 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
7140 
7141 	mcx_arm_eq(sc, eq, q->q_uar);
7142 
7143 	return (1);
7144 }
7145 
7146 static void
7147 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
7148     int total)
7149 {
	struct mcx_slot *ms;
	int i = allocated;

7153 	while (i-- > 0) {
7154 		ms = &slots[i];
7155 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
7156 		if (ms->ms_m != NULL)
7157 			m_freem(ms->ms_m);
7158 	}
7159 	free(slots, M_DEVBUF, total * sizeof(*ms));
7160 }
7161 
7162 static int
7163 mcx_queue_up(struct mcx_softc *sc, struct mcx_queues *q)
7164 {
7165 	struct mcx_rx *rx;
7166 	struct mcx_tx *tx;
7167 	struct mcx_slot *ms;
7168 	int i;
7169 
7170 	rx = &q->q_rx;
	rx->rx_slots = mallocarray((1 << MCX_LOG_RQ_SIZE), sizeof(*ms),
7172 	    M_DEVBUF, M_WAITOK | M_ZERO);
7173 	if (rx->rx_slots == NULL) {
7174 		printf("%s: failed to allocate rx slots\n", DEVNAME(sc));
7175 		return ENOMEM;
7176 	}
7177 
7178 	for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
7179 		ms = &rx->rx_slots[i];
7180 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
7181 		    sc->sc_hardmtu, 0,
7182 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
7183 		    &ms->ms_map) != 0) {
7184 			printf("%s: failed to allocate rx dma maps\n",
7185 			    DEVNAME(sc));
7186 			goto destroy_rx_slots;
7187 		}
7188 	}
7189 
7190 	tx = &q->q_tx;
	tx->tx_slots = mallocarray((1 << MCX_LOG_SQ_SIZE), sizeof(*ms),
7192 	    M_DEVBUF, M_WAITOK | M_ZERO);
7193 	if (tx->tx_slots == NULL) {
7194 		printf("%s: failed to allocate tx slots\n", DEVNAME(sc));
7195 		goto destroy_rx_slots;
7196 	}
7197 
7198 	for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
7199 		ms = &tx->tx_slots[i];
7200 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
7201 		    MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
7202 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
7203 		    &ms->ms_map) != 0) {
7204 			printf("%s: failed to allocate tx dma maps\n",
7205 			    DEVNAME(sc));
7206 			goto destroy_tx_slots;
7207 		}
7208 	}
7209 
7210 	if (mcx_create_cq(sc, &q->q_cq, q->q_uar, q->q_index,
7211 	    q->q_eq.eq_n) != 0)
7212 		goto destroy_tx_slots;
7213 
7214 	if (mcx_create_sq(sc, tx, q->q_uar, q->q_index, q->q_cq.cq_n)
7215 	    != 0)
7216 		goto destroy_cq;
7217 
7218 	if (mcx_create_rq(sc, rx, q->q_index, q->q_cq.cq_n) != 0)
7219 		goto destroy_sq;
7220 
7221 	return 0;
7222 
7223 destroy_sq:
7224 	mcx_destroy_sq(sc, tx);
7225 destroy_cq:
7226 	mcx_destroy_cq(sc, &q->q_cq);
7227 destroy_tx_slots:
7228 	mcx_free_slots(sc, tx->tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
7229 	tx->tx_slots = NULL;
7230 
7231 	i = (1 << MCX_LOG_RQ_SIZE);
7232 destroy_rx_slots:
7233 	mcx_free_slots(sc, rx->rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
7234 	rx->rx_slots = NULL;
7235 	return ENOMEM;
7236 }
7237 
7238 static int
7239 mcx_rss_group_entry_count(struct mcx_softc *sc, int group)
7240 {
7241 	int i;
7242 	int count;
7243 
7244 	count = 0;
7245 	for (i = 0; i < nitems(mcx_rss_config); i++) {
7246 		if (mcx_rss_config[i].flow_group == group)
7247 			count++;
7248 	}
7249 
7250 	return count;
7251 }
7252 
7253 static int
7254 mcx_up(struct mcx_softc *sc)
7255 {
7256 	struct ifnet *ifp = &sc->sc_ac.ac_if;
7257 	struct mcx_rx *rx;
7258 	struct mcx_tx *tx;
7259 	int i, start, count, flow_group, flow_index;
7260 	struct mcx_flow_match match_crit;
7261 	struct mcx_rss_rule *rss;
7262 	uint32_t dest;
7263 	int rqns[MCX_MAX_QUEUES];
7264 
7265 	if (mcx_create_tis(sc, &sc->sc_tis) != 0)
7266 		goto down;
7267 
7268 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7269 		if (mcx_queue_up(sc, &sc->sc_queues[i]) != 0) {
7270 			goto down;
7271 		}
7272 	}
7273 
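	/*
	 * packet steering is a two level lookup: the root flow table
	 * matches on destination mac address and sends everything we
	 * accept to the RSS flow table, which splits traffic up by
	 * ethertype and ip protocol and spreads it across the RQT.
	 */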
7274 	/* RSS flow table and flow groups */
7275 	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 1,
7276 	    &sc->sc_rss_flow_table_id) != 0)
7277 		goto down;
7278 
7279 	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
7280 	    sc->sc_rss_flow_table_id;
7281 
7282 	/* L4 RSS flow group (v4/v6 tcp/udp, no fragments) */
7283 	memset(&match_crit, 0, sizeof(match_crit));
7284 	match_crit.mc_ethertype = 0xffff;
7285 	match_crit.mc_ip_proto = 0xff;
7286 	match_crit.mc_vlan_flags = MCX_FLOW_MATCH_IP_FRAG;
7287 	start = 0;
7288 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L4);
7289 	if (count != 0) {
7290 		if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7291 		    MCX_FLOW_GROUP_RSS_L4, start, count,
7292 		    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7293 			goto down;
7294 		start += count;
7295 	}
7296 
7297 	/* L3 RSS flow group (v4/v6, including fragments) */
7298 	memset(&match_crit, 0, sizeof(match_crit));
7299 	match_crit.mc_ethertype = 0xffff;
7300 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L3);
7301 	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7302 	    MCX_FLOW_GROUP_RSS_L3, start, count,
7303 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7304 		goto down;
7305 	start += count;
7306 
7307 	/* non-RSS flow group */
7308 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_NONE);
7309 	memset(&match_crit, 0, sizeof(match_crit));
7310 	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7311 	    MCX_FLOW_GROUP_RSS_NONE, start, count, 0, &match_crit) != 0)
7312 		goto down;
7313 
7314 	/* Root flow table, matching packets based on mac address */
7315 	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 0,
7316 	    &sc->sc_mac_flow_table_id) != 0)
7317 		goto down;
7318 
7319 	/* promisc flow group */
7320 	start = 0;
7321 	memset(&match_crit, 0, sizeof(match_crit));
7322 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7323 	    MCX_FLOW_GROUP_PROMISC, start, 1, 0, &match_crit) != 0)
7324 		goto down;
7325 	sc->sc_promisc_flow_enabled = 0;
7326 	start++;
7327 
7328 	/* all multicast flow group */
7329 	match_crit.mc_dest_mac[0] = 0x01;
7330 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7331 	    MCX_FLOW_GROUP_ALLMULTI, start, 1,
7332 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7333 		goto down;
7334 	sc->sc_allmulti_flow_enabled = 0;
7335 	start++;
7336 
7337 	/* mac address matching flow group */
7338 	memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
7339 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7340 	    MCX_FLOW_GROUP_MAC, start, (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
7341 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7342 		goto down;
7343 
7344 	/* flow table entries for unicast and broadcast */
7345 	start = 0;
7346 	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
7347 	    sc->sc_ac.ac_enaddr, dest) != 0)
7348 		goto down;
7349 	start++;
7350 
7351 	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
7352 	    etherbroadcastaddr, dest) != 0)
7353 		goto down;
7354 	start++;
7355 
7356 	/* multicast entries go after that */
7357 	sc->sc_mcast_flow_base = start;
7358 
7359 	/* re-add any existing multicast flows */
7360 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7361 		if (sc->sc_mcast_flows[i][0] != 0) {
7362 			mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC,
7363 			    sc->sc_mcast_flow_base + i,
7364 			    sc->sc_mcast_flows[i], dest);
7365 		}
7366 	}
7367 
7368 	if (mcx_set_flow_table_root(sc, sc->sc_mac_flow_table_id) != 0)
7369 		goto down;
7370 
7371 	/*
7372 	 * the RQT can be any size as long as it's a power of two.
7373 	 * since we also restrict the number of queues to a power of two,
7374 	 * we can just put each rx queue in once.
7375 	 */
7376 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++)
7377 		rqns[i] = sc->sc_queues[i].q_rx.rx_rqn;
7378 
7379 	if (mcx_create_rqt(sc, intrmap_count(sc->sc_intrmap), rqns,
7380 	    &sc->sc_rqt) != 0)
7381 		goto down;
7382 
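	/*
	 * create a tir for each rss rule, plus a flow table entry
	 * sending matching packets to it.  rules with no hash select
	 * get a direct tir on queue 0; entries are numbered from 0
	 * within each flow group.
	 */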
7383 	start = 0;
7384 	flow_index = 0;
7385 	flow_group = -1;
7386 	for (i = 0; i < nitems(mcx_rss_config); i++) {
7387 		rss = &mcx_rss_config[i];
7388 		if (rss->flow_group != flow_group) {
7389 			flow_group = rss->flow_group;
7390 			flow_index = 0;
7391 		}
7392 
7393 		if (rss->hash_sel == 0) {
7394 			if (mcx_create_tir_direct(sc, &sc->sc_queues[0].q_rx,
7395 			    &sc->sc_tir[i]) != 0)
7396 				goto down;
7397 		} else {
7398 			if (mcx_create_tir_indirect(sc, sc->sc_rqt,
7399 			    rss->hash_sel, &sc->sc_tir[i]) != 0)
7400 				goto down;
7401 		}
7402 
7403 		if (mcx_set_flow_table_entry_proto(sc, flow_group,
7404 		    flow_index, rss->ethertype, rss->ip_proto,
7405 		    MCX_FLOW_CONTEXT_DEST_TYPE_TIR | sc->sc_tir[i]) != 0)
7406 			goto down;
7407 		flow_index++;
7408 	}
7409 
7410 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7411 		struct mcx_queues *q = &sc->sc_queues[i];
7412 		rx = &q->q_rx;
7413 		tx = &q->q_tx;
7414 
7415 		/* start the queues */
7416 		if (mcx_ready_sq(sc, tx) != 0)
7417 			goto down;
7418 
7419 		if (mcx_ready_rq(sc, rx) != 0)
7420 			goto down;
7421 
7422 		if_rxr_init(&rx->rx_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
7423 		rx->rx_prod = 0;
7424 		mcx_rx_fill(sc, rx);
7425 
7426 		tx->tx_cons = 0;
7427 		tx->tx_prod = 0;
7428 		ifq_clr_oactive(tx->tx_ifq);
7429 	}
7430 
7431 	mcx_calibrate_first(sc);
7432 
7433 	SET(ifp->if_flags, IFF_RUNNING);
7434 
7435 	return ENETRESET;
7436 down:
7437 	mcx_down(sc);
7438 	return ENOMEM;
7439 }
7440 
7441 static void
7442 mcx_down(struct mcx_softc *sc)
7443 {
7444 	struct ifnet *ifp = &sc->sc_ac.ac_if;
7445 	struct mcx_rss_rule *rss;
7446 	int group, i, flow_group, flow_index;
7447 
7448 	CLR(ifp->if_flags, IFF_RUNNING);
7449 
7450 	/*
7451 	 * delete flow table entries first, so no packets can arrive
7452 	 * after the barriers
7453 	 */
7454 	if (sc->sc_promisc_flow_enabled)
7455 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
7456 	if (sc->sc_allmulti_flow_enabled)
7457 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
7458 	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
7459 	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
7460 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7461 		if (sc->sc_mcast_flows[i][0] != 0) {
7462 			mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
7463 			    sc->sc_mcast_flow_base + i);
7464 		}
7465 	}
7466 
7467 	flow_group = -1;
7468 	flow_index = 0;
7469 	for (i = 0; i < nitems(mcx_rss_config); i++) {
7470 		rss = &mcx_rss_config[i];
7471 		if (rss->flow_group != flow_group) {
7472 			flow_group = rss->flow_group;
7473 			flow_index = 0;
7474 		}
7475 
7476 		mcx_delete_flow_table_entry(sc, flow_group, flow_index);
7477 
7478 		mcx_destroy_tir(sc, sc->sc_tir[i]);
7479 		sc->sc_tir[i] = 0;
7480 
7481 		flow_index++;
7482 	}
7483 	intr_barrier(sc->sc_ihc);
7484 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7485 		struct ifqueue *ifq = sc->sc_queues[i].q_tx.tx_ifq;
7486 		ifq_barrier(ifq);
7487 
7488 		timeout_del_barrier(&sc->sc_queues[i].q_rx.rx_refill);
7489 
7490 		intr_barrier(sc->sc_queues[i].q_ihc);
7491 	}
7492 
7493 	timeout_del_barrier(&sc->sc_calibrate);
7494 
7495 	for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
7496 		if (sc->sc_flow_group[group].g_id != -1)
7497 			mcx_destroy_flow_group(sc, group);
7498 	}
7499 
7500 	if (sc->sc_mac_flow_table_id != -1) {
7501 		mcx_destroy_flow_table(sc, sc->sc_mac_flow_table_id);
7502 		sc->sc_mac_flow_table_id = -1;
7503 	}
7504 	if (sc->sc_rss_flow_table_id != -1) {
7505 		mcx_destroy_flow_table(sc, sc->sc_rss_flow_table_id);
7506 		sc->sc_rss_flow_table_id = -1;
7507 	}
7508 	if (sc->sc_rqt != -1) {
7509 		mcx_destroy_rqt(sc, sc->sc_rqt);
7510 		sc->sc_rqt = -1;
7511 	}
7512 
7513 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7514 		struct mcx_queues *q = &sc->sc_queues[i];
7515 		struct mcx_rx *rx = &q->q_rx;
7516 		struct mcx_tx *tx = &q->q_tx;
7517 		struct mcx_cq *cq = &q->q_cq;
7518 
7519 		if (rx->rx_rqn != 0)
7520 			mcx_destroy_rq(sc, rx);
7521 
7522 		if (tx->tx_sqn != 0)
7523 			mcx_destroy_sq(sc, tx);
7524 
7525 		if (tx->tx_slots != NULL) {
7526 			mcx_free_slots(sc, tx->tx_slots,
7527 			    (1 << MCX_LOG_SQ_SIZE), (1 << MCX_LOG_SQ_SIZE));
7528 			tx->tx_slots = NULL;
7529 		}
7530 		if (rx->rx_slots != NULL) {
7531 			mcx_free_slots(sc, rx->rx_slots,
7532 			    (1 << MCX_LOG_RQ_SIZE), (1 << MCX_LOG_RQ_SIZE));
7533 			rx->rx_slots = NULL;
7534 		}
7535 
7536 		if (cq->cq_n != 0)
7537 			mcx_destroy_cq(sc, cq);
7538 	}
7539 	if (sc->sc_tis != 0) {
7540 		mcx_destroy_tis(sc, sc->sc_tis);
7541 		sc->sc_tis = 0;
7542 	}
7543 }
7544 
7545 static int
7546 mcx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
7547 {
7548 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7549 	struct ifreq *ifr = (struct ifreq *)data;
7550 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
7551 	int s, i, error = 0;
7552 	uint32_t dest;
7553 
7554 	s = splnet();
7555 	switch (cmd) {
7556 	case SIOCSIFADDR:
7557 		ifp->if_flags |= IFF_UP;
7558 		/* FALLTHROUGH */
7559 
7560 	case SIOCSIFFLAGS:
7561 		if (ISSET(ifp->if_flags, IFF_UP)) {
7562 			if (ISSET(ifp->if_flags, IFF_RUNNING))
7563 				error = ENETRESET;
7564 			else
7565 				error = mcx_up(sc);
7566 		} else {
7567 			if (ISSET(ifp->if_flags, IFF_RUNNING))
7568 				mcx_down(sc);
7569 		}
7570 		break;
7571 
7572 	case SIOCGIFMEDIA:
7573 	case SIOCSIFMEDIA:
7574 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
7575 		break;
7576 
7577 	case SIOCGIFSFFPAGE:
7578 		error = mcx_get_sffpage(ifp, (struct if_sffpage *)data);
7579 		break;
7580 
7581 	case SIOCGIFRXR:
7582 		error = mcx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
7583 		break;
7584 
7585 	case SIOCADDMULTI:
7586 		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
7587 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
7588 			if (error != 0)
7589 				break;
7590 
7591 			dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
7592 			    sc->sc_rss_flow_table_id;
7593 
7594 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7595 				if (sc->sc_mcast_flows[i][0] == 0) {
7596 					memcpy(sc->sc_mcast_flows[i], addrlo,
7597 					    ETHER_ADDR_LEN);
7598 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
7599 						mcx_set_flow_table_entry_mac(sc,
7600 						    MCX_FLOW_GROUP_MAC,
7601 						    sc->sc_mcast_flow_base + i,
7602 						    sc->sc_mcast_flows[i], dest);
7603 					}
7604 					break;
7605 				}
7606 			}
7607 
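			/*
			 * fall back to allmulti if the flow table is
			 * full or a multicast range was requested
			 */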
7608 			if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
7609 				if (i == MCX_NUM_MCAST_FLOWS) {
7610 					SET(ifp->if_flags, IFF_ALLMULTI);
7611 					sc->sc_extra_mcast++;
7612 					error = ENETRESET;
7613 				}
7614 
7615 				if (sc->sc_ac.ac_multirangecnt > 0) {
7616 					SET(ifp->if_flags, IFF_ALLMULTI);
7617 					error = ENETRESET;
7618 				}
7619 			}
7620 		}
7621 		break;
7622 
7623 	case SIOCDELMULTI:
7624 		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
7625 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
7626 			if (error != 0)
7627 				break;
7628 
7629 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7630 				if (memcmp(sc->sc_mcast_flows[i], addrlo,
7631 				    ETHER_ADDR_LEN) == 0) {
7632 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
7633 						mcx_delete_flow_table_entry(sc,
7634 						    MCX_FLOW_GROUP_MAC,
7635 						    sc->sc_mcast_flow_base + i);
7636 					}
7637 					sc->sc_mcast_flows[i][0] = 0;
7638 					break;
7639 				}
7640 			}
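			/*
			 * the address wasn't in the flow table, so it
			 * was one of the extras counted when it was
			 * added
			 */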
7641 
7642 			if (i == MCX_NUM_MCAST_FLOWS)
7643 				sc->sc_extra_mcast--;
7644 
7645 			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
7646 			    (sc->sc_extra_mcast == 0) &&
7647 			    (sc->sc_ac.ac_multirangecnt == 0)) {
7648 				CLR(ifp->if_flags, IFF_ALLMULTI);
7649 				error = ENETRESET;
7650 			}
7651 		}
7652 		break;
7653 
7654 	default:
7655 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
7656 	}
7657 
7658 	if (error == ENETRESET) {
7659 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7660 		    (IFF_UP | IFF_RUNNING))
7661 			mcx_iff(sc);
7662 		error = 0;
7663 	}
7664 	splx(s);
7665 
7666 	return (error);
7667 }
7668 
7669 static int
7670 mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
7671 {
7672 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7673 	struct mcx_reg_mcia mcia;
7674 	struct mcx_reg_pmlp pmlp;
7675 	int offset, error;
7676 
7677 	rw_enter_write(&sc->sc_cmdq_ioctl_lk);
7678 
7679 	/* get module number */
7680 	memset(&pmlp, 0, sizeof(pmlp));
7681 	pmlp.rp_local_port = 1;
7682 	error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
7683 	    sizeof(pmlp), MCX_CMDQ_SLOT_IOCTL);
7684 	if (error != 0) {
7685 		printf("%s: unable to get eeprom module number\n",
7686 		    DEVNAME(sc));
7687 		goto out;
7688 	}
7689 
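	/* read the whole page, MCX_MCIA_EEPROM_BYTES at a time */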
7690 	for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
7691 		memset(&mcia, 0, sizeof(mcia));
7692 		mcia.rm_l = 0;
7693 		mcia.rm_module = betoh32(pmlp.rp_lane0_mapping) &
7694 		    MCX_PMLP_MODULE_NUM_MASK;
7695 		mcia.rm_i2c_addr = sff->sff_addr / 2;	/* 8 to 7 bit address, apparently */
7696 		mcia.rm_page_num = sff->sff_page;
7697 		mcia.rm_dev_addr = htobe16(offset);
7698 		mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);
7699 
7700 		error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
7701 		    &mcia, sizeof(mcia), MCX_CMDQ_SLOT_IOCTL);
7702 		if (error != 0) {
7703 			printf("%s: unable to read eeprom at %x\n",
7704 			    DEVNAME(sc), offset);
7705 			goto out;
7706 		}
7707 
7708 		memcpy(sff->sff_data + offset, mcia.rm_data,
7709 		    MCX_MCIA_EEPROM_BYTES);
7710 	}
7711 
7712  out:
7713 	rw_exit_write(&sc->sc_cmdq_ioctl_lk);
7714 	return (error);
7715 }
7716 
7717 static int
7718 mcx_rxrinfo(struct mcx_softc *sc, struct if_rxrinfo *ifri)
7719 {
7720 	struct if_rxring_info *ifrs;
7721 	unsigned int i;
7722 	int error;
7723 
7724 	ifrs = mallocarray(intrmap_count(sc->sc_intrmap), sizeof(*ifrs),
7725 	    M_TEMP, M_WAITOK|M_ZERO|M_CANFAIL);
7726 	if (ifrs == NULL)
7727 		return (ENOMEM);
7728 
7729 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7730 		struct mcx_rx *rx = &sc->sc_queues[i].q_rx;
7731 		struct if_rxring_info *ifr = &ifrs[i];
7732 
7733 		snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i);
7734 		ifr->ifr_size = sc->sc_hardmtu;
7735 		ifr->ifr_info = rx->rx_rxr;
7736 	}
7737 
7738 	error = if_rxr_info_ioctl(ifri, i, ifrs);
7739 	free(ifrs, M_TEMP, i * sizeof(*ifrs));
7740 
7741 	return (error);
7742 }
7743 
7744 int
7745 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
7746 {
7747 	switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
7748 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
7749 	case 0:
7750 		break;
7751 
7752 	case EFBIG:
7753 		if (m_defrag(m, M_DONTWAIT) == 0 &&
7754 		    bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
7755 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
7756 			break;
7757 
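		/* FALLTHROUGH */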
7758 	default:
7759 		return (1);
7760 	}
7761 
7762 	ms->ms_m = m;
7763 	return (0);
7764 }
7765 
7766 static void
7767 mcx_start(struct ifqueue *ifq)
7768 {
7769 	struct mcx_tx *tx = ifq->ifq_softc;
7770 	struct ifnet *ifp = ifq->ifq_if;
7771 	struct mcx_softc *sc = ifp->if_softc;
7772 	struct mcx_sq_entry *sq, *sqe;
7773 	struct mcx_sq_entry_seg *sqs;
7774 	struct mcx_slot *ms;
7775 	bus_dmamap_t map;
7776 	struct mbuf *m;
7777 	u_int idx, free, used;
7778 	uint64_t *bf;
7779 	uint32_t csum;
7780 	size_t bf_base;
7781 	int i, seg, nseg;
7782 
7783 	bf_base = (tx->tx_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;
7784 
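	/*
	 * tx_prod and tx_cons are free running counters, so the
	 * number of free slots is the ring size less the entries
	 * still in flight.
	 */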
7785 	idx = tx->tx_prod % (1 << MCX_LOG_SQ_SIZE);
7786 	free = (tx->tx_cons + (1 << MCX_LOG_SQ_SIZE)) - tx->tx_prod;
7787 
7788 	used = 0;
7789 	bf = NULL;
7790 
7791 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
7792 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
7793 
7794 	sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&tx->tx_sq_mem);
7795 
7796 	for (;;) {
7797 		if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
7798 			ifq_set_oactive(ifq);
7799 			break;
7800 		}
7801 
7802 		m = ifq_dequeue(ifq);
7803 		if (m == NULL) {
7804 			break;
7805 		}
7806 
7807 		sqe = sq + idx;
7808 		ms = &tx->tx_slots[idx];
7809 		memset(sqe, 0, sizeof(*sqe));
7810 
7811 		/* ctrl segment */
7812 		sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
7813 		    ((tx->tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
7814 		/* always generate a completion event */
7815 		sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
7816 
7817 		/* eth segment */
7818 		csum = 0;
7819 		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
7820 			csum |= MCX_SQE_L3_CSUM;
7821 		if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
7822 			csum |= MCX_SQE_L4_CSUM;
7823 		sqe->sqe_mss_csum = htobe32(csum);
7824 		sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
7825 #if NVLAN > 0
7826 		if (m->m_flags & M_VLANTAG) {
7827 			struct ether_vlan_header *evh;
7828 			evh = (struct ether_vlan_header *)
7829 			    &sqe->sqe_inline_headers;
7830 
7831 			/* slightly cheaper vlan_inject() */
7832 			m_copydata(m, 0, ETHER_HDR_LEN, evh);
7833 			evh->evl_proto = evh->evl_encap_proto;
7834 			evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
7835 			evh->evl_tag = htons(m->m_pkthdr.ether_vtag);
7836 
7837 			m_adj(m, ETHER_HDR_LEN);
7838 		} else
7839 #endif
7840 		{
7841 			m_copydata(m, 0, MCX_SQ_INLINE_SIZE,
7842 			    sqe->sqe_inline_headers);
7843 			m_adj(m, MCX_SQ_INLINE_SIZE);
7844 		}
7845 
7846 		if (mcx_load_mbuf(sc, ms, m) != 0) {
7847 			m_freem(m);
7848 			ifp->if_oerrors++;
7849 			continue;
7850 		}
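		/* remember the start of the sqe for the blue flame write */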
7851 		bf = (uint64_t *)sqe;
7852 
7853 #if NBPFILTER > 0
7854 		if (ifp->if_bpf)
7855 			bpf_mtap_hdr(ifp->if_bpf,
7856 			    (caddr_t)sqe->sqe_inline_headers,
7857 			    MCX_SQ_INLINE_SIZE, m, BPF_DIRECTION_OUT);
7858 #endif
7859 		map = ms->ms_map;
7860 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
7861 		    BUS_DMASYNC_PREWRITE);
7862 
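		/*
		 * the descriptor count is in 16 byte units; the ctrl
		 * segment and the eth segment with the inline headers
		 * account for the extra 3 on top of the dma segments.
		 */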
7863 		sqe->sqe_ds_sq_num =
7864 		    htobe32((tx->tx_sqn << MCX_SQE_SQ_NUM_SHIFT) |
7865 		    (map->dm_nsegs + 3));
7866 
7867 		/* data segment - first wqe has one segment */
7868 		sqs = sqe->sqe_segs;
7869 		seg = 0;
7870 		nseg = 1;
7871 		for (i = 0; i < map->dm_nsegs; i++) {
7872 			if (seg == nseg) {
7873 				/* next slot */
7874 				idx++;
7875 				if (idx == (1 << MCX_LOG_SQ_SIZE))
7876 					idx = 0;
7877 				tx->tx_prod++;
7878 				used++;
7879 
7880 				sqs = (struct mcx_sq_entry_seg *)(sq + idx);
7881 				seg = 0;
7882 				nseg = MCX_SQ_SEGS_PER_SLOT;
7883 			}
7884 			sqs[seg].sqs_byte_count =
7885 			    htobe32(map->dm_segs[i].ds_len);
7886 			sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
7887 			sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
7888 			seg++;
7889 		}
7890 
7891 		idx++;
7892 		if (idx == (1 << MCX_LOG_SQ_SIZE))
7893 			idx = 0;
7894 		tx->tx_prod++;
7895 		used++;
7896 	}
7897 
7898 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
7899 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
7900 
7901 	if (used) {
7902 		bus_size_t blueflame;
7903 
7904 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7905 		    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
7906 		htobem32(MCX_DMA_OFF(&sc->sc_doorbell_mem, tx->tx_doorbell),
7907 		    tx->tx_prod & MCX_WQ_DOORBELL_MASK);
7908 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7909 		    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
7910 
7911 		/*
7912 		 * write the first 64 bits of the last sqe we produced
7913 		 * to the blue flame buffer
7914 		 */
7915 
7916 		blueflame = bf_base + tx->tx_bf_offset;
7917 		bus_space_write_raw_8(sc->sc_memt, sc->sc_memh,
7918 		    blueflame, *bf);
7919 		mcx_bar(sc, blueflame, sizeof(*bf), BUS_SPACE_BARRIER_WRITE);
7920 
7921 		/* next write goes to the other buffer */
7922 		tx->tx_bf_offset ^= sc->sc_bf_size;
7923 	}
7924 }
7925 
7926 static void
7927 mcx_watchdog(struct ifnet *ifp)
7928 {
7929 }
7930 
7931 static void
7932 mcx_media_add_types(struct mcx_softc *sc)
7933 {
7934 	struct mcx_reg_ptys ptys;
7935 	int i;
7936 	uint32_t proto_cap;
7937 
7938 	memset(&ptys, 0, sizeof(ptys));
7939 	ptys.rp_local_port = 1;
7940 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
7941 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
7942 	    sizeof(ptys), MCX_CMDQ_SLOT_POLL) != 0) {
7943 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
7944 		return;
7945 	}
7946 
7947 	proto_cap = betoh32(ptys.rp_eth_proto_cap);
7948 	for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
7949 		const struct mcx_eth_proto_capability *cap;
7950 		if (!ISSET(proto_cap, 1 << i))
7951 			continue;
7952 
7953 		cap = &mcx_eth_cap_map[i];
7954 		if (cap->cap_media == 0)
7955 			continue;
7956 
7957 		ifmedia_add(&sc->sc_media, IFM_ETHER | cap->cap_media, 0, NULL);
7958 	}
7959 }
7960 
7961 static void
7962 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
7963 {
7964 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7965 	struct mcx_reg_ptys ptys;
7966 	int i;
7967 	uint32_t proto_oper;
7968 	uint64_t media_oper;
7969 
7970 	memset(&ptys, 0, sizeof(ptys));
7971 	ptys.rp_local_port = 1;
7972 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
7973 
7974 	rw_enter_write(&sc->sc_cmdq_ioctl_lk);
7975 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
7976 	    sizeof(ptys), MCX_CMDQ_SLOT_IOCTL) != 0) {
7977 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
7978 		goto out;
7979 	}
7980 
7981 	proto_oper = betoh32(ptys.rp_eth_proto_oper);
7982 
7983 	media_oper = 0;
7984 
7985 	for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
7986 		const struct mcx_eth_proto_capability *cap;
7987 		if (!ISSET(proto_oper, 1 << i))
7988 			continue;
7989 
7990 		cap = &mcx_eth_cap_map[i];
7991 
7992 		if (cap->cap_media != 0)
7993 			media_oper = cap->cap_media;
7994 	}
7995 
7996 	ifmr->ifm_status = IFM_AVALID;
7997 	if (proto_oper != 0) {
7998 		ifmr->ifm_status |= IFM_ACTIVE;
7999 		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
8000 		/* txpause, rxpause, duplex? */
8001 	}
8002  out:
8003 	rw_exit_write(&sc->sc_cmdq_ioctl_lk);
8004 }
8005 
8006 static int
8007 mcx_media_change(struct ifnet *ifp)
8008 {
8009 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
8010 	struct mcx_reg_ptys ptys;
8011 	struct mcx_reg_paos paos;
8012 	uint32_t media;
8013 	int i, error;
8014 
8015 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
8016 		return EINVAL;
8017 
8018 	error = 0;
8019 	rw_enter_write(&sc->sc_cmdq_ioctl_lk);
8020 
8021 	if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
8022 		/* read ptys to get supported media */
8023 		memset(&ptys, 0, sizeof(ptys));
8024 		ptys.rp_local_port = 1;
8025 		ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8026 		if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
8027 		    &ptys, sizeof(ptys), MCX_CMDQ_SLOT_IOCTL) != 0) {
8028 			printf("%s: unable to read port type/speed\n",
8029 			    DEVNAME(sc));
8030 			error = EIO;
8031 			goto out;
8032 		}
8033 
8034 		media = betoh32(ptys.rp_eth_proto_cap);
8035 	} else {
8036 		/* map media type */
8037 		media = 0;
8038 		for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
8039 			const struct mcx_eth_proto_capability *cap;
8040 
8041 			cap = &mcx_eth_cap_map[i];
8042 			if (cap->cap_media ==
8043 			    IFM_SUBTYPE(sc->sc_media.ifm_media)) {
8044 				media = (1 << i);
8045 				break;
8046 			}
8047 		}
8048 	}
8049 
8050 	/* disable the port */
8051 	memset(&paos, 0, sizeof(paos));
8052 	paos.rp_local_port = 1;
8053 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
8054 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
8055 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
8056 	    sizeof(paos), MCX_CMDQ_SLOT_IOCTL) != 0) {
8057 		printf("%s: unable to set port state to down\n", DEVNAME(sc));
8058 		error = EIO;
8059 		goto out;
8060 	}
8061 
8062 	memset(&ptys, 0, sizeof(ptys));
8063 	ptys.rp_local_port = 1;
8064 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8065 	ptys.rp_eth_proto_admin = htobe32(media);
8066 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
8067 	    sizeof(ptys), MCX_CMDQ_SLOT_IOCTL) != 0) {
8068 		printf("%s: unable to set port media type/speed\n",
8069 		    DEVNAME(sc));
8070 		error = EIO;
8071 		/* continue on */
8072 	}
8073 
8074 	/* re-enable the port to start negotiation */
8075 	memset(&paos, 0, sizeof(paos));
8076 	paos.rp_local_port = 1;
8077 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
8078 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
8079 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
8080 	    sizeof(paos), MCX_CMDQ_SLOT_IOCTL) != 0) {
8081 		printf("%s: unable to set port state to up\n", DEVNAME(sc));
8082 		error = EIO;
8083 	}
8084 
8085  out:
8086 	rw_exit_write(&sc->sc_cmdq_ioctl_lk);
8087 	return error;
8088 }
8089 
8090 static void
8091 mcx_port_change(void *xsc)
8092 {
8093 	struct mcx_softc *sc = xsc;
8094 	struct ifnet *ifp = &sc->sc_ac.ac_if;
8095 	struct mcx_reg_ptys ptys = {
8096 		.rp_local_port = 1,
8097 		.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH,
8098 	};
8099 	int link_state = LINK_STATE_DOWN;
8100 	int slot;
8101 
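	/* interrupts aren't running while cold, so poll the cmdq */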
8102 	if (cold)
8103 		slot = MCX_CMDQ_SLOT_POLL;
8104 	else
8105 		slot = MCX_CMDQ_SLOT_LINK;
8106 
8107 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
8108 	    sizeof(ptys), slot) == 0) {
8109 		uint32_t proto_oper = betoh32(ptys.rp_eth_proto_oper);
8110 		uint64_t baudrate = 0;
8111 		unsigned int i;
8112 
8113 		if (proto_oper != 0)
8114 			link_state = LINK_STATE_FULL_DUPLEX;
8115 
8116 		for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
8117 			const struct mcx_eth_proto_capability *cap;
8118 			if (!ISSET(proto_oper, 1 << i))
8119 				continue;
8120 
8121 			cap = &mcx_eth_cap_map[i];
8122 			if (cap->cap_baudrate == 0)
8123 				continue;
8124 
8125 			baudrate = cap->cap_baudrate;
8126 			break;
8127 		}
8128 
8129 		ifp->if_baudrate = baudrate;
8130 	}
8131 
8132 	if (link_state != ifp->if_link_state) {
8133 		ifp->if_link_state = link_state;
8134 		if_link_state_change(ifp);
8135 	}
8136 }
8137 
8138 static inline uint32_t
8139 mcx_rd(struct mcx_softc *sc, bus_size_t r)
8140 {
8141 	uint32_t word;
8142 
8143 	word = bus_space_read_raw_4(sc->sc_memt, sc->sc_memh, r);
8144 
8145 	return (betoh32(word));
8146 }
8147 
8148 static inline void
8149 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
8150 {
8151 	bus_space_write_raw_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
8152 }
8153 
8154 static inline void
8155 mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
8156 {
8157 	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
8158 }
8159 
8160 static uint64_t
8161 mcx_timer(struct mcx_softc *sc)
8162 {
8163 	uint32_t hi, lo, ni;
8164 
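	/*
	 * the 64 bit timer is split across two registers; reread the
	 * high word until it is stable so hi and lo form a consistent
	 * snapshot.
	 */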
8165 	hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
8166 	for (;;) {
8167 		lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
8168 		mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
8169 		ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
8170 
8171 		if (ni == hi)
8172 			break;
8173 
8174 		hi = ni;
8175 	}
8176 
8177 	return (((uint64_t)hi << 32) | (uint64_t)lo);
8178 }
8179 
8180 static int
8181 mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
8182     bus_size_t size, u_int align)
8183 {
8184 	mxm->mxm_size = size;
8185 
8186 	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
8187 	    mxm->mxm_size, 0,
8188 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
8189 	    &mxm->mxm_map) != 0)
8190 		return (1);
8191 	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
8192 	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
8193 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
8194 		goto destroy;
8195 	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
8196 	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
8197 		goto free;
8198 	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
8199 	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
8200 		goto unmap;
8201 
8202 	return (0);
8203 unmap:
8204 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
8205 free:
8206 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
8207 destroy:
8208 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
8209 	return (1);
8210 }
8211 
8212 static void
8213 mcx_dmamem_zero(struct mcx_dmamem *mxm)
8214 {
8215 	memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
8216 }
8217 
8218 static void
8219 mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
8220 {
8221 	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
8222 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
8223 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
8224 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
8225 }
8226 
8227 static int
8228 mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
8229 {
8230 	bus_dma_segment_t *segs;
8231 	bus_size_t len = pages * MCX_PAGE_SIZE;
8232 	size_t seglen;
8233 
8234 	segs = mallocarray(sizeof(*segs), pages, M_DEVBUF, M_WAITOK|M_CANFAIL);
8235 	if (segs == NULL)
8236 		return (-1);
8237 
8238 	seglen = sizeof(*segs) * pages;
8239 
8240 	if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
8241 	    segs, pages, &mhm->mhm_seg_count, BUS_DMA_NOWAIT) != 0)
8242 		goto free_segs;
8243 
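	/*
	 * bus_dmamem_alloc() may coalesce pages into fewer segments,
	 * so shrink the segment array to fit.
	 */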
8244 	if (mhm->mhm_seg_count < pages) {
8245 		size_t nseglen;
8246 
8247 		mhm->mhm_segs = mallocarray(sizeof(*mhm->mhm_segs),
8248 		    mhm->mhm_seg_count, M_DEVBUF, M_WAITOK|M_CANFAIL);
8249 		if (mhm->mhm_segs == NULL)
8250 			goto free_dmamem;
8251 
8252 		nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
8253 
8254 		memcpy(mhm->mhm_segs, segs, nseglen);
8255 
8256 		free(segs, M_DEVBUF, seglen);
8257 
8258 		segs = mhm->mhm_segs;
8259 		seglen = nseglen;
8260 	} else
8261 		mhm->mhm_segs = segs;
8262 
8263 	if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
8264 	    MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW /*|BUS_DMA_64BIT*/,
8265 	    &mhm->mhm_map) != 0)
8266 		goto free_dmamem;
8267 
8268 	if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
8269 	    mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
8270 		goto destroy;
8271 
8272 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
8273 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
8274 
8275 	mhm->mhm_npages = pages;
8276 
8277 	return (0);
8278 
8279 destroy:
8280 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
8281 free_dmamem:
8282 	bus_dmamem_free(sc->sc_dmat, segs, mhm->mhm_seg_count);
8283 free_segs:
8284 	free(segs, M_DEVBUF, seglen);
8285 	mhm->mhm_segs = NULL;
8286 
8287 	return (-1);
8288 }
8289 
8290 static void
8291 mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
8292 {
8293 	if (mhm->mhm_npages == 0)
8294 		return;
8295 
8296 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
8297 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);
8298 
8299 	bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
8300 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
8301 	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
8302 	free(mhm->mhm_segs, M_DEVBUF,
8303 	    sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);
8304 
8305 	mhm->mhm_npages = 0;
8306 }
8307 
8308 #if NKSTAT > 0
8309 struct mcx_ppcnt {
8310 	char			 name[KSTAT_KV_NAMELEN];
8311 	enum kstat_kv_unit	 unit;
8312 };
8313 
8314 static const struct mcx_ppcnt mcx_ppcnt_ieee8023_tpl[] = {
8315 	{ "Good Tx",		KSTAT_KV_U_PACKETS, },
8316 	{ "Good Rx",		KSTAT_KV_U_PACKETS, },
8317 	{ "FCS errs",		KSTAT_KV_U_PACKETS, },
8318 	{ "Alignment Errs",	KSTAT_KV_U_PACKETS, },
8319 	{ "Good Tx",		KSTAT_KV_U_BYTES, },
8320 	{ "Good Rx",		KSTAT_KV_U_BYTES, },
8321 	{ "Multicast Tx",	KSTAT_KV_U_PACKETS, },
8322 	{ "Broadcast Tx",	KSTAT_KV_U_PACKETS, },
8323 	{ "Multicast Rx",	KSTAT_KV_U_PACKETS, },
8324 	{ "Broadcast Rx",	KSTAT_KV_U_PACKETS, },
8325 	{ "In Range Len",	KSTAT_KV_U_PACKETS, },
8326 	{ "Out Of Range Len",	KSTAT_KV_U_PACKETS, },
8327 	{ "Frame Too Long",	KSTAT_KV_U_PACKETS, },
8328 	{ "Symbol Errs",	KSTAT_KV_U_PACKETS, },
8329 	{ "MAC Ctrl Tx",	KSTAT_KV_U_PACKETS, },
8330 	{ "MAC Ctrl Rx",	KSTAT_KV_U_PACKETS, },
8331 	{ "MAC Ctrl Unsup",	KSTAT_KV_U_PACKETS, },
8332 	{ "Pause Rx",		KSTAT_KV_U_PACKETS, },
8333 	{ "Pause Tx",		KSTAT_KV_U_PACKETS, },
8334 };
8335 CTASSERT(nitems(mcx_ppcnt_ieee8023_tpl) == mcx_ppcnt_ieee8023_count);
8336 
8337 static const struct mcx_ppcnt mcx_ppcnt_rfc2863_tpl[] = {
8338 	{ "Rx Bytes",		KSTAT_KV_U_BYTES, },
8339 	{ "Rx Unicast",		KSTAT_KV_U_PACKETS, },
8340 	{ "Rx Discards",	KSTAT_KV_U_PACKETS, },
8341 	{ "Rx Errors",		KSTAT_KV_U_PACKETS, },
8342 	{ "Rx Unknown Proto",	KSTAT_KV_U_PACKETS, },
8343 	{ "Tx Bytes",		KSTAT_KV_U_BYTES, },
8344 	{ "Tx Unicast",		KSTAT_KV_U_PACKETS, },
8345 	{ "Tx Discards",	KSTAT_KV_U_PACKETS, },
8346 	{ "Tx Errors",		KSTAT_KV_U_PACKETS, },
8347 	{ "Rx Multicast",	KSTAT_KV_U_PACKETS, },
8348 	{ "Rx Broadcast",	KSTAT_KV_U_PACKETS, },
8349 	{ "Tx Multicast",	KSTAT_KV_U_PACKETS, },
8350 	{ "Tx Broadcast",	KSTAT_KV_U_PACKETS, },
8351 };
8352 CTASSERT(nitems(mcx_ppcnt_rfc2863_tpl) == mcx_ppcnt_rfc2863_count);
8353 
8354 static const struct mcx_ppcnt mcx_ppcnt_rfc2819_tpl[] = {
8355 	{ "Drop Events",	KSTAT_KV_U_PACKETS, },
8356 	{ "Octets",		KSTAT_KV_U_BYTES, },
8357 	{ "Packets",		KSTAT_KV_U_PACKETS, },
8358 	{ "Broadcasts",		KSTAT_KV_U_PACKETS, },
8359 	{ "Multicasts",		KSTAT_KV_U_PACKETS, },
8360 	{ "CRC Align Errs",	KSTAT_KV_U_PACKETS, },
8361 	{ "Undersize",		KSTAT_KV_U_PACKETS, },
8362 	{ "Oversize",		KSTAT_KV_U_PACKETS, },
8363 	{ "Fragments",		KSTAT_KV_U_PACKETS, },
8364 	{ "Jabbers",		KSTAT_KV_U_PACKETS, },
8365 	{ "Collisions",		KSTAT_KV_U_NONE, },
8366 	{ "64B",		KSTAT_KV_U_PACKETS, },
8367 	{ "65-127B",		KSTAT_KV_U_PACKETS, },
8368 	{ "128-255B",		KSTAT_KV_U_PACKETS, },
8369 	{ "256-511B",		KSTAT_KV_U_PACKETS, },
8370 	{ "512-1023B",		KSTAT_KV_U_PACKETS, },
8371 	{ "1024-1518B",		KSTAT_KV_U_PACKETS, },
8372 	{ "1519-2047B",		KSTAT_KV_U_PACKETS, },
8373 	{ "2048-4095B",		KSTAT_KV_U_PACKETS, },
8374 	{ "4096-8191B",		KSTAT_KV_U_PACKETS, },
8375 	{ "8192-10239B",	KSTAT_KV_U_PACKETS, },
8376 };
8377 CTASSERT(nitems(mcx_ppcnt_rfc2819_tpl) == mcx_ppcnt_rfc2819_count);
8378 
8379 static const struct mcx_ppcnt mcx_ppcnt_rfc3635_tpl[] = {
8380 	{ "Alignment Errs",	KSTAT_KV_U_PACKETS, },
8381 	{ "FCS Errs",		KSTAT_KV_U_PACKETS, },
8382 	{ "Single Colls",	KSTAT_KV_U_PACKETS, },
8383 	{ "Multiple Colls",	KSTAT_KV_U_PACKETS, },
8384 	{ "SQE Test Errs",	KSTAT_KV_U_NONE, },
8385 	{ "Deferred Tx",	KSTAT_KV_U_PACKETS, },
8386 	{ "Late Colls",		KSTAT_KV_U_NONE, },
8387 	{ "Excess Colls",	KSTAT_KV_U_NONE, },
8388 	{ "Int MAC Tx Errs",	KSTAT_KV_U_PACKETS, },
8389 	{ "Carrier Sense Errs",	KSTAT_KV_U_NONE, },
8390 	{ "Too Long",		KSTAT_KV_U_PACKETS, },
8391 	{ "Int MAC Rx Errs",	KSTAT_KV_U_PACKETS, },
8392 	{ "Symbol Errs",	KSTAT_KV_U_NONE, },
8393 	{ "Unknown Control",	KSTAT_KV_U_PACKETS, },
8394 	{ "Pause Rx",		KSTAT_KV_U_PACKETS, },
8395 	{ "Pause Tx",		KSTAT_KV_U_PACKETS, },
8396 };
8397 CTASSERT(nitems(mcx_ppcnt_rfc3635_tpl) == mcx_ppcnt_rfc3635_count);
8398 
8399 struct mcx_kstat_ppcnt {
8400 	const char		*ksp_name;
8401 	const struct mcx_ppcnt	*ksp_tpl;
8402 	unsigned int		 ksp_n;
8403 	uint8_t			 ksp_grp;
8404 };
8405 
8406 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_ieee8023 = {
8407 	.ksp_name =		"ieee802.3",
8408 	.ksp_tpl =		mcx_ppcnt_ieee8023_tpl,
8409 	.ksp_n =		nitems(mcx_ppcnt_ieee8023_tpl),
8410 	.ksp_grp =		MCX_REG_PPCNT_GRP_IEEE8023,
8411 };
8412 
8413 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2863 = {
8414 	.ksp_name =		"rfc2863",
8415 	.ksp_tpl =		mcx_ppcnt_rfc2863_tpl,
8416 	.ksp_n =		nitems(mcx_ppcnt_rfc2863_tpl),
8417 	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC2863,
8418 };
8419 
8420 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2819 = {
8421 	.ksp_name =		"rfc2819",
8422 	.ksp_tpl =		mcx_ppcnt_rfc2819_tpl,
8423 	.ksp_n =		nitems(mcx_ppcnt_rfc2819_tpl),
8424 	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC2819,
8425 };
8426 
8427 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc3635 = {
8428 	.ksp_name =		"rfc3635",
8429 	.ksp_tpl =		mcx_ppcnt_rfc3635_tpl,
8430 	.ksp_n =		nitems(mcx_ppcnt_rfc3635_tpl),
8431 	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC3635,
8432 };
8433 
8434 static int	mcx_kstat_ppcnt_read(struct kstat *);
8435 
8436 static void	mcx_kstat_attach_tmps(struct mcx_softc *sc);
8437 static void	mcx_kstat_attach_queues(struct mcx_softc *sc);
8438 
8439 static struct kstat *
8440 mcx_kstat_attach_ppcnt(struct mcx_softc *sc,
8441     const struct mcx_kstat_ppcnt *ksp)
8442 {
8443 	struct kstat *ks;
8444 	struct kstat_kv *kvs;
8445 	unsigned int i;
8446 
8447 	ks = kstat_create(DEVNAME(sc), 0, ksp->ksp_name, 0, KSTAT_T_KV, 0);
8448 	if (ks == NULL)
8449 		return (NULL);
8450 
8451 	kvs = mallocarray(ksp->ksp_n, sizeof(*kvs),
8452 	    M_DEVBUF, M_WAITOK);
8453 
8454 	for (i = 0; i < ksp->ksp_n; i++) {
8455 		const struct mcx_ppcnt *tpl = &ksp->ksp_tpl[i];
8456 
8457 		kstat_kv_unit_init(&kvs[i], tpl->name,
8458 		    KSTAT_KV_T_COUNTER64, tpl->unit);
8459 	}
8460 
8461 	ks->ks_softc = sc;
8462 	ks->ks_ptr = (void *)ksp;
8463 	ks->ks_data = kvs;
8464 	ks->ks_datalen = ksp->ksp_n * sizeof(*kvs);
8465 	ks->ks_read = mcx_kstat_ppcnt_read;
8466 	kstat_set_wlock(ks, &sc->sc_cmdq_kstat_lk);
8467 
8468 	kstat_install(ks);
8469 
8470 	return (ks);
8471 }
8472 
8473 static void
8474 mcx_kstat_attach(struct mcx_softc *sc)
8475 {
8476 	sc->sc_kstat_ieee8023 = mcx_kstat_attach_ppcnt(sc,
8477 	    &mcx_kstat_ppcnt_ieee8023);
8478 	sc->sc_kstat_rfc2863 = mcx_kstat_attach_ppcnt(sc,
8479 	    &mcx_kstat_ppcnt_rfc2863);
8480 	sc->sc_kstat_rfc2819 = mcx_kstat_attach_ppcnt(sc,
8481 	    &mcx_kstat_ppcnt_rfc2819);
8482 	sc->sc_kstat_rfc3635 = mcx_kstat_attach_ppcnt(sc,
8483 	    &mcx_kstat_ppcnt_rfc3635);
8484 
8485 	mcx_kstat_attach_tmps(sc);
8486 	mcx_kstat_attach_queues(sc);
8487 }
8488 
8489 static int
8490 mcx_kstat_ppcnt_read(struct kstat *ks)
8491 {
8492 	struct mcx_softc *sc = ks->ks_softc;
8493 	struct mcx_kstat_ppcnt *ksp = ks->ks_ptr;
8494 	struct mcx_reg_ppcnt ppcnt = {
8495 		.ppcnt_grp = ksp->ksp_grp,
8496 		.ppcnt_local_port = 1,
8497 	};
8498 	struct kstat_kv *kvs = ks->ks_data;
8499 	uint64_t *vs = (uint64_t *)&ppcnt.ppcnt_counter_set;
8500 	unsigned int i;
8501 	int rv;
8502 
8503 	rv = mcx_access_hca_reg(sc, MCX_REG_PPCNT, MCX_REG_OP_READ,
8504 	    &ppcnt, sizeof(ppcnt), MCX_CMDQ_SLOT_KSTAT);
8505 	if (rv != 0)
8506 		return (EIO);
8507 
8508 	nanouptime(&ks->ks_updated);
8509 
8510 	for (i = 0; i < ksp->ksp_n; i++)
8511 		kstat_kv_u64(&kvs[i]) = bemtoh64(&vs[i]);
8512 
8513 	return (0);
8514 }
8515 
8516 struct mcx_kstat_mtmp {
8517 	struct kstat_kv		ktmp_name;
8518 	struct kstat_kv		ktmp_temperature;
8519 	struct kstat_kv		ktmp_threshold_lo;
8520 	struct kstat_kv		ktmp_threshold_hi;
8521 };
8522 
8523 static const struct mcx_kstat_mtmp mcx_kstat_mtmp_tpl = {
8524 	KSTAT_KV_INITIALIZER("name",		KSTAT_KV_T_ISTR),
8525 	KSTAT_KV_INITIALIZER("temperature",	KSTAT_KV_T_TEMP),
8526 	KSTAT_KV_INITIALIZER("lo threshold",	KSTAT_KV_T_TEMP),
8527 	KSTAT_KV_INITIALIZER("hi threshold",	KSTAT_KV_T_TEMP),
8528 };
8529 
8530 static const struct timeval mcx_kstat_mtmp_rate = { 1, 0 };
8531 
8532 static int mcx_kstat_mtmp_read(struct kstat *);
8533 
8534 static void
8535 mcx_kstat_attach_tmps(struct mcx_softc *sc)
8536 {
8537 	struct kstat *ks;
8538 	struct mcx_reg_mcam mcam;
8539 	struct mcx_reg_mtcap mtcap;
8540 	struct mcx_kstat_mtmp *ktmp;
8541 	uint64_t map;
8542 	unsigned int i, n;
8543 
8544 	memset(&mtcap, 0, sizeof(mtcap));
8545 	memset(&mcam, 0, sizeof(mcam));
8546 
8547 	if (sc->sc_mcam_reg == 0) {
8548 		/* no management capabilities */
8549 		return;
8550 	}
8551 
8552 	if (mcx_access_hca_reg(sc, MCX_REG_MCAM, MCX_REG_OP_READ,
8553 	    &mcam, sizeof(mcam), MCX_CMDQ_SLOT_POLL) != 0) {
8554 		/* unable to check management capabilities? */
8555 		return;
8556 	}
8557 
8558 	if (MCX_BITFIELD_BIT(mcam.mcam_feature_cap_mask,
8559 	    MCX_MCAM_FEATURE_CAP_SENSOR_MAP) == 0) {
8560 		/* no sensor map */
8561 		return;
8562 	}
8563 
8564 	if (mcx_access_hca_reg(sc, MCX_REG_MTCAP, MCX_REG_OP_READ,
8565 	    &mtcap, sizeof(mtcap), MCX_CMDQ_SLOT_POLL) != 0) {
8566 		/* unable to find temperature sensors */
8567 		return;
8568 	}
8569 
8570 	sc->sc_kstat_mtmp_count = mtcap.mtcap_sensor_count;
8571 	sc->sc_kstat_mtmp = mallocarray(sc->sc_kstat_mtmp_count,
8572 	    sizeof(*sc->sc_kstat_mtmp), M_DEVBUF, M_WAITOK);
8573 
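	/* attach a kstat for each sensor advertised in the bitmap */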
8574 	n = 0;
8575 	map = bemtoh64(&mtcap.mtcap_sensor_map);
8576 	for (i = 0; i < sizeof(map) * NBBY; i++) {
8577 		if (!ISSET(map, (1ULL << i)))
8578 			continue;
8579 
8580 		ks = kstat_create(DEVNAME(sc), 0, "temperature", i,
8581 		    KSTAT_T_KV, 0);
8582 		if (ks == NULL) {
8583 			/* unable to attach temperature sensor %u, i */
8584 			continue;
8585 		}
8586 
8587 		ktmp = malloc(sizeof(*ktmp), M_DEVBUF, M_WAITOK|M_ZERO);
8588 		*ktmp = mcx_kstat_mtmp_tpl;
8589 
8590 		ks->ks_data = ktmp;
8591 		ks->ks_datalen = sizeof(*ktmp);
8592 		TIMEVAL_TO_TIMESPEC(&mcx_kstat_mtmp_rate, &ks->ks_interval);
8593 		ks->ks_read = mcx_kstat_mtmp_read;
8594 		kstat_set_wlock(ks, &sc->sc_cmdq_kstat_lk);
8595 
8596 		ks->ks_softc = sc;
8597 		kstat_install(ks);
8598 
8599 		sc->sc_kstat_mtmp[n++] = ks;
8600 		if (n >= sc->sc_kstat_mtmp_count)
8601 			break;
8602 	}
8603 }
8604 
8605 static uint64_t
8606 mcx_tmp_to_uK(uint16_t *t)
8607 {
8608 	int64_t mt = (int16_t)bemtoh16(t); /* 0.125 C units */
8609 	mt *= 1000000 / 8; /* convert to uC */
8610 	mt += 273150000; /* convert to uK */
8611 
8612 	return (mt);
8613 }
8614 
8615 static int
8616 mcx_kstat_mtmp_read(struct kstat *ks)
8617 {
8618 	struct mcx_softc *sc = ks->ks_softc;
8619 	struct mcx_kstat_mtmp *ktmp = ks->ks_data;
8620 	struct mcx_reg_mtmp mtmp;
8621 	int rv;
8622 	struct timeval updated;
8623 
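	/* rate limit hardware reads to mcx_kstat_mtmp_rate (once a second) */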
8624 	TIMESPEC_TO_TIMEVAL(&updated, &ks->ks_updated);
8625 
8626 	if (!ratecheck(&updated, &mcx_kstat_mtmp_rate))
8627 		return (0);
8628 
8629 	memset(&mtmp, 0, sizeof(mtmp));
8630 	htobem16(&mtmp.mtmp_sensor_index, ks->ks_unit);
8631 
8632 	rv = mcx_access_hca_reg(sc, MCX_REG_MTMP, MCX_REG_OP_READ,
8633 	    &mtmp, sizeof(mtmp), MCX_CMDQ_SLOT_KSTAT);
8634 	if (rv != 0)
8635 		return (EIO);
8636 
8637 	memset(kstat_kv_istr(&ktmp->ktmp_name), 0,
8638 	    sizeof(kstat_kv_istr(&ktmp->ktmp_name)));
8639 	memcpy(kstat_kv_istr(&ktmp->ktmp_name),
8640 	    mtmp.mtmp_sensor_name, sizeof(mtmp.mtmp_sensor_name));
8641 	kstat_kv_temp(&ktmp->ktmp_temperature) =
8642 	    mcx_tmp_to_uK(&mtmp.mtmp_temperature);
8643 	kstat_kv_temp(&ktmp->ktmp_threshold_lo) =
8644 	    mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_lo);
8645 	kstat_kv_temp(&ktmp->ktmp_threshold_hi) =
8646 	    mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_hi);
8647 
8648 	TIMEVAL_TO_TIMESPEC(&updated, &ks->ks_updated);
8649 
8650 	return (0);
8651 }
8652 
8653 struct mcx_queuestat {
8654 	char			 name[KSTAT_KV_NAMELEN];
8655 	enum kstat_kv_type	 type;
8656 };
8657 
8658 static const struct mcx_queuestat mcx_queue_kstat_tpl[] = {
8659 	{ "RQ SW prod",		KSTAT_KV_T_COUNTER64 },
8660 	{ "RQ HW prod",		KSTAT_KV_T_COUNTER64 },
8661 	{ "RQ HW cons",		KSTAT_KV_T_COUNTER64 },
8662 	{ "RQ HW state",	KSTAT_KV_T_ISTR },
8663 
8664 	{ "SQ SW prod",		KSTAT_KV_T_COUNTER64 },
8665 	{ "SQ SW cons",		KSTAT_KV_T_COUNTER64 },
8666 	{ "SQ HW prod",		KSTAT_KV_T_COUNTER64 },
8667 	{ "SQ HW cons",		KSTAT_KV_T_COUNTER64 },
8668 	{ "SQ HW state",	KSTAT_KV_T_ISTR },
8669 
8670 	{ "CQ SW cons",		KSTAT_KV_T_COUNTER64 },
8671 	{ "CQ HW prod",		KSTAT_KV_T_COUNTER64 },
8672 	{ "CQ HW cons",		KSTAT_KV_T_COUNTER64 },
8673 	{ "CQ HW notify",	KSTAT_KV_T_COUNTER64 },
8674 	{ "CQ HW solicit",	KSTAT_KV_T_COUNTER64 },
8675 	{ "CQ HW status",	KSTAT_KV_T_ISTR },
8676 	{ "CQ HW state",	KSTAT_KV_T_ISTR },
8677 
8678 	{ "EQ SW cons",		KSTAT_KV_T_COUNTER64 },
8679 	{ "EQ HW prod",		KSTAT_KV_T_COUNTER64 },
8680 	{ "EQ HW cons",		KSTAT_KV_T_COUNTER64 },
8681 	{ "EQ HW status",	KSTAT_KV_T_ISTR },
8682 	{ "EQ HW state",	KSTAT_KV_T_ISTR },
8683 };
8684 
8685 static int	mcx_kstat_queue_read(struct kstat *);
8686 
8687 static void
8688 mcx_kstat_attach_queues(struct mcx_softc *sc)
8689 {
8690 	struct kstat *ks;
8691 	struct kstat_kv *kvs;
8692 	int q, i;
8693 
8694 	for (q = 0; q < intrmap_count(sc->sc_intrmap); q++) {
8695 		ks = kstat_create(DEVNAME(sc), 0, "mcx-queues", q,
8696 		    KSTAT_T_KV, 0);
8697 		if (ks == NULL) {
8698 			/* unable to attach queue stats %u, q */
8699 			continue;
8700 		}
8701 
8702 		kvs = mallocarray(nitems(mcx_queue_kstat_tpl),
8703 		    sizeof(*kvs), M_DEVBUF, M_WAITOK);
8704 
8705 		for (i = 0; i < nitems(mcx_queue_kstat_tpl); i++) {
8706 			const struct mcx_queuestat *tpl =
8707 			    &mcx_queue_kstat_tpl[i];
8708 
8709 			kstat_kv_init(&kvs[i], tpl->name, tpl->type);
8710 		}
8711 
8712 		ks->ks_softc = &sc->sc_queues[q];
8713 		ks->ks_data = kvs;
8714 		ks->ks_datalen = nitems(mcx_queue_kstat_tpl) * sizeof(*kvs);
8715 		ks->ks_read = mcx_kstat_queue_read;
8716 
8717 		sc->sc_queues[q].q_kstat = ks;
8718 		kstat_install(ks);
8719 	}
8720 }
8721 
8722 static int
8723 mcx_kstat_queue_read(struct kstat *ks)
8724 {
8725 	struct mcx_queues *q = ks->ks_softc;
8726 	struct mcx_softc *sc = q->q_sc;
8727 	struct kstat_kv *kvs = ks->ks_data;
8728 	union {
8729 		struct mcx_rq_ctx rq;
8730 		struct mcx_sq_ctx sq;
8731 		struct mcx_cq_ctx cq;
8732 		struct mcx_eq_ctx eq;
8733 	} u;
8734 	const char *text;
8735 	int error = 0;
8736 
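	/* values are filled in the order of mcx_queue_kstat_tpl */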
8737 	if (mcx_query_rq(sc, &q->q_rx, &u.rq) != 0) {
8738 		error = EIO;
8739 		goto out;
8740 	}
8741 
8742 	kstat_kv_u64(kvs++) = q->q_rx.rx_prod;
8743 	kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_sw_counter);
8744 	kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_hw_counter);
8745 	switch ((bemtoh32(&u.rq.rq_flags) & MCX_RQ_CTX_STATE_MASK) >>
8746 	    MCX_RQ_CTX_STATE_SHIFT) {
8747 	case MCX_RQ_CTX_STATE_RST:
8748 		text = "RST";
8749 		break;
8750 	case MCX_RQ_CTX_STATE_RDY:
8751 		text = "RDY";
8752 		break;
8753 	case MCX_RQ_CTX_STATE_ERR:
8754 		text = "ERR";
8755 		break;
8756 	default:
8757 		text = "unknown";
8758 		break;
8759 	}
8760 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8761 	kvs++;
8762 
8763 	if (mcx_query_sq(sc, &q->q_tx, &u.sq) != 0) {
8764 		error = EIO;
8765 		goto out;
8766 	}
8767 
8768 	kstat_kv_u64(kvs++) = q->q_tx.tx_prod;
8769 	kstat_kv_u64(kvs++) = q->q_tx.tx_cons;
8770 	kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_sw_counter);
8771 	kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_hw_counter);
8772 	switch ((bemtoh32(&u.sq.sq_flags) & MCX_SQ_CTX_STATE_MASK) >>
8773 	    MCX_SQ_CTX_STATE_SHIFT) {
8774 	case MCX_SQ_CTX_STATE_RST:
8775 		text = "RST";
8776 		break;
8777 	case MCX_SQ_CTX_STATE_RDY:
8778 		text = "RDY";
8779 		break;
8780 	case MCX_SQ_CTX_STATE_ERR:
8781 		text = "ERR";
8782 		break;
8783 	default:
8784 		text = "unknown";
8785 		break;
8786 	}
8787 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8788 	kvs++;
8789 
8790 	if (mcx_query_cq(sc, &q->q_cq, &u.cq) != 0) {
8791 		error = EIO;
8792 		goto out;
8793 	}
8794 
8795 	kstat_kv_u64(kvs++) = q->q_cq.cq_cons;
8796 	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_producer_counter);
8797 	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_consumer_counter);
8798 	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_notified);
8799 	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_solicit);
8800 
8801 	switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATUS_MASK) >>
8802 	    MCX_CQ_CTX_STATUS_SHIFT) {
8803 	case MCX_CQ_CTX_STATUS_OK:
8804 		text = "OK";
8805 		break;
8806 	case MCX_CQ_CTX_STATUS_OVERFLOW:
8807 		text = "overflow";
8808 		break;
8809 	case MCX_CQ_CTX_STATUS_WRITE_FAIL:
8810 		text = "write fail";
8811 		break;
8812 	default:
8813 		text = "unknown";
8814 		break;
8815 	}
8816 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8817 	kvs++;
8818 
8819 	switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATE_MASK) >>
8820 	    MCX_CQ_CTX_STATE_SHIFT) {
8821 	case MCX_CQ_CTX_STATE_SOLICITED:
8822 		text = "solicited";
8823 		break;
8824 	case MCX_CQ_CTX_STATE_ARMED:
8825 		text = "armed";
8826 		break;
8827 	case MCX_CQ_CTX_STATE_FIRED:
8828 		text = "fired";
8829 		break;
8830 	default:
8831 		text = "unknown";
8832 		break;
8833 	}
8834 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8835 	kvs++;
8836 
8837 	if (mcx_query_eq(sc, &q->q_eq, &u.eq) != 0) {
8838 		error = EIO;
8839 		goto out;
8840 	}
8841 
8842 	kstat_kv_u64(kvs++) = q->q_eq.eq_cons;
8843 	kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_producer_counter);
8844 	kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_consumer_counter);
8845 
8846 	switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATUS_MASK) >>
8847 	    MCX_EQ_CTX_STATUS_SHIFT) {
8848 	case MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE:
8849 		text = "write fail";
8850 		break;
8851 	case MCX_EQ_CTX_STATUS_OK:
8852 		text = "OK";
8853 		break;
8854 	default:
8855 		text = "unknown";
8856 		break;
8857 	}
8858 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8859 	kvs++;
8860 
8861 	switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATE_MASK) >>
8862 	    MCX_EQ_CTX_STATE_SHIFT) {
8863 	case MCX_EQ_CTX_STATE_ARMED:
8864 		text = "armed";
8865 		break;
8866 	case MCX_EQ_CTX_STATE_FIRED:
8867 		text = "fired";
8868 		break;
8869 	default:
8870 		text = "unknown";
8871 		break;
8872 	}
8873 	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
8874 	kvs++;
8875 
8876 	nanouptime(&ks->ks_updated);
8877 out:
8878 	return (error);
8879 }
8880 
8881 #endif /* NKSTAT > 0 */
8882 
8883 static unsigned int
8884 mcx_timecounter_read(struct timecounter *tc)
8885 {
8886 	struct mcx_softc *sc = tc->tc_priv;
8887 
8888 	return (mcx_rd(sc, MCX_INTERNAL_TIMER_L));
8889 }
8890 
8891 static void
8892 mcx_timecounter_attach(struct mcx_softc *sc)
8893 {
8894 	struct timecounter *tc = &sc->sc_timecounter;
8895 
8896 	tc->tc_get_timecount = mcx_timecounter_read;
8897 	tc->tc_counter_mask = ~0U;
8898 	tc->tc_frequency = sc->sc_khz * 1000;
8899 	tc->tc_name = sc->sc_dev.dv_xname;
8900 	tc->tc_quality = -100;
8901 	tc->tc_priv = sc;
8902 
8903 	tc_init(tc);
8904 }
8905