/*	$OpenBSD: if_mcx.c,v 1.118 2024/12/20 03:31:09 jmatthew Exp $ */

/*
 * Copyright (c) 2017 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "vlan.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/task.h>
#include <sys/atomic.h>
#include <sys/timetc.h>
#include <sys/intrmap.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/toeplitz.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NKSTAT > 0
#include <sys/kstat.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define BUS_DMASYNC_PRERW	(BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define BUS_DMASYNC_POSTRW	(BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

#define MCX_HCA_BAR	PCI_MAPREG_START /* BAR 0 */

#define MCX_FW_VER			0x0000
#define  MCX_FW_VER_MAJOR(_v)			((_v) & 0xffff)
#define  MCX_FW_VER_MINOR(_v)			((_v) >> 16)
#define MCX_CMDIF_FW_SUBVER		0x0004
#define  MCX_FW_VER_SUBMINOR(_v)		((_v) & 0xffff)
#define  MCX_CMDIF(_v)				((_v) >> 16)
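
/*
 * Illustrative sketch (not part of the driver): the firmware version is
 * split across the first two BAR words.  Assuming fw0 and fw4 hold 32-bit
 * reads of MCX_FW_VER and MCX_CMDIF_FW_SUBVER, the x.y.z firmware version
 * and the command interface revision (which must match
 * MCX_CMD_IF_SUPPORTED, defined below) unpack like this:
 */
static inline void
mcx_fw_ver_sketch(uint32_t fw0, uint32_t fw4, unsigned int *major,
    unsigned int *minor, unsigned int *subminor, unsigned int *cmdif)
{
	*major = MCX_FW_VER_MAJOR(fw0);
	*minor = MCX_FW_VER_MINOR(fw0);
	*subminor = MCX_FW_VER_SUBMINOR(fw4);
	*cmdif = MCX_CMDIF(fw4);
}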

#define MCX_ISSI			1 /* as per the PRM */
#define MCX_CMD_IF_SUPPORTED		5

#define MCX_HARDMTU			9500

enum mcx_cmdq_slot {
	MCX_CMDQ_SLOT_POLL = 0,
	MCX_CMDQ_SLOT_IOCTL,
	MCX_CMDQ_SLOT_KSTAT,
	MCX_CMDQ_SLOT_LINK,

	MCX_CMDQ_NUM_SLOTS
};

#define MCX_PAGE_SHIFT			12
#define MCX_PAGE_SIZE			(1 << MCX_PAGE_SHIFT)

/* queue sizes */
#define MCX_LOG_EQ_SIZE			7
#define MCX_LOG_CQ_SIZE			12
#define MCX_LOG_RQ_SIZE			10
#define MCX_LOG_SQ_SIZE			11

#define MCX_MAX_QUEUES			16

/* completion event moderation - about 10khz, or 90% of the cq */
#define MCX_CQ_MOD_PERIOD		50
#define MCX_CQ_MOD_COUNTER		\
	(((1 << (MCX_LOG_CQ_SIZE - 1)) * 9) / 10)

#define MCX_LOG_SQ_ENTRY_SIZE		6
#define MCX_SQ_ENTRY_MAX_SLOTS		4
#define MCX_SQ_SEGS_PER_SLOT		\
	(sizeof(struct mcx_sq_entry) / sizeof(struct mcx_sq_entry_seg))
#define MCX_SQ_MAX_SEGMENTS		\
	(1 + ((MCX_SQ_ENTRY_MAX_SLOTS-1) * MCX_SQ_SEGS_PER_SLOT))

#define MCX_LOG_FLOW_TABLE_SIZE		5
#define MCX_NUM_STATIC_FLOWS		4 /* promisc, allmulti, ucast, bcast */
#define MCX_NUM_MCAST_FLOWS		\
	((1 << MCX_LOG_FLOW_TABLE_SIZE) - MCX_NUM_STATIC_FLOWS)

#define MCX_SQ_INLINE_SIZE		18
CTASSERT(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN == MCX_SQ_INLINE_SIZE);

/* doorbell offsets */
#define MCX_DOORBELL_AREA_SIZE		MCX_PAGE_SIZE

#define MCX_CQ_DOORBELL_BASE		0
#define MCX_CQ_DOORBELL_STRIDE		64

#define MCX_WQ_DOORBELL_BASE		(MCX_PAGE_SIZE / 2)
#define MCX_WQ_DOORBELL_STRIDE		64
/* make sure the doorbells fit */
CTASSERT(MCX_MAX_QUEUES * MCX_CQ_DOORBELL_STRIDE < MCX_WQ_DOORBELL_BASE);
CTASSERT(MCX_MAX_QUEUES * MCX_WQ_DOORBELL_STRIDE <
    MCX_DOORBELL_AREA_SIZE - MCX_WQ_DOORBELL_BASE);

#define MCX_WQ_DOORBELL_MASK		0xffff
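
/*
 * Illustrative sketch: each queue owns one slot in the doorbell page, so
 * for a hypothetical queue index q the byte offsets into the doorbell
 * area work out as follows (the CTASSERTs above guarantee both ranges
 * stay inside MCX_DOORBELL_AREA_SIZE).
 */
static inline size_t
mcx_cq_doorbell_offset_sketch(unsigned int q)
{
	return (MCX_CQ_DOORBELL_BASE + (q * MCX_CQ_DOORBELL_STRIDE));
}

static inline size_t
mcx_wq_doorbell_offset_sketch(unsigned int q)
{
	return (MCX_WQ_DOORBELL_BASE + (q * MCX_WQ_DOORBELL_STRIDE));
}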

/* uar registers */
#define MCX_UAR_CQ_DOORBELL		0x20
#define MCX_UAR_EQ_DOORBELL_ARM		0x40
#define MCX_UAR_EQ_DOORBELL		0x48
#define MCX_UAR_BF			0x800

#define MCX_CMDQ_ADDR_HI		0x0010
#define MCX_CMDQ_ADDR_LO		0x0014
#define MCX_CMDQ_ADDR_NMASK		0xfff
#define MCX_CMDQ_LOG_SIZE(_v)		((_v) >> 4 & 0xf)
#define MCX_CMDQ_LOG_STRIDE(_v)		((_v) >> 0 & 0xf)
#define MCX_CMDQ_INTERFACE_MASK		(0x3 << 8)
#define MCX_CMDQ_INTERFACE_FULL_DRIVER	(0x0 << 8)
#define MCX_CMDQ_INTERFACE_DISABLED	(0x1 << 8)

#define MCX_CMDQ_DOORBELL		0x0018

#define MCX_STATE			0x01fc
#define MCX_STATE_MASK				(1U << 31)
#define MCX_STATE_INITIALIZING			(1U << 31)
#define MCX_STATE_READY				(0 << 31)
#define MCX_STATE_INTERFACE_MASK		(0x3 << 24)
#define MCX_STATE_INTERFACE_FULL_DRIVER		(0x0 << 24)
#define MCX_STATE_INTERFACE_DISABLED		(0x1 << 24)

#define MCX_INTERNAL_TIMER		0x1000
#define MCX_INTERNAL_TIMER_H		0x1000
#define MCX_INTERNAL_TIMER_L		0x1004

#define MCX_CLEAR_INT			0x100c

#define MCX_REG_OP_WRITE		0
#define MCX_REG_OP_READ			1

#define MCX_REG_PMLP			0x5002
#define MCX_REG_PMTU			0x5003
#define MCX_REG_PTYS			0x5004
#define MCX_REG_PAOS			0x5006
#define MCX_REG_PFCC			0x5007
#define MCX_REG_PPCNT			0x5008
#define MCX_REG_MTCAP			0x9009 /* mgmt temp capabilities */
#define MCX_REG_MTMP			0x900a /* mgmt temp */
#define MCX_REG_MCIA			0x9014
#define MCX_REG_MCAM			0x907f

#define MCX_ETHER_CAP_SGMII		0
#define MCX_ETHER_CAP_1000_KX		1
#define MCX_ETHER_CAP_10G_CX4		2
#define MCX_ETHER_CAP_10G_KX4		3
#define MCX_ETHER_CAP_10G_KR		4
#define MCX_ETHER_CAP_40G_CR4		6
#define MCX_ETHER_CAP_40G_KR4		7
#define MCX_ETHER_CAP_10G_CR		12
#define MCX_ETHER_CAP_10G_SR		13
#define MCX_ETHER_CAP_10G_LR		14
#define MCX_ETHER_CAP_40G_SR4		15
#define MCX_ETHER_CAP_40G_LR4		16
#define MCX_ETHER_CAP_50G_SR2		18
#define MCX_ETHER_CAP_100G_CR4		20
#define MCX_ETHER_CAP_100G_SR4		21
#define MCX_ETHER_CAP_100G_KR4		22
#define MCX_ETHER_CAP_100G_LR4		23
#define MCX_ETHER_CAP_25G_CR		27
#define MCX_ETHER_CAP_25G_KR		28
#define MCX_ETHER_CAP_25G_SR		29
#define MCX_ETHER_CAP_50G_CR2		30
#define MCX_ETHER_CAP_50G_KR2		31

#define MCX_ETHER_EXT_CAP_SGMII_100	0
#define MCX_ETHER_EXT_CAP_1000_X	1
#define MCX_ETHER_EXT_CAP_5G_R		3
#define MCX_ETHER_EXT_CAP_XAUI		4
#define MCX_ETHER_EXT_CAP_XLAUI		5
#define MCX_ETHER_EXT_CAP_25G_AUI1	6
#define MCX_ETHER_EXT_CAP_50G_AUI2	7
#define MCX_ETHER_EXT_CAP_50G_AUI1	8
#define MCX_ETHER_EXT_CAP_CAUI4		9
#define MCX_ETHER_EXT_CAP_100G_AUI2	10
#define MCX_ETHER_EXT_CAP_200G_AUI4	12
#define MCX_ETHER_EXT_CAP_400G_AUI8	15

#define MCX_MAX_CQE			32

#define MCX_CMD_QUERY_HCA_CAP		0x100
#define MCX_CMD_QUERY_ADAPTER		0x101
#define MCX_CMD_INIT_HCA		0x102
#define MCX_CMD_TEARDOWN_HCA		0x103
#define MCX_CMD_ENABLE_HCA		0x104
#define MCX_CMD_DISABLE_HCA		0x105
#define MCX_CMD_QUERY_PAGES		0x107
#define MCX_CMD_MANAGE_PAGES		0x108
#define MCX_CMD_SET_HCA_CAP		0x109
#define MCX_CMD_QUERY_ISSI		0x10a
#define MCX_CMD_SET_ISSI		0x10b
#define MCX_CMD_SET_DRIVER_VERSION	0x10d
#define MCX_CMD_QUERY_SPECIAL_CONTEXTS	0x203
#define MCX_CMD_CREATE_EQ		0x301
#define MCX_CMD_DESTROY_EQ		0x302
#define MCX_CMD_QUERY_EQ		0x303
#define MCX_CMD_CREATE_CQ		0x400
#define MCX_CMD_DESTROY_CQ		0x401
#define MCX_CMD_QUERY_CQ		0x402
#define MCX_CMD_QUERY_NIC_VPORT_CONTEXT	0x754
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT \
					0x755
#define MCX_CMD_QUERY_VPORT_COUNTERS	0x770
#define MCX_CMD_ALLOC_PD		0x800
#define MCX_CMD_ALLOC_UAR		0x802
#define MCX_CMD_ACCESS_REG		0x805
#define MCX_CMD_ALLOC_TRANSPORT_DOMAIN	0x816
#define MCX_CMD_CREATE_TIR		0x900
#define MCX_CMD_DESTROY_TIR		0x902
#define MCX_CMD_CREATE_SQ		0x904
#define MCX_CMD_MODIFY_SQ		0x905
#define MCX_CMD_DESTROY_SQ		0x906
#define MCX_CMD_QUERY_SQ		0x907
#define MCX_CMD_CREATE_RQ		0x908
#define MCX_CMD_MODIFY_RQ		0x909
#define MCX_CMD_DESTROY_RQ		0x90a
#define MCX_CMD_QUERY_RQ		0x90b
#define MCX_CMD_CREATE_TIS		0x912
#define MCX_CMD_DESTROY_TIS		0x914
#define MCX_CMD_CREATE_RQT		0x916
#define MCX_CMD_DESTROY_RQT		0x918
#define MCX_CMD_SET_FLOW_TABLE_ROOT	0x92f
#define MCX_CMD_CREATE_FLOW_TABLE	0x930
#define MCX_CMD_DESTROY_FLOW_TABLE	0x931
#define MCX_CMD_QUERY_FLOW_TABLE	0x932
#define MCX_CMD_CREATE_FLOW_GROUP	0x933
#define MCX_CMD_DESTROY_FLOW_GROUP	0x934
#define MCX_CMD_QUERY_FLOW_GROUP	0x935
#define MCX_CMD_SET_FLOW_TABLE_ENTRY	0x936
#define MCX_CMD_QUERY_FLOW_TABLE_ENTRY	0x937
#define MCX_CMD_DELETE_FLOW_TABLE_ENTRY	0x938
#define MCX_CMD_ALLOC_FLOW_COUNTER	0x939
#define MCX_CMD_QUERY_FLOW_COUNTER	0x93b

#define MCX_QUEUE_STATE_RST		0
#define MCX_QUEUE_STATE_RDY		1
#define MCX_QUEUE_STATE_ERR		3

#define MCX_FLOW_TABLE_TYPE_RX		0
#define MCX_FLOW_TABLE_TYPE_TX		1

#define MCX_CMDQ_INLINE_DATASIZE 16

struct mcx_cmdq_entry {
	uint8_t			cq_type;
#define MCX_CMDQ_TYPE_PCIE		0x7
	uint8_t			cq_reserved0[3];

	uint32_t		cq_input_length;
	uint64_t		cq_input_ptr;
	uint8_t			cq_input_data[MCX_CMDQ_INLINE_DATASIZE];

	uint8_t			cq_output_data[MCX_CMDQ_INLINE_DATASIZE];
	uint64_t		cq_output_ptr;
	uint32_t		cq_output_length;

	uint8_t			cq_token;
	uint8_t			cq_signature;
	uint8_t			cq_reserved1[1];
	uint8_t			cq_status;
#define MCX_CQ_STATUS_SHIFT		1
#define MCX_CQ_STATUS_MASK		(0x7f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OK		(0x00 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_INT_ERR		(0x01 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OPCODE	(0x02 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_PARAM		(0x03 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SYS_STATE	(0x04 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE	(0x05 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_RESOURCE_BUSY	(0x06 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_EXCEED_LIM	(0x08 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RES_STATE	(0x09 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INDEX		(0x0a << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_NO_RESOURCES	(0x0f << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_INPUT_LEN	(0x50 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_OUTPUT_LEN	(0x51 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_RESOURCE_STATE \
					(0x10 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_BAD_SIZE		(0x40 << MCX_CQ_STATUS_SHIFT)
#define MCX_CQ_STATUS_OWN_MASK		0x1
#define MCX_CQ_STATUS_OWN_SW		0x0
#define MCX_CQ_STATUS_OWN_HW		0x1
} __packed __aligned(8);
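
/*
 * Illustrative sketch: software owns a command queue entry again once
 * the hardware has flipped the ownership bit in cq_status back to
 * MCX_CQ_STATUS_OWN_SW, at which point the status code in the upper
 * bits is valid.  A poll loop would test the entry roughly like this:
 */
static inline int
mcx_cmdq_entry_done_sketch(const volatile struct mcx_cmdq_entry *cqe)
{
	return ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
	    MCX_CQ_STATUS_OWN_SW);
}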

#define MCX_CMDQ_MAILBOX_DATASIZE	512

struct mcx_cmdq_mailbox {
	uint8_t			mb_data[MCX_CMDQ_MAILBOX_DATASIZE];
	uint8_t			mb_reserved0[48];
	uint64_t		mb_next_ptr;
	uint32_t		mb_block_number;
	uint8_t			mb_reserved1[1];
	uint8_t			mb_token;
	uint8_t			mb_ctrl_signature;
	uint8_t			mb_signature;
} __packed __aligned(8);

#define MCX_CMDQ_MAILBOX_ALIGN	(1 << 10)
#define MCX_CMDQ_MAILBOX_SIZE	roundup(sizeof(struct mcx_cmdq_mailbox), \
				    MCX_CMDQ_MAILBOX_ALIGN)
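
/*
 * Illustrative sketch: command payloads bigger than the 16 inline bytes
 * travel in a chain of 1k-aligned mailboxes, each carrying
 * MCX_CMDQ_MAILBOX_DATASIZE bytes, so the number of mailboxes needed
 * for a payload of len bytes (hypothetical parameter) is:
 */
static inline unsigned int
mcx_cmdq_nmailboxes_sketch(size_t len)
{
	return (howmany(len, MCX_CMDQ_MAILBOX_DATASIZE));
}
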
/*
 * command mailbox structures
 */

struct mcx_cmd_enable_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_function_id;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_enable_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_init_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_init_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
#define MCX_CMD_TEARDOWN_HCA_GRACEFUL	0x0
#define MCX_CMD_TEARDOWN_HCA_PANIC	0x1
	uint16_t		cmd_profile;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

struct mcx_cmd_teardown_hca_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_access_reg_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_register_id;
	uint32_t		cmd_argument;
} __packed __aligned(4);

struct mcx_cmd_access_reg_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_reg_pmtu {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[2];
	uint16_t		rp_max_mtu;
	uint8_t			rp_reserved3[2];
	uint16_t		rp_admin_mtu;
	uint8_t			rp_reserved4[2];
	uint16_t		rp_oper_mtu;
	uint8_t			rp_reserved5[2];
} __packed __aligned(4);

struct mcx_reg_ptys {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2;
	uint8_t			rp_proto_mask;
#define MCX_REG_PTYS_PROTO_MASK_ETH		(1 << 2)
	uint8_t			rp_reserved3[4];
	uint32_t		rp_ext_eth_proto_cap;
	uint32_t		rp_eth_proto_cap;
	uint8_t			rp_reserved4[4];
	uint32_t		rp_ext_eth_proto_admin;
	uint32_t		rp_eth_proto_admin;
	uint8_t			rp_reserved5[4];
	uint32_t		rp_ext_eth_proto_oper;
	uint32_t		rp_eth_proto_oper;
	uint8_t			rp_reserved6[24];
} __packed __aligned(4);

struct mcx_reg_paos {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_admin_status;
#define MCX_REG_PAOS_ADMIN_STATUS_UP		1
#define MCX_REG_PAOS_ADMIN_STATUS_DOWN		2
#define MCX_REG_PAOS_ADMIN_STATUS_UP_ONCE	3
#define MCX_REG_PAOS_ADMIN_STATUS_DISABLED	4
	uint8_t			rp_oper_status;
#define MCX_REG_PAOS_OPER_STATUS_UP		1
#define MCX_REG_PAOS_OPER_STATUS_DOWN		2
#define MCX_REG_PAOS_OPER_STATUS_FAILED		4
	uint8_t			rp_admin_state_update;
#define MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN	(1 << 7)
	uint8_t			rp_reserved2[11];
} __packed __aligned(4);

struct mcx_reg_pfcc {
	uint8_t			rp_reserved1;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved2[3];
	uint8_t			rp_prio_mask_tx;
	uint8_t			rp_reserved3;
	uint8_t			rp_prio_mask_rx;
	uint8_t			rp_pptx_aptx;
	uint8_t			rp_pfctx;
	uint8_t			rp_fctx_dis;
	uint8_t			rp_reserved4;
	uint8_t			rp_pprx_aprx;
	uint8_t			rp_pfcrx;
	uint8_t			rp_reserved5[2];
	uint16_t		rp_dev_stall_min;
	uint16_t		rp_dev_stall_crit;
	uint8_t			rp_reserved6[12];
} __packed __aligned(4);

#define MCX_PMLP_MODULE_NUM_MASK	0xff
struct mcx_reg_pmlp {
	uint8_t			rp_rxtx;
	uint8_t			rp_local_port;
	uint8_t			rp_reserved0;
	uint8_t			rp_width;
	uint32_t		rp_lane0_mapping;
	uint32_t		rp_lane1_mapping;
	uint32_t		rp_lane2_mapping;
	uint32_t		rp_lane3_mapping;
	uint8_t			rp_reserved1[44];
} __packed __aligned(4);

struct mcx_reg_ppcnt {
	uint8_t			ppcnt_swid;
	uint8_t			ppcnt_local_port;
	uint8_t			ppcnt_pnat;
	uint8_t			ppcnt_grp;
#define MCX_REG_PPCNT_GRP_IEEE8023		0x00
#define MCX_REG_PPCNT_GRP_RFC2863		0x01
#define MCX_REG_PPCNT_GRP_RFC2819		0x02
#define MCX_REG_PPCNT_GRP_RFC3635		0x03
#define MCX_REG_PPCNT_GRP_PER_PRIO		0x10
#define MCX_REG_PPCNT_GRP_PER_TC		0x11
#define MCX_REG_PPCNT_GRP_PER_RX_BUFFER		0x11

	uint8_t			ppcnt_clr;
	uint8_t			ppcnt_reserved1[2];
	uint8_t			ppcnt_prio_tc;
#define MCX_REG_PPCNT_CLR			(1 << 7)

	uint8_t			ppcnt_counter_set[248];
} __packed __aligned(8);
CTASSERT(sizeof(struct mcx_reg_ppcnt) == 256);
CTASSERT((offsetof(struct mcx_reg_ppcnt, ppcnt_counter_set) %
    sizeof(uint64_t)) == 0);

enum mcx_ppcnt_ieee8023 {
	frames_transmitted_ok,
	frames_received_ok,
	frame_check_sequence_errors,
	alignment_errors,
	octets_transmitted_ok,
	octets_received_ok,
	multicast_frames_xmitted_ok,
	broadcast_frames_xmitted_ok,
	multicast_frames_received_ok,
	broadcast_frames_received_ok,
	in_range_length_errors,
	out_of_range_length_field,
	frame_too_long_errors,
	symbol_error_during_carrier,
	mac_control_frames_transmitted,
	mac_control_frames_received,
	unsupported_opcodes_received,
	pause_mac_ctrl_frames_received,
	pause_mac_ctrl_frames_transmitted,

	mcx_ppcnt_ieee8023_count
};
CTASSERT(mcx_ppcnt_ieee8023_count * sizeof(uint64_t) == 0x98);
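
/*
 * Illustrative sketch: ppcnt_counter_set holds big-endian 64-bit
 * counters indexed by the enums in this file, so pulling out a single
 * counter (e.g. frames_transmitted_ok) looks roughly like this:
 */
static inline uint64_t
mcx_ppcnt_read_sketch(const struct mcx_reg_ppcnt *ppcnt, unsigned int i)
{
	uint64_t v;

	memcpy(&v, ppcnt->ppcnt_counter_set + (i * sizeof(v)), sizeof(v));
	return (betoh64(v));
}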

enum mcx_ppcnt_rfc2863 {
	in_octets,
	in_ucast_pkts,
	in_discards,
	in_errors,
	in_unknown_protos,
	out_octets,
	out_ucast_pkts,
	out_discards,
	out_errors,
	in_multicast_pkts,
	in_broadcast_pkts,
	out_multicast_pkts,
	out_broadcast_pkts,

	mcx_ppcnt_rfc2863_count
};
CTASSERT(mcx_ppcnt_rfc2863_count * sizeof(uint64_t) == 0x68);

enum mcx_ppcnt_rfc2819 {
	drop_events,
	octets,
	pkts,
	broadcast_pkts,
	multicast_pkts,
	crc_align_errors,
	undersize_pkts,
	oversize_pkts,
	fragments,
	jabbers,
	collisions,
	pkts64octets,
	pkts65to127octets,
	pkts128to255octets,
	pkts256to511octets,
	pkts512to1023octets,
	pkts1024to1518octets,
	pkts1519to2047octets,
	pkts2048to4095octets,
	pkts4096to8191octets,
	pkts8192to10239octets,

	mcx_ppcnt_rfc2819_count
};
CTASSERT((mcx_ppcnt_rfc2819_count * sizeof(uint64_t)) == 0xa8);

enum mcx_ppcnt_rfc3635 {
	dot3stats_alignment_errors,
	dot3stats_fcs_errors,
	dot3stats_single_collision_frames,
	dot3stats_multiple_collision_frames,
	dot3stats_sqe_test_errors,
	dot3stats_deferred_transmissions,
	dot3stats_late_collisions,
	dot3stats_excessive_collisions,
	dot3stats_internal_mac_transmit_errors,
	dot3stats_carrier_sense_errors,
	dot3stats_frame_too_longs,
	dot3stats_internal_mac_receive_errors,
	dot3stats_symbol_errors,
	dot3control_in_unknown_opcodes,
	dot3in_pause_frames,
	dot3out_pause_frames,

	mcx_ppcnt_rfc3635_count
};
CTASSERT((mcx_ppcnt_rfc3635_count * sizeof(uint64_t)) == 0x80);

struct mcx_reg_mcam {
	uint8_t			_reserved1[1];
	uint8_t			mcam_feature_group;
	uint8_t			_reserved2[1];
	uint8_t			mcam_access_reg_group;
	uint8_t			_reserved3[4];
	uint8_t			mcam_access_reg_cap_mask[16];
	uint8_t			_reserved4[16];
	uint8_t			mcam_feature_cap_mask[16];
	uint8_t			_reserved5[16];
} __packed __aligned(4);

#define MCX_BITFIELD_BIT(bf, b)	(bf[(sizeof bf - 1) - (b / 8)] & (1 << (b % 8)))

#define MCX_MCAM_FEATURE_CAP_SENSOR_MAP	6
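
/*
 * Illustrative sketch: the MCAM capability masks are big-endian bit
 * arrays, so testing the sensor map capability (bit 6) goes through
 * MCX_BITFIELD_BIT like this:
 */
static inline int
mcx_mcam_sensor_map_sketch(const struct mcx_reg_mcam *mcam)
{
	return (MCX_BITFIELD_BIT(mcam->mcam_feature_cap_mask,
	    MCX_MCAM_FEATURE_CAP_SENSOR_MAP) != 0);
}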

struct mcx_reg_mtcap {
	uint8_t			_reserved1[3];
	uint8_t			mtcap_sensor_count;
	uint8_t			_reserved2[4];

	uint64_t		mtcap_sensor_map;
};

struct mcx_reg_mtmp {
	uint8_t			_reserved1[2];
	uint16_t		mtmp_sensor_index;

	uint8_t			_reserved2[2];
	uint16_t		mtmp_temperature;

	uint16_t		mtmp_mte_mtr;
#define MCX_REG_MTMP_MTE		(1 << 15)
#define MCX_REG_MTMP_MTR		(1 << 14)
	uint16_t		mtmp_max_temperature;

	uint16_t		mtmp_tee;
#define MCX_REG_MTMP_TEE_NOPE		(0 << 14)
#define MCX_REG_MTMP_TEE_GENERATE	(1 << 14)
#define MCX_REG_MTMP_TEE_GENERATE_ONE	(2 << 14)
	uint16_t		mtmp_temperature_threshold_hi;

	uint8_t			_reserved3[2];
	uint16_t		mtmp_temperature_threshold_lo;

	uint8_t			_reserved4[4];

	uint8_t			mtmp_sensor_name[8];
};
CTASSERT(sizeof(struct mcx_reg_mtmp) == 0x20);
CTASSERT(offsetof(struct mcx_reg_mtmp, mtmp_sensor_name) == 0x18);
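
/*
 * Illustrative sketch: mtmp_temperature is a signed big-endian count of
 * 0.125 degC steps (an assumption based on the PRM), so converting a raw
 * reading to the micro-Kelvin scale used by the kernel sensor framework
 * looks roughly like this:
 */
static inline int64_t
mcx_mtmp_to_muk_sketch(uint16_t raw)
{
	int16_t t = (int16_t)betoh16(raw);

	return ((int64_t)t * 125000 + 273150000);
}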

#define MCX_MCIA_EEPROM_BYTES	32
struct mcx_reg_mcia {
	uint8_t			rm_l;
	uint8_t			rm_module;
	uint8_t			rm_reserved0;
	uint8_t			rm_status;
	uint8_t			rm_i2c_addr;
	uint8_t			rm_page_num;
	uint16_t		rm_dev_addr;
	uint16_t		rm_reserved1;
	uint16_t		rm_size;
	uint32_t		rm_reserved2;
	uint8_t			rm_data[48];
} __packed __aligned(4);

struct mcx_cmd_query_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_issi_il_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_il_out) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_issi_mb_out {
	uint8_t			cmd_reserved2[16];
	uint8_t			cmd_supported_issi[80]; /* very big endian */
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_query_issi_mb_out) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_issi_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_current_issi;
	uint8_t			cmd_reserved2[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_in) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_set_issi_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_set_issi_out) <= MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_query_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_PAGES_BOOT	0x01
#define MCX_CMD_QUERY_PAGES_INIT	0x02
#define MCX_CMD_QUERY_PAGES_REGULAR	0x03
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	int32_t			cmd_num_pages;
} __packed __aligned(4);

struct mcx_cmd_manage_pages_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_MANAGE_PAGES_ALLOC_FAIL \
					0x00
#define MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS \
					0x01
#define MCX_CMD_MANAGE_PAGES_HCA_RETURN_PAGES \
					0x02
	uint8_t			cmd_reserved1[2];
	uint16_t		cmd_func_id;
	uint32_t		cmd_input_num_entries;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_in) == MCX_CMDQ_INLINE_DATASIZE);

struct mcx_cmd_manage_pages_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_output_num_entries;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cmd_manage_pages_out) == MCX_CMDQ_INLINE_DATASIZE);
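
/*
 * Illustrative sketch: the page handshake starts with QUERY_PAGES, whose
 * cmd_num_pages is a signed big-endian count; a positive value asks the
 * driver to supply that many pages via MANAGE_PAGES with
 * MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS, a negative one means the firmware
 * is handing pages back.
 */
static inline int32_t
mcx_query_pages_needed_sketch(const struct mcx_cmd_query_pages_out *out)
{
	return ((int32_t)betoh32(out->cmd_num_pages));
}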

struct mcx_cmd_query_hca_cap_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
#define MCX_CMD_QUERY_HCA_CAP_MAX	(0x0 << 0)
#define MCX_CMD_QUERY_HCA_CAP_CURRENT	(0x1 << 0)
#define MCX_CMD_QUERY_HCA_CAP_DEVICE	(0x0 << 1)
#define MCX_CMD_QUERY_HCA_CAP_OFFLOAD	(0x1 << 1)
#define MCX_CMD_QUERY_HCA_CAP_FLOW	(0x7 << 1)
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_hca_cap_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

#define MCX_HCA_CAP_LEN			0x1000
#define MCX_HCA_CAP_NMAILBOXES		\
	(MCX_HCA_CAP_LEN / MCX_CMDQ_MAILBOX_DATASIZE)

#if __GNUC_PREREQ__(4, 3)
#define __counter__		__COUNTER__
#else
#define __counter__		__LINE__
#endif

#define __token(_tok, _num)	__tok##_num
#define _token(_tok, _num)	__token(_tok, _num)
#define __reserved__		_token(__reserved, __counter__)
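
/*
 * Each use of __reserved__ below expands to a unique member name
 * (__reserved0, __reserved1, ... when __COUNTER__ is available, or
 * line-number based names otherwise), so the one spelling can declare
 * any number of distinct padding fields in the same struct.
 */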

struct mcx_cap_device {
	uint8_t			reserved0[16];

	uint8_t			log_max_srq_sz;
	uint8_t			log_max_qp_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_qp; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_QP	0x1f

	uint8_t			__reserved__[1];
	uint8_t			log_max_srq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SRQ	0x1f
	uint8_t			__reserved__[2];

	uint8_t			__reserved__[1];
	uint8_t			log_max_cq_sz;
	uint8_t			__reserved__[1];
	uint8_t			log_max_cq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_CQ	0x1f

	uint8_t			log_max_eq_sz;
	uint8_t			log_max_mkey; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MKEY	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_eq; /* 4 bits */
#define MCX_CAP_DEVICE_LOG_MAX_EQ	0x0f

	uint8_t			max_indirection;
	uint8_t			log_max_mrw_sz; /* 7 bits */
#define MCX_CAP_DEVICE_LOG_MAX_MRW_SZ	0x7f
	uint8_t			teardown_log_max_msf_list_size;
#define MCX_CAP_DEVICE_FORCE_TEARDOWN	0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSF_LIST_SIZE \
					0x3f
	uint8_t			log_max_klm_list_size; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_KLM_LIST_SIZE \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_REQ_DC	0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_dc; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_DC \
					0x3f

	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_req_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_REQ_QP \
					0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_max_ra_res_qp; /* 6 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RA_RES_QP \
					0x3f

	uint8_t			flags1;
#define MCX_CAP_DEVICE_END_PAD		0x80
#define MCX_CAP_DEVICE_CC_QUERY_ALLOWED	0x40
#define MCX_CAP_DEVICE_CC_MODIFY_ALLOWED \
					0x20
#define MCX_CAP_DEVICE_START_PAD	0x10
#define MCX_CAP_DEVICE_128BYTE_CACHELINE \
					0x08
	uint8_t			__reserved__[1];
	uint16_t		gid_table_size;

	uint16_t		flags2;
#define MCX_CAP_DEVICE_OUT_OF_SEQ_CNT	0x8000
#define MCX_CAP_DEVICE_VPORT_COUNTERS	0x4000
#define MCX_CAP_DEVICE_RETRANSMISSION_Q_COUNTERS \
					0x2000
#define MCX_CAP_DEVICE_DEBUG		0x1000
#define MCX_CAP_DEVICE_MODIFY_RQ_COUNTERS_SET_ID \
					0x8000
#define MCX_CAP_DEVICE_RQ_DELAY_DROP	0x4000
#define MCX_CAP_DEVICe_MAX_QP_CNT_MASK	0x03ff
	uint16_t		pkey_table_size;

	uint8_t			flags3;
#define MCX_CAP_DEVICE_VPORT_GROUP_MANAGER \
					0x80
#define MCX_CAP_DEVICE_VHCA_GROUP_MANAGER \
					0x40
#define MCX_CAP_DEVICE_IB_VIRTUAL	0x20
#define MCX_CAP_DEVICE_ETH_VIRTUAL	0x10
#define MCX_CAP_DEVICE_ETS		0x04
#define MCX_CAP_DEVICE_NIC_FLOW_TABLE	0x02
#define MCX_CAP_DEVICE_ESWITCH_FLOW_TABLE \
					0x01
	uint8_t			local_ca_ack_delay; /* 5 bits */
#define MCX_CAP_DEVICE_LOCAL_CA_ACK_DELAY \
					0x1f
#define MCX_CAP_DEVICE_MCAM_REG		0x40
	uint8_t			port_type;
#define MCX_CAP_DEVICE_PORT_MODULE_EVENT \
					0x80
#define MCX_CAP_DEVICE_PORT_TYPE	0x03
#define MCX_CAP_DEVICE_PORT_TYPE_ETH	0x01
	uint8_t			num_ports;

	uint8_t			snapshot_log_max_msg;
#define MCX_CAP_DEVICE_SNAPSHOT		0x80
#define MCX_CAP_DEVICE_LOG_MAX_MSG	0x1f
	uint8_t			max_tc; /* 4 bits */
#define MCX_CAP_DEVICE_MAX_TC		0x0f
	uint8_t			flags4;
#define MCX_CAP_DEVICE_TEMP_WARN_EVENT	0x80
#define MCX_CAP_DEVICE_DCBX		0x40
#define MCX_CAP_DEVICE_ROL_S		0x02
#define MCX_CAP_DEVICE_ROL_G		0x01
	uint8_t			wol;
#define MCX_CAP_DEVICE_WOL_S		0x40
#define MCX_CAP_DEVICE_WOL_G		0x20
#define MCX_CAP_DEVICE_WOL_A		0x10
#define MCX_CAP_DEVICE_WOL_B		0x08
#define MCX_CAP_DEVICE_WOL_M		0x04
#define MCX_CAP_DEVICE_WOL_U		0x02
#define MCX_CAP_DEVICE_WOL_P		0x01

	uint16_t		stat_rate_support;
	uint8_t			__reserved__[1];
	uint8_t			cqe_version; /* 4 bits */
#define MCX_CAP_DEVICE_CQE_VERSION	0x0f

	uint32_t		flags5;
#define MCX_CAP_DEVICE_COMPACT_ADDRESS_VECTOR \
					0x80000000
#define MCX_CAP_DEVICE_STRIDING_RQ	0x40000000
#define MCX_CAP_DEVICE_IPOIP_ENHANCED_OFFLOADS \
					0x10000000
#define MCX_CAP_DEVICE_IPOIP_IPOIP_OFFLOADS \
					0x08000000
#define MCX_CAP_DEVICE_DC_CONNECT_CP	0x00040000
#define MCX_CAP_DEVICE_DC_CNAK_DRACE	0x00020000
#define MCX_CAP_DEVICE_DRAIN_SIGERR	0x00010000
#define MCX_CAP_DEVICE_CMDIF_CHECKSUM	0x0000c000
#define MCX_CAP_DEVICE_SIGERR_QCE	0x00002000
#define MCX_CAP_DEVICE_WQ_SIGNATURE	0x00000800
#define MCX_CAP_DEVICE_SCTR_DATA_CQE	0x00000400
#define MCX_CAP_DEVICE_SHO		0x00000100
#define MCX_CAP_DEVICE_TPH		0x00000080
#define MCX_CAP_DEVICE_RF		0x00000040
#define MCX_CAP_DEVICE_DCT		0x00000020
#define MCX_CAP_DEVICE_QOS		0x00000010
#define MCX_CAP_DEVICe_ETH_NET_OFFLOADS	0x00000008
#define MCX_CAP_DEVICE_ROCE		0x00000004
#define MCX_CAP_DEVICE_ATOMIC		0x00000002

	uint32_t		flags6;
#define MCX_CAP_DEVICE_CQ_OI		0x80000000
#define MCX_CAP_DEVICE_CQ_RESIZE	0x40000000
#define MCX_CAP_DEVICE_CQ_MODERATION	0x20000000
#define MCX_CAP_DEVICE_CQ_PERIOD_MODE_MODIFY \
					0x10000000
#define MCX_CAP_DEVICE_CQ_INVALIDATE	0x08000000
#define MCX_CAP_DEVICE_RESERVED_AT_255	0x04000000
#define MCX_CAP_DEVICE_CQ_EQ_REMAP	0x02000000
#define MCX_CAP_DEVICE_PG		0x01000000
#define MCX_CAP_DEVICE_BLOCK_LB_MC	0x00800000
#define MCX_CAP_DEVICE_EXPONENTIAL_BACKOFF \
					0x00400000
#define MCX_CAP_DEVICE_SCQE_BREAK_MODERATION \
					0x00200000
#define MCX_CAP_DEVICE_CQ_PERIOD_START_FROM_CQE \
					0x00100000
#define MCX_CAP_DEVICE_CD		0x00080000
#define MCX_CAP_DEVICE_ATM		0x00040000
#define MCX_CAP_DEVICE_APM		0x00020000
#define MCX_CAP_DEVICE_IMAICL		0x00010000
#define MCX_CAP_DEVICE_QKV		0x00000200
#define MCX_CAP_DEVICE_PKV		0x00000100
#define MCX_CAP_DEVICE_SET_DETH_SQPN	0x00000080
#define MCX_CAP_DEVICE_XRC		0x00000008
#define MCX_CAP_DEVICE_UD		0x00000004
#define MCX_CAP_DEVICE_UC		0x00000002
#define MCX_CAP_DEVICE_RC		0x00000001

	uint8_t			uar_flags;
#define MCX_CAP_DEVICE_UAR_4K		0x80
	uint8_t			uar_sz;	/* 6 bits */
#define MCX_CAP_DEVICE_UAR_SZ		0x3f
	uint8_t			__reserved__[1];
	uint8_t			log_pg_sz;

	uint8_t			flags7;
#define MCX_CAP_DEVICE_BF		0x80
#define MCX_CAP_DEVICE_DRIVER_VERSION	0x40
#define MCX_CAP_DEVICE_PAD_TX_ETH_PACKET \
					0x20
	uint8_t			log_bf_reg_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_BF_REG_SIZE	0x1f
	uint8_t			__reserved__[2];

	uint16_t		num_of_diagnostic_counters;
	uint16_t		max_wqe_sz_sq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_rq;

	uint8_t			__reserved__[2];
	uint16_t		max_wqe_sz_sq_dc;

	uint32_t		max_qp_mcg; /* 25 bits */
#define MCX_CAP_DEVICE_MAX_QP_MCG	0x1ffffff

	uint8_t			__reserved__[3];
	uint8_t			log_max_mcq;

	uint8_t			log_max_transport_domain; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TRANSORT_DOMAIN \
					0x1f
	uint8_t			log_max_pd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_PD	0x1f
	uint8_t			__reserved__[1];
	uint8_t			log_max_xrcd; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_XRCD	0x1f

	uint8_t			__reserved__[2];
	uint16_t		max_flow_counter;

	uint8_t			log_max_rq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQ	0x1f
	uint8_t			log_max_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_SQ	0x1f
	uint8_t			log_max_tir; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIR	0x1f
	uint8_t			log_max_tis; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS	0x1f

	uint8_t			flags8;
#define MCX_CAP_DEVICE_BASIC_CYCLIC_RCV_WQE \
					0x80
#define MCX_CAP_DEVICE_LOG_MAX_RMP	0x1f
	uint8_t			log_max_rqt; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT	0x1f
	uint8_t			log_max_rqt_size; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_RQT_SIZE	0x1f
	uint8_t			log_max_tis_per_sq; /* 5 bits */
#define MCX_CAP_DEVICE_LOG_MAX_TIS_PER_SQ \
					0x1f

	uint8_t			flags9;
#define MXC_CAP_DEVICE_EXT_STRIDE_NUM_RANGES \
					0x80
#define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_RQ \
					0x1f
	uint8_t			log_min_stride_sz_rq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_RQ \
					0x1f
	uint8_t			log_max_stride_sz_sq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_STRIDE_SZ_SQ \
					0x1f
	uint8_t			log_min_stride_sz_sq; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MIN_STRIDE_SZ_SQ \
					0x1f

	uint8_t			log_max_hairpin_queues;
#define MXC_CAP_DEVICE_HAIRPIN		0x80
#define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_QUEUES \
					0x1f
	uint8_t			log_min_hairpin_queues;
#define MXC_CAP_DEVICE_LOG_MIN_HAIRPIN_QUEUES \
					0x1f
	uint8_t			log_max_hairpin_num_packets;
#define MXC_CAP_DEVICE_LOG_MAX_HAIRPIN_NUM_PACKETS \
					0x1f
	uint8_t			log_max_mq_sz;
#define MXC_CAP_DEVICE_LOG_MAX_WQ_SZ \
					0x1f

	uint8_t			log_min_hairpin_wq_data_sz;
#define MXC_CAP_DEVICE_NIC_VPORT_CHANGE_EVENT \
					0x80
#define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_UC \
					0x40
#define MXC_CAP_DEVICE_DISABLE_LOCAL_LB_MC \
					0x20
#define MCX_CAP_DEVICE_LOG_MIN_HAIRPIN_WQ_DATA_SZ \
					0x1f
	uint8_t			log_max_vlan_list;
#define MXC_CAP_DEVICE_SYSTEM_IMAGE_GUID_MODIFIABLE \
					0x80
#define MXC_CAP_DEVICE_LOG_MAX_VLAN_LIST \
					0x1f
	uint8_t			log_max_current_mc_list;
#define MXC_CAP_DEVICE_LOG_MAX_CURRENT_MC_LIST \
					0x1f
	uint8_t			log_max_current_uc_list;
#define MXC_CAP_DEVICE_LOG_MAX_CURRENT_UC_LIST \
					0x1f

	uint8_t			__reserved__[4];

	uint32_t		create_qp_start_hint; /* 24 bits */

	uint8_t			log_max_uctx; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_UCTX	0x1f
	uint8_t			log_max_umem; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_UMEM	0x1f
	uint16_t		max_num_eqs;

	uint8_t			log_max_l2_table; /* 5 bits */
#define MXC_CAP_DEVICE_LOG_MAX_L2_TABLE	0x1f
	uint8_t			__reserved__[1];
	uint16_t		log_uar_page_sz;

	uint8_t			__reserved__[8];

	uint32_t		device_frequency_mhz;
	uint32_t		device_frequency_khz;
} __packed __aligned(8);

CTASSERT(offsetof(struct mcx_cap_device, max_indirection) == 0x20);
CTASSERT(offsetof(struct mcx_cap_device, flags1) == 0x2c);
CTASSERT(offsetof(struct mcx_cap_device, flags2) == 0x30);
CTASSERT(offsetof(struct mcx_cap_device, snapshot_log_max_msg) == 0x38);
CTASSERT(offsetof(struct mcx_cap_device, flags5) == 0x40);
CTASSERT(offsetof(struct mcx_cap_device, flags7) == 0x4c);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_mhz) == 0x98);
CTASSERT(offsetof(struct mcx_cap_device, device_frequency_khz) == 0x9c);
CTASSERT(sizeof(struct mcx_cap_device) <= MCX_CMDQ_MAILBOX_DATASIZE);

struct mcx_cmd_set_driver_version_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_set_driver_version {
	uint8_t			cmd_driver_version[64];
} __packed __aligned(8);

struct mcx_cmd_modify_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_field_select;
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_ADDR	0x04
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC	0x10
#define MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU	0x40
} __packed __aligned(4);

struct mcx_cmd_modify_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[4];
	uint8_t			cmd_allowed_list_type;
	uint8_t			cmd_reserved2[3];
} __packed __aligned(4);

struct mcx_cmd_query_nic_vport_context_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_nic_vport_ctx {
	uint32_t		vp_min_wqe_inline_mode;
	uint8_t			vp_reserved0[32];
	uint32_t		vp_mtu;
	uint8_t			vp_reserved1[200];
	uint16_t		vp_flags;
#define MCX_NIC_VPORT_CTX_LIST_UC_MAC			(0)
#define MCX_NIC_VPORT_CTX_LIST_MC_MAC			(1 << 24)
#define MCX_NIC_VPORT_CTX_LIST_VLAN			(2 << 24)
#define MCX_NIC_VPORT_CTX_PROMISC_ALL			(1 << 13)
#define MCX_NIC_VPORT_CTX_PROMISC_MCAST			(1 << 14)
#define MCX_NIC_VPORT_CTX_PROMISC_UCAST			(1 << 15)
	uint16_t		vp_allowed_list_size;
	uint64_t		vp_perm_addr;
	uint8_t			vp_reserved2[4];
	/* allowed list follows */
} __packed __aligned(4);

struct mcx_counter {
	uint64_t		packets;
	uint64_t		octets;
} __packed __aligned(4);

struct mcx_nic_vport_counters {
	struct mcx_counter	rx_err;
	struct mcx_counter	tx_err;
	uint8_t			reserved0[64]; /* 0x30 */
	struct mcx_counter	rx_bcast;
	struct mcx_counter	tx_bcast;
	struct mcx_counter	rx_ucast;
	struct mcx_counter	tx_ucast;
	struct mcx_counter	rx_mcast;
	struct mcx_counter	tx_mcast;
	uint8_t			reserved1[0x210 - 0xd0];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[7];
} __packed __aligned(4);

struct mcx_cmd_query_vport_counters_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_mb_in {
	uint8_t			cmd_reserved0[8];
	uint8_t			cmd_clear;
	uint8_t			cmd_reserved1[5];
	uint16_t		cmd_flow_counter_id;
} __packed __aligned(4);

struct mcx_cmd_query_flow_counter_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_uar_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_uar;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_special_ctx_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[4];
	uint32_t		cmd_resd_lkey;
} __packed __aligned(4);

struct mcx_eq_ctx {
	uint32_t		eq_status;
#define MCX_EQ_CTX_STATE_SHIFT		8
#define MCX_EQ_CTX_STATE_MASK		(0xf << MCX_EQ_CTX_STATE_SHIFT)
#define MCX_EQ_CTX_STATE_ARMED		0x9
#define MCX_EQ_CTX_STATE_FIRED		0xa
#define MCX_EQ_CTX_OI_SHIFT		17
#define MCX_EQ_CTX_OI			(1 << MCX_EQ_CTX_OI_SHIFT)
#define MCX_EQ_CTX_EC_SHIFT		18
#define MCX_EQ_CTX_EC			(1 << MCX_EQ_CTX_EC_SHIFT)
#define MCX_EQ_CTX_STATUS_SHIFT		28
#define MCX_EQ_CTX_STATUS_MASK		(0xf << MCX_EQ_CTX_STATUS_SHIFT)
#define MCX_EQ_CTX_STATUS_OK		0x0
#define MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE 0xa
	uint32_t		eq_reserved1;
	uint32_t		eq_page_offset;
#define MCX_EQ_CTX_PAGE_OFFSET_SHIFT	5
	uint32_t		eq_uar_size;
#define MCX_EQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT	24
	uint32_t		eq_reserved2;
	uint8_t			eq_reserved3[3];
	uint8_t			eq_intr;
	uint32_t		eq_log_page_size;
#define MCX_EQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		eq_reserved4[3];
	uint32_t		eq_consumer_counter;
	uint32_t		eq_producer_counter;
#define MCX_EQ_CTX_COUNTER_MASK		0xffffff
	uint32_t		eq_reserved5[4];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_ctx) == 64);

struct mcx_cmd_create_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_eq_mb_in {
	struct mcx_eq_ctx	cmd_eq_ctx;
	uint8_t			cmd_reserved0[8];
	uint64_t		cmd_event_bitmask;
#define MCX_EVENT_TYPE_COMPLETION	0x00
#define MCX_EVENT_TYPE_CQ_ERROR		0x04
#define MCX_EVENT_TYPE_INTERNAL_ERROR	0x08
#define MCX_EVENT_TYPE_PORT_CHANGE	0x09
#define MCX_EVENT_TYPE_CMD_COMPLETION	0x0a
#define MCX_EVENT_TYPE_PAGE_REQUEST	0x0b
#define MCX_EVENT_TYPE_LAST_WQE		0x13
	uint8_t			cmd_reserved1[176];
} __packed __aligned(4);

struct mcx_cmd_create_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_eqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_eq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_eq_entry {
	uint8_t			eq_reserved1;
	uint8_t			eq_event_type;
	uint8_t			eq_reserved2;
	uint8_t			eq_event_sub_type;

	uint8_t			eq_reserved3[28];
	uint32_t		eq_event_data[7];
	uint8_t			eq_reserved4[2];
	uint8_t			eq_signature;
	uint8_t			eq_owner;
#define MCX_EQ_ENTRY_OWNER_INIT			1
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_eq_entry) == 64);
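
/*
 * Illustrative sketch: an event queue entry belongs to software when its
 * owner bit matches the wrap parity of the consumer counter, so with cc
 * as a hypothetical running consumer counter the validity test is
 * roughly:
 */
static inline int
mcx_eq_entry_valid_sketch(const struct mcx_eq_entry *eqe, uint32_t cc)
{
	return ((eqe->eq_owner & 1) == ((cc >> MCX_LOG_EQ_SIZE) & 1));
}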

struct mcx_cmd_alloc_pd_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_pd_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_pd;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_alloc_td_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_create_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tir_mb_in {
	uint8_t			cmd_reserved0[20];
	uint32_t		cmd_disp_type;
#define MCX_TIR_CTX_DISP_TYPE_DIRECT	0
#define MCX_TIR_CTX_DISP_TYPE_INDIRECT	1
#define MCX_TIR_CTX_DISP_TYPE_SHIFT	28
	uint8_t			cmd_reserved1[8];
	uint32_t		cmd_lro;
	uint8_t			cmd_reserved2[8];
	uint32_t		cmd_inline_rqn;
	uint32_t		cmd_indir_table;
	uint32_t		cmd_tdomain;
#define MCX_TIR_CTX_HASH_TOEPLITZ	2
#define MCX_TIR_CTX_HASH_SHIFT		28
	uint8_t			cmd_rx_hash_key[40];
	uint32_t		cmd_rx_hash_sel_outer;
#define MCX_TIR_CTX_HASH_SEL_SRC_IP	(1 << 0)
#define MCX_TIR_CTX_HASH_SEL_DST_IP	(1 << 1)
#define MCX_TIR_CTX_HASH_SEL_SPORT	(1 << 2)
#define MCX_TIR_CTX_HASH_SEL_DPORT	(1 << 3)
#define MCX_TIR_CTX_HASH_SEL_IPV4	(0 << 31)
#define MCX_TIR_CTX_HASH_SEL_IPV6	(1U << 31)
#define MCX_TIR_CTX_HASH_SEL_TCP	(0 << 30)
#define MCX_TIR_CTX_HASH_SEL_UDP	(1 << 30)
	uint32_t		cmd_rx_hash_sel_inner;
	uint8_t			cmd_reserved3[152];
} __packed __aligned(4);
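
/*
 * Illustrative sketch: an RSS TIR hashing TCP/IPv4 4-tuples would select
 * both addresses and both ports in cmd_rx_hash_sel_outer (byte-swapped
 * before it goes into the mailbox); the IPV4 and TCP selectors are
 * zero-valued and shown only for documentation:
 */
static inline uint32_t
mcx_tir_rss_tcp4_sel_sketch(void)
{
	return (MCX_TIR_CTX_HASH_SEL_SRC_IP | MCX_TIR_CTX_HASH_SEL_DST_IP |
	    MCX_TIR_CTX_HASH_SEL_SPORT | MCX_TIR_CTX_HASH_SEL_DPORT |
	    MCX_TIR_CTX_HASH_SEL_IPV4 | MCX_TIR_CTX_HASH_SEL_TCP);
}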

struct mcx_cmd_create_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tirn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tir_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_tis_mb_in {
	uint8_t			cmd_reserved[16];
	uint32_t		cmd_prio;
	uint8_t			cmd_reserved1[32];
	uint32_t		cmd_tdomain;
	uint8_t			cmd_reserved2[120];
} __packed __aligned(4);

struct mcx_cmd_create_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_tisn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_tis_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_rqt_ctx {
	uint8_t			cmd_reserved0[20];
	uint16_t		cmd_reserved1;
	uint16_t		cmd_rqt_max_size;
	uint16_t		cmd_reserved2;
	uint16_t		cmd_rqt_actual_size;
	uint8_t			cmd_reserved3[212];
} __packed __aligned(4);

struct mcx_cmd_create_rqt_mb_in {
	uint8_t			cmd_reserved0[16];
	struct mcx_rqt_ctx	cmd_rqt;
} __packed __aligned(4);

struct mcx_cmd_create_rqt_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_rqtn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rqtn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rqt_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_ctx {
	uint32_t		cq_status;
#define MCX_CQ_CTX_STATUS_SHIFT		28
#define MCX_CQ_CTX_STATUS_MASK		(0xf << MCX_CQ_CTX_STATUS_SHIFT)
#define MCX_CQ_CTX_STATUS_OK		0x0
#define MCX_CQ_CTX_STATUS_OVERFLOW	0x9
#define MCX_CQ_CTX_STATUS_WRITE_FAIL	0xa
#define MCX_CQ_CTX_STATE_SHIFT		8
#define MCX_CQ_CTX_STATE_MASK		(0xf << MCX_CQ_CTX_STATE_SHIFT)
#define MCX_CQ_CTX_STATE_SOLICITED	0x6
#define MCX_CQ_CTX_STATE_ARMED		0x9
#define MCX_CQ_CTX_STATE_FIRED		0xa
	uint32_t		cq_reserved1;
	uint32_t		cq_page_offset;
	uint32_t		cq_uar_size;
#define MCX_CQ_CTX_UAR_PAGE_MASK	0xffffff
#define MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT	24
	uint32_t		cq_period_max_count;
#define MCX_CQ_CTX_PERIOD_SHIFT		16
	uint32_t		cq_eqn;
	uint32_t		cq_log_page_size;
#define MCX_CQ_CTX_LOG_PAGE_SIZE_SHIFT	24
	uint32_t		cq_reserved2;
	uint32_t		cq_last_notified;
	uint32_t		cq_last_solicit;
	uint32_t		cq_consumer_counter;
	uint32_t		cq_producer_counter;
	uint8_t			cq_reserved3[8];
	uint64_t		cq_doorbell;
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_ctx) == 64);

struct mcx_cmd_create_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_cq_mb_in {
	struct mcx_cq_ctx	cmd_cq_ctx;
	uint8_t			cmd_reserved1[192];
} __packed __aligned(4);

struct mcx_cmd_create_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_query_cq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_cqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_query_cq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cq_entry {
	uint32_t		__reserved__;
	uint32_t		cq_lro;
	uint32_t		cq_lro_ack_seq_num;
	uint32_t		cq_rx_hash;
	uint8_t			cq_rx_hash_type;
	uint8_t			cq_ml_path;
	uint16_t		__reserved__;
	uint32_t		cq_checksum;
	uint32_t		__reserved__;
	uint32_t		cq_flags;
#define MCX_CQ_ENTRY_FLAGS_L4_OK		(1 << 26)
#define MCX_CQ_ENTRY_FLAGS_L3_OK		(1 << 25)
#define MCX_CQ_ENTRY_FLAGS_L2_OK		(1 << 24)
#define MCX_CQ_ENTRY_FLAGS_CV			(1 << 16)
#define MCX_CQ_ENTRY_FLAGS_VLAN_MASK		(0xffff)

	uint32_t		cq_lro_srqn;
	uint32_t		__reserved__[2];
	uint32_t		cq_byte_cnt;
	uint64_t		cq_timestamp;
	uint8_t			cq_rx_drops;
	uint8_t			cq_flow_tag[3];
	uint16_t		cq_wqe_count;
	uint8_t			cq_signature;
	uint8_t			cq_opcode_owner;
#define MCX_CQ_ENTRY_FLAG_OWNER			(1 << 0)
#define MCX_CQ_ENTRY_FLAG_SE			(1 << 1)
#define MCX_CQ_ENTRY_FORMAT_SHIFT		2
#define MCX_CQ_ENTRY_OPCODE_SHIFT		4

#define MCX_CQ_ENTRY_FORMAT_NO_INLINE		0
#define MCX_CQ_ENTRY_FORMAT_INLINE_32		1
#define MCX_CQ_ENTRY_FORMAT_INLINE_64		2
#define MCX_CQ_ENTRY_FORMAT_COMPRESSED		3

#define MCX_CQ_ENTRY_OPCODE_REQ			0
#define MCX_CQ_ENTRY_OPCODE_SEND		2
#define MCX_CQ_ENTRY_OPCODE_REQ_ERR		13
#define MCX_CQ_ENTRY_OPCODE_SEND_ERR		14
#define MCX_CQ_ENTRY_OPCODE_INVALID		15

} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_cq_entry) == 64);
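
/*
 * Illustrative sketch: like event queue entries, a completion queue
 * entry is valid once the owner bit in cq_opcode_owner matches the wrap
 * parity of the consumer counter (cc, hypothetical here):
 */
static inline int
mcx_cq_entry_valid_sketch(const struct mcx_cq_entry *cqe, uint32_t cc)
{
	return ((cqe->cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
	    ((cc >> MCX_LOG_CQ_SIZE) & 1));
}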

struct mcx_cq_doorbell {
	uint32_t		 db_update_ci;
	uint32_t		 db_arm_ci;
#define MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT	28
#define MCX_CQ_DOORBELL_ARM_CMD			(1 << 24)
#define MCX_CQ_DOORBELL_ARM_CI_MASK		(0xffffff)
} __packed __aligned(8);
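
/*
 * Illustrative sketch: arming a completion queue stores the consumer
 * index and an arm command, tagged with a rolling sequence number sn
 * (hypothetical), into the big-endian doorbell record:
 */
static inline void
mcx_cq_arm_sketch(struct mcx_cq_doorbell *db, uint32_t cc, uint32_t sn)
{
	htobem32(&db->db_update_ci, cc & MCX_CQ_DOORBELL_ARM_CI_MASK);
	htobem32(&db->db_arm_ci,
	    ((sn & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT) |
	    MCX_CQ_DOORBELL_ARM_CMD |
	    (cc & MCX_CQ_DOORBELL_ARM_CI_MASK));
}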

struct mcx_wq_ctx {
	uint8_t			 wq_type;
#define MCX_WQ_CTX_TYPE_CYCLIC			(1 << 4)
#define MCX_WQ_CTX_TYPE_SIGNATURE		(1 << 3)
	uint8_t			 wq_reserved0[5];
	uint16_t		 wq_lwm;
	uint32_t		 wq_pd;
	uint32_t		 wq_uar_page;
	uint64_t		 wq_doorbell;
	uint32_t		 wq_hw_counter;
	uint32_t		 wq_sw_counter;
	uint16_t		 wq_log_stride;
	uint8_t			 wq_log_page_sz;
	uint8_t			 wq_log_size;
	uint8_t			 wq_reserved1[156];
} __packed __aligned(4);

CTASSERT(sizeof(struct mcx_wq_ctx) == 0xC0);

struct mcx_sq_ctx {
	uint32_t		sq_flags;
#define MCX_SQ_CTX_RLKEY			(1U << 31)
#define MCX_SQ_CTX_FRE_SHIFT			(1 << 29)
#define MCX_SQ_CTX_FLUSH_IN_ERROR		(1 << 28)
#define MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT		24
#define MCX_SQ_CTX_STATE_SHIFT			20
#define MCX_SQ_CTX_STATE_MASK			(0xf << 20)
#define MCX_SQ_CTX_STATE_RST			0
#define MCX_SQ_CTX_STATE_RDY			1
#define MCX_SQ_CTX_STATE_ERR			3
	uint32_t		sq_user_index;
	uint32_t		sq_cqn;
	uint32_t		sq_reserved1[5];
	uint32_t		sq_tis_lst_sz;
#define MCX_SQ_CTX_TIS_LST_SZ_SHIFT		16
	uint32_t		sq_reserved2[2];
	uint32_t		sq_tis_num;
	struct mcx_wq_ctx	sq_wq;
} __packed __aligned(4);

struct mcx_sq_entry_seg {
	uint32_t		sqs_byte_count;
	uint32_t		sqs_lkey;
	uint64_t		sqs_addr;
} __packed __aligned(4);

struct mcx_sq_entry {
	/* control segment */
	uint32_t		sqe_opcode_index;
#define MCX_SQE_WQE_INDEX_SHIFT			8
#define MCX_SQE_WQE_OPCODE_NOP			0x00
#define MCX_SQE_WQE_OPCODE_SEND			0x0a
	uint32_t		sqe_ds_sq_num;
#define MCX_SQE_SQ_NUM_SHIFT			8
	uint32_t		sqe_signature;
#define MCX_SQE_SIGNATURE_SHIFT			24
#define MCX_SQE_SOLICITED_EVENT			0x02
#define MCX_SQE_CE_CQE_ON_ERR			0x00
#define MCX_SQE_CE_CQE_FIRST_ERR		0x04
#define MCX_SQE_CE_CQE_ALWAYS			0x08
#define MCX_SQE_CE_CQE_SOLICIT			0x0C
#define MCX_SQE_FM_NO_FENCE			0x00
#define MCX_SQE_FM_SMALL_FENCE			0x40
	uint32_t		sqe_mkey;

	/* ethernet segment */
	uint32_t		sqe_reserved1;
	uint32_t		sqe_mss_csum;
#define MCX_SQE_L4_CSUM				(1U << 31)
#define MCX_SQE_L3_CSUM				(1 << 30)
	uint32_t		sqe_reserved2;
	uint16_t		sqe_inline_header_size;
	uint16_t		sqe_inline_headers[9];

	/* data segment */
	struct mcx_sq_entry_seg sqe_segs[1];
} __packed __aligned(64);

CTASSERT(sizeof(struct mcx_sq_entry) == 64);
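
/*
 * Illustrative sketch: a send WQE's control segment packs the WQE index
 * and SEND opcode into sqe_opcode_index and the data-segment count (nds)
 * alongside the SQ number; idx, sqn and nds are hypothetical parameters
 * here, and the ethernet segment would additionally carry the first
 * MCX_SQ_INLINE_SIZE bytes of the frame.
 */
static inline void
mcx_sq_ctrl_sketch(struct mcx_sq_entry *sqe, unsigned int idx,
    unsigned int sqn, unsigned int nds)
{
	sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
	    ((idx & MCX_WQ_DOORBELL_MASK) << MCX_SQE_WQE_INDEX_SHIFT));
	sqe->sqe_ds_sq_num = htobe32((sqn << MCX_SQE_SQ_NUM_SHIFT) | nds);
	sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
}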

struct mcx_cmd_create_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_sq_mb_in {
	uint32_t		cmd_modify_hi;
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_sq_ctx	cmd_sq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_sqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_sq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_rq_ctx {
	uint32_t		rq_flags;
#define MCX_RQ_CTX_RLKEY			(1U << 31)
#define MCX_RQ_CTX_VLAN_STRIP_DIS		(1 << 28)
#define MCX_RQ_CTX_MEM_RQ_TYPE_SHIFT		24
#define MCX_RQ_CTX_STATE_SHIFT			20
#define MCX_RQ_CTX_STATE_MASK			(0xf << 20)
#define MCX_RQ_CTX_STATE_RST			0
#define MCX_RQ_CTX_STATE_RDY			1
#define MCX_RQ_CTX_STATE_ERR			3
#define MCX_RQ_CTX_FLUSH_IN_ERROR		(1 << 18)
	uint32_t		rq_user_index;
	uint32_t		rq_cqn;
	uint32_t		rq_reserved1;
	uint32_t		rq_rmpn;
	uint32_t		rq_reserved2[7];
	struct mcx_wq_ctx	rq_wq;
} __packed __aligned(4);

struct mcx_rq_entry {
	uint32_t		rqe_byte_count;
	uint32_t		rqe_lkey;
	uint64_t		rqe_addr;
} __packed __aligned(16);

struct mcx_cmd_create_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint32_t		cmd_rqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rq_state;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_modify_rq_mb_in {
	uint32_t		cmd_modify_hi;
	uint32_t		cmd_modify_lo;
	uint8_t			cmd_reserved0[8];
	struct mcx_rq_ctx	cmd_rq_ctx;
} __packed __aligned(4);

struct mcx_cmd_modify_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_in {
	uint16_t		cmd_opcode;
	uint8_t			cmd_reserved0[4];
	uint16_t		cmd_op_mod;
	uint32_t		cmd_rqn;
	uint8_t			cmd_reserved1[4];
} __packed __aligned(4);

struct mcx_cmd_destroy_rq_out {
	uint8_t			cmd_status;
	uint8_t			cmd_reserved0[3];
	uint32_t		cmd_syndrome;
	uint8_t			cmd_reserved1[8];
} __packed __aligned(4);

struct mcx_cmd_create_flow_table_in {
1880 	uint16_t		cmd_opcode;
1881 	uint8_t			cmd_reserved0[4];
1882 	uint16_t		cmd_op_mod;
1883 	uint8_t			cmd_reserved1[8];
1884 } __packed __aligned(4);
1885 
1886 struct mcx_flow_table_ctx {
1887 	uint8_t			ft_miss_action;
1888 	uint8_t			ft_level;
1889 	uint8_t			ft_reserved0;
1890 	uint8_t			ft_log_size;
1891 	uint32_t		ft_table_miss_id;
1892 	uint8_t			ft_reserved1[28];
1893 } __packed __aligned(4);
1894 
1895 struct mcx_cmd_create_flow_table_mb_in {
1896 	uint8_t			cmd_table_type;
1897 	uint8_t			cmd_reserved0[7];
1898 	struct mcx_flow_table_ctx cmd_ctx;
1899 } __packed __aligned(4);
1900 
1901 struct mcx_cmd_create_flow_table_out {
1902 	uint8_t			cmd_status;
1903 	uint8_t			cmd_reserved0[3];
1904 	uint32_t		cmd_syndrome;
1905 	uint32_t		cmd_table_id;
1906 	uint8_t			cmd_reserved1[4];
1907 } __packed __aligned(4);
1908 
1909 struct mcx_cmd_destroy_flow_table_in {
1910 	uint16_t		cmd_opcode;
1911 	uint8_t			cmd_reserved0[4];
1912 	uint16_t		cmd_op_mod;
1913 	uint8_t			cmd_reserved1[8];
1914 } __packed __aligned(4);
1915 
1916 struct mcx_cmd_destroy_flow_table_mb_in {
1917 	uint8_t			cmd_table_type;
1918 	uint8_t			cmd_reserved0[3];
1919 	uint32_t		cmd_table_id;
1920 	uint8_t			cmd_reserved1[40];
1921 } __packed __aligned(4);
1922 
1923 struct mcx_cmd_destroy_flow_table_out {
1924 	uint8_t			cmd_status;
1925 	uint8_t			cmd_reserved0[3];
1926 	uint32_t		cmd_syndrome;
1927 	uint8_t			cmd_reserved1[8];
1928 } __packed __aligned(4);
1929 
1930 struct mcx_cmd_set_flow_table_root_in {
1931 	uint16_t		cmd_opcode;
1932 	uint8_t			cmd_reserved0[4];
1933 	uint16_t		cmd_op_mod;
1934 	uint8_t			cmd_reserved1[8];
1935 } __packed __aligned(4);
1936 
1937 struct mcx_cmd_set_flow_table_root_mb_in {
1938 	uint8_t			cmd_table_type;
1939 	uint8_t			cmd_reserved0[3];
1940 	uint32_t		cmd_table_id;
1941 	uint8_t			cmd_reserved1[56];
1942 } __packed __aligned(4);
1943 
1944 struct mcx_cmd_set_flow_table_root_out {
1945 	uint8_t			cmd_status;
1946 	uint8_t			cmd_reserved0[3];
1947 	uint32_t		cmd_syndrome;
1948 	uint8_t			cmd_reserved1[8];
1949 } __packed __aligned(4);
1950 
1951 struct mcx_flow_match {
1952 	/* outer headers */
1953 	uint8_t			mc_src_mac[6];
1954 	uint16_t		mc_ethertype;
1955 	uint8_t			mc_dest_mac[6];
1956 	uint16_t		mc_first_vlan;
1957 	uint8_t			mc_ip_proto;
1958 	uint8_t			mc_ip_dscp_ecn;
1959 	uint8_t			mc_vlan_flags;
1960 #define MCX_FLOW_MATCH_IP_FRAG	(1 << 5)
1961 	uint8_t			mc_tcp_flags;
1962 	uint16_t		mc_tcp_sport;
1963 	uint16_t		mc_tcp_dport;
1964 	uint32_t		mc_reserved0;
1965 	uint16_t		mc_udp_sport;
1966 	uint16_t		mc_udp_dport;
1967 	uint8_t			mc_src_ip[16];
1968 	uint8_t			mc_dest_ip[16];
1969 
1970 	/* misc parameters */
1971 	uint8_t			mc_reserved1[8];
1972 	uint16_t		mc_second_vlan;
1973 	uint8_t			mc_reserved2[2];
1974 	uint8_t			mc_second_vlan_flags;
1975 	uint8_t			mc_reserved3[15];
1976 	uint32_t		mc_outer_ipv6_flow_label;
1977 	uint8_t			mc_reserved4[32];
1978 
1979 	uint8_t			mc_reserved[384];
1980 } __packed __aligned(4);
1981 
1982 CTASSERT(sizeof(struct mcx_flow_match) == 512);
1983 
1984 struct mcx_cmd_create_flow_group_in {
1985 	uint16_t		cmd_opcode;
1986 	uint8_t			cmd_reserved0[4];
1987 	uint16_t		cmd_op_mod;
1988 	uint8_t			cmd_reserved1[8];
1989 } __packed __aligned(4);
1990 
1991 struct mcx_cmd_create_flow_group_mb_in {
1992 	uint8_t			cmd_table_type;
1993 	uint8_t			cmd_reserved0[3];
1994 	uint32_t		cmd_table_id;
1995 	uint8_t			cmd_reserved1[4];
1996 	uint32_t		cmd_start_flow_index;
1997 	uint8_t			cmd_reserved2[4];
1998 	uint32_t		cmd_end_flow_index;
1999 	uint8_t			cmd_reserved3[23];
2000 	uint8_t			cmd_match_criteria_enable;
2001 #define MCX_CREATE_FLOW_GROUP_CRIT_OUTER	(1 << 0)
2002 #define MCX_CREATE_FLOW_GROUP_CRIT_MISC		(1 << 1)
2003 #define MCX_CREATE_FLOW_GROUP_CRIT_INNER	(1 << 2)
2004 	struct mcx_flow_match	cmd_match_criteria;
2005 	uint8_t			cmd_reserved4[448];
2006 } __packed __aligned(4);
2007 
2008 struct mcx_cmd_create_flow_group_out {
2009 	uint8_t			cmd_status;
2010 	uint8_t			cmd_reserved0[3];
2011 	uint32_t		cmd_syndrome;
2012 	uint32_t		cmd_group_id;
2013 	uint8_t			cmd_reserved1[4];
2014 } __packed __aligned(4);
2015 
2016 struct mcx_flow_ctx {
2017 	uint8_t			fc_reserved0[4];
2018 	uint32_t		fc_group_id;
2019 	uint32_t		fc_flow_tag;
2020 	uint32_t		fc_action;
2021 #define MCX_FLOW_CONTEXT_ACTION_ALLOW		(1 << 0)
2022 #define MCX_FLOW_CONTEXT_ACTION_DROP		(1 << 1)
2023 #define MCX_FLOW_CONTEXT_ACTION_FORWARD		(1 << 2)
2024 #define MCX_FLOW_CONTEXT_ACTION_COUNT		(1 << 3)
2025 	uint32_t		fc_dest_list_size;
2026 	uint32_t		fc_counter_list_size;
2027 	uint8_t			fc_reserved1[40];
2028 	struct mcx_flow_match	fc_match_value;
2029 	uint8_t			fc_reserved2[192];
2030 } __packed __aligned(4);
2031 
2032 #define MCX_FLOW_CONTEXT_DEST_TYPE_TABLE	(1 << 24)
2033 #define MCX_FLOW_CONTEXT_DEST_TYPE_TIR		(2 << 24)
2034 
2035 struct mcx_cmd_destroy_flow_group_in {
2036 	uint16_t		cmd_opcode;
2037 	uint8_t			cmd_reserved0[4];
2038 	uint16_t		cmd_op_mod;
2039 	uint8_t			cmd_reserved1[8];
2040 } __packed __aligned(4);
2041 
2042 struct mcx_cmd_destroy_flow_group_mb_in {
2043 	uint8_t			cmd_table_type;
2044 	uint8_t			cmd_reserved0[3];
2045 	uint32_t		cmd_table_id;
2046 	uint32_t		cmd_group_id;
2047 	uint8_t			cmd_reserved1[36];
2048 } __packed __aligned(4);
2049 
2050 struct mcx_cmd_destroy_flow_group_out {
2051 	uint8_t			cmd_status;
2052 	uint8_t			cmd_reserved0[3];
2053 	uint32_t		cmd_syndrome;
2054 	uint8_t			cmd_reserved1[8];
2055 } __packed __aligned(4);
2056 
2057 struct mcx_cmd_set_flow_table_entry_in {
2058 	uint16_t		cmd_opcode;
2059 	uint8_t			cmd_reserved0[4];
2060 	uint16_t		cmd_op_mod;
2061 	uint8_t			cmd_reserved1[8];
2062 } __packed __aligned(4);
2063 
2064 struct mcx_cmd_set_flow_table_entry_mb_in {
2065 	uint8_t			cmd_table_type;
2066 	uint8_t			cmd_reserved0[3];
2067 	uint32_t		cmd_table_id;
2068 	uint32_t		cmd_modify_enable_mask;
2069 	uint8_t			cmd_reserved1[4];
2070 	uint32_t		cmd_flow_index;
2071 	uint8_t			cmd_reserved2[28];
2072 	struct mcx_flow_ctx	cmd_flow_ctx;
2073 } __packed __aligned(4);
2074 
2075 struct mcx_cmd_set_flow_table_entry_out {
2076 	uint8_t			cmd_status;
2077 	uint8_t			cmd_reserved0[3];
2078 	uint32_t		cmd_syndrome;
2079 	uint8_t			cmd_reserved1[8];
2080 } __packed __aligned(4);
2081 
2082 struct mcx_cmd_query_flow_table_entry_in {
2083 	uint16_t		cmd_opcode;
2084 	uint8_t			cmd_reserved0[4];
2085 	uint16_t		cmd_op_mod;
2086 	uint8_t			cmd_reserved1[8];
2087 } __packed __aligned(4);
2088 
2089 struct mcx_cmd_query_flow_table_entry_mb_in {
2090 	uint8_t			cmd_table_type;
2091 	uint8_t			cmd_reserved0[3];
2092 	uint32_t		cmd_table_id;
2093 	uint8_t			cmd_reserved1[8];
2094 	uint32_t		cmd_flow_index;
2095 	uint8_t			cmd_reserved2[28];
2096 } __packed __aligned(4);
2097 
2098 struct mcx_cmd_query_flow_table_entry_out {
2099 	uint8_t			cmd_status;
2100 	uint8_t			cmd_reserved0[3];
2101 	uint32_t		cmd_syndrome;
2102 	uint8_t			cmd_reserved1[8];
2103 } __packed __aligned(4);
2104 
2105 struct mcx_cmd_query_flow_table_entry_mb_out {
2106 	uint8_t			cmd_reserved0[48];
2107 	struct mcx_flow_ctx	cmd_flow_ctx;
2108 } __packed __aligned(4);
2109 
2110 struct mcx_cmd_delete_flow_table_entry_in {
2111 	uint16_t		cmd_opcode;
2112 	uint8_t			cmd_reserved0[4];
2113 	uint16_t		cmd_op_mod;
2114 	uint8_t			cmd_reserved1[8];
2115 } __packed __aligned(4);
2116 
2117 struct mcx_cmd_delete_flow_table_entry_mb_in {
2118 	uint8_t			cmd_table_type;
2119 	uint8_t			cmd_reserved0[3];
2120 	uint32_t		cmd_table_id;
2121 	uint8_t			cmd_reserved1[8];
2122 	uint32_t		cmd_flow_index;
2123 	uint8_t			cmd_reserved2[28];
2124 } __packed __aligned(4);
2125 
2126 struct mcx_cmd_delete_flow_table_entry_out {
2127 	uint8_t			cmd_status;
2128 	uint8_t			cmd_reserved0[3];
2129 	uint32_t		cmd_syndrome;
2130 	uint8_t			cmd_reserved1[8];
2131 } __packed __aligned(4);
2132 
2133 struct mcx_cmd_query_flow_group_in {
2134 	uint16_t		cmd_opcode;
2135 	uint8_t			cmd_reserved0[4];
2136 	uint16_t		cmd_op_mod;
2137 	uint8_t			cmd_reserved1[8];
2138 } __packed __aligned(4);
2139 
2140 struct mcx_cmd_query_flow_group_mb_in {
2141 	uint8_t			cmd_table_type;
2142 	uint8_t			cmd_reserved0[3];
2143 	uint32_t		cmd_table_id;
2144 	uint32_t		cmd_group_id;
2145 	uint8_t			cmd_reserved1[36];
2146 } __packed __aligned(4);
2147 
2148 struct mcx_cmd_query_flow_group_out {
2149 	uint8_t			cmd_status;
2150 	uint8_t			cmd_reserved0[3];
2151 	uint32_t		cmd_syndrome;
2152 	uint8_t			cmd_reserved1[8];
2153 } __packed __aligned(4);
2154 
2155 struct mcx_cmd_query_flow_group_mb_out {
2156 	uint8_t			cmd_reserved0[12];
2157 	uint32_t		cmd_start_flow_index;
2158 	uint8_t			cmd_reserved1[4];
2159 	uint32_t		cmd_end_flow_index;
2160 	uint8_t			cmd_reserved2[20];
2161 	uint32_t		cmd_match_criteria_enable;
2162 	uint8_t			cmd_match_criteria[512];
2163 	uint8_t			cmd_reserved4[448];
2164 } __packed __aligned(4);
2165 
2166 struct mcx_cmd_query_flow_table_in {
2167 	uint16_t		cmd_opcode;
2168 	uint8_t			cmd_reserved0[4];
2169 	uint16_t		cmd_op_mod;
2170 	uint8_t			cmd_reserved1[8];
2171 } __packed __aligned(4);
2172 
2173 struct mcx_cmd_query_flow_table_mb_in {
2174 	uint8_t			cmd_table_type;
2175 	uint8_t			cmd_reserved0[3];
2176 	uint32_t		cmd_table_id;
2177 	uint8_t			cmd_reserved1[40];
2178 } __packed __aligned(4);
2179 
2180 struct mcx_cmd_query_flow_table_out {
2181 	uint8_t			cmd_status;
2182 	uint8_t			cmd_reserved0[3];
2183 	uint32_t		cmd_syndrome;
2184 	uint8_t			cmd_reserved1[8];
2185 } __packed __aligned(4);
2186 
2187 struct mcx_cmd_query_flow_table_mb_out {
2188 	uint8_t			cmd_reserved0[4];
2189 	struct mcx_flow_table_ctx cmd_ctx;
2190 } __packed __aligned(4);
2191 
2192 struct mcx_cmd_alloc_flow_counter_in {
2193 	uint16_t		cmd_opcode;
2194 	uint8_t			cmd_reserved0[4];
2195 	uint16_t		cmd_op_mod;
2196 	uint8_t			cmd_reserved1[8];
2197 } __packed __aligned(4);
2198 
2199 struct mcx_cmd_query_rq_in {
2200 	uint16_t		cmd_opcode;
2201 	uint8_t			cmd_reserved0[4];
2202 	uint16_t		cmd_op_mod;
2203 	uint32_t		cmd_rqn;
2204 	uint8_t			cmd_reserved1[4];
2205 } __packed __aligned(4);
2206 
2207 struct mcx_cmd_query_rq_out {
2208 	uint8_t			cmd_status;
2209 	uint8_t			cmd_reserved0[3];
2210 	uint32_t		cmd_syndrome;
2211 	uint8_t			cmd_reserved1[8];
2212 } __packed __aligned(4);
2213 
2214 struct mcx_cmd_query_rq_mb_out {
2215 	uint8_t			cmd_reserved0[16];
2216 	struct mcx_rq_ctx	cmd_ctx;
2217 };
2218 
2219 struct mcx_cmd_query_sq_in {
2220 	uint16_t		cmd_opcode;
2221 	uint8_t			cmd_reserved0[4];
2222 	uint16_t		cmd_op_mod;
2223 	uint32_t		cmd_sqn;
2224 	uint8_t			cmd_reserved1[4];
2225 } __packed __aligned(4);
2226 
2227 struct mcx_cmd_query_sq_out {
2228 	uint8_t			cmd_status;
2229 	uint8_t			cmd_reserved0[3];
2230 	uint32_t		cmd_syndrome;
2231 	uint8_t			cmd_reserved1[8];
2232 } __packed __aligned(4);
2233 
2234 struct mcx_cmd_query_sq_mb_out {
2235 	uint8_t			cmd_reserved0[16];
2236 	struct mcx_sq_ctx	cmd_ctx;
2237 };
2238 
2239 struct mcx_cmd_alloc_flow_counter_out {
2240 	uint8_t			cmd_status;
2241 	uint8_t			cmd_reserved0[3];
2242 	uint32_t		cmd_syndrome;
2243 	uint8_t			cmd_reserved1[2];
2244 	uint16_t		cmd_flow_counter_id;
2245 	uint8_t			cmd_reserved2[4];
2246 } __packed __aligned(4);
2247 
2248 struct mcx_wq_doorbell {
2249 	uint32_t		 db_recv_counter;
2250 	uint32_t		 db_send_counter;
2251 } __packed __aligned(8);
2252 
2253 struct mcx_dmamem {
2254 	bus_dmamap_t		 mxm_map;
2255 	bus_dma_segment_t	 mxm_seg;
2256 	int			 mxm_nsegs;
2257 	size_t			 mxm_size;
2258 	caddr_t			 mxm_kva;
2259 };
2260 #define MCX_DMA_MAP(_mxm)	((_mxm)->mxm_map)
2261 #define MCX_DMA_DVA(_mxm)	((_mxm)->mxm_map->dm_segs[0].ds_addr)
2262 #define MCX_DMA_KVA(_mxm)	((void *)(_mxm)->mxm_kva)
2263 #define MCX_DMA_OFF(_mxm, _off)	((void *)((_mxm)->mxm_kva + (_off)))
2264 #define MCX_DMA_LEN(_mxm)	((_mxm)->mxm_size)
2265 
2266 struct mcx_hwmem {
2267 	bus_dmamap_t		 mhm_map;
2268 	bus_dma_segment_t	*mhm_segs;
2269 	unsigned int		 mhm_seg_count;
2270 	unsigned int		 mhm_npages;
2271 };
2272 
2273 struct mcx_slot {
2274 	bus_dmamap_t		 ms_map;
2275 	struct mbuf		*ms_m;
2276 };
2277 
2278 struct mcx_eq {
2279 	int			 eq_n;
2280 	uint32_t		 eq_cons;
2281 	struct mcx_dmamem	 eq_mem;
2282 };
2283 
2284 struct mcx_cq {
2285 	int			 cq_n;
2286 	struct mcx_dmamem	 cq_mem;
2287 	bus_addr_t		 cq_doorbell;
2288 	uint32_t		 cq_cons;
2289 	uint32_t		 cq_count;
2290 };
2291 
2292 struct mcx_calibration {
2293 	uint64_t		 c_timestamp;	/* previous mcx chip time */
2294 	uint64_t		 c_uptime;	/* previous kernel nanouptime */
2295 	uint64_t		 c_tbase;	/* mcx chip time */
2296 	uint64_t		 c_ubase;	/* kernel nanouptime */
2297 	uint64_t		 c_ratio;
2298 };
2299 
2300 #define MCX_CALIBRATE_FIRST    2
2301 #define MCX_CALIBRATE_NORMAL   32
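/*
 * Calibration intervals in seconds: presumably a quick first pass
 * shortly after link-up, then a longer steady-state period between
 * reschedules of the sc_calibrate timeout.
 */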
2302 
2303 struct mcx_rx {
2304 	struct mcx_softc	*rx_softc;
2305 	struct ifiqueue		*rx_ifiq;
2306 
2307 	int			 rx_rqn;
2308 	struct mcx_dmamem	 rx_rq_mem;
2309 	struct mcx_slot		*rx_slots;
2310 	bus_addr_t		 rx_doorbell;
2311 
2312 	uint32_t		 rx_prod;
2313 	struct timeout		 rx_refill;
2314 	struct if_rxring	 rx_rxr;
2315 } __aligned(64);
2316 
2317 struct mcx_tx {
2318 	struct mcx_softc	*tx_softc;
2319 	struct ifqueue		*tx_ifq;
2320 
2321 	int			 tx_uar;
2322 	int			 tx_sqn;
2323 	struct mcx_dmamem	 tx_sq_mem;
2324 	struct mcx_slot		*tx_slots;
2325 	bus_addr_t		 tx_doorbell;
2326 	int			 tx_bf_offset;
2327 
2328 	uint32_t		 tx_cons;
2329 	uint32_t		 tx_prod;
2330 } __aligned(64);
2331 
2332 struct mcx_queues {
2333 	char			 q_name[16];
2334 	void			*q_ihc;
2335 	struct mcx_softc	*q_sc;
2336 	int			 q_uar;
2337 	int			 q_index;
2338 	struct mcx_rx		 q_rx;
2339 	struct mcx_tx		 q_tx;
2340 	struct mcx_cq		 q_cq;
2341 	struct mcx_eq		 q_eq;
2342 #if NKSTAT > 0
2343 	struct kstat		*q_kstat;
2344 #endif
2345 };
2346 
2347 struct mcx_flow_group {
2348 	int			 g_id;
2349 	int			 g_table;
2350 	int			 g_start;
2351 	int			 g_size;
2352 };
2353 
2354 #define MCX_FLOW_GROUP_PROMISC	 0
2355 #define MCX_FLOW_GROUP_ALLMULTI	 1
2356 #define MCX_FLOW_GROUP_MAC	 2
2357 #define MCX_FLOW_GROUP_RSS_L4	 3
2358 #define MCX_FLOW_GROUP_RSS_L3	 4
2359 #define MCX_FLOW_GROUP_RSS_NONE	 5
2360 #define MCX_NUM_FLOW_GROUPS	 6
2361 
2362 #define MCX_HASH_SEL_L3		MCX_TIR_CTX_HASH_SEL_SRC_IP | \
2363 				MCX_TIR_CTX_HASH_SEL_DST_IP
2364 #define MCX_HASH_SEL_L4		MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_SPORT | \
2365 				MCX_TIR_CTX_HASH_SEL_DPORT
2366 
2367 #define MCX_RSS_HASH_SEL_V4_TCP MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP  |\
2368 				MCX_TIR_CTX_HASH_SEL_IPV4
2369 #define MCX_RSS_HASH_SEL_V6_TCP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_TCP | \
2370 				MCX_TIR_CTX_HASH_SEL_IPV6
2371 #define MCX_RSS_HASH_SEL_V4_UDP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
2372 				MCX_TIR_CTX_HASH_SEL_IPV4
2373 #define MCX_RSS_HASH_SEL_V6_UDP	MCX_HASH_SEL_L4 | MCX_TIR_CTX_HASH_SEL_UDP | \
2374 				MCX_TIR_CTX_HASH_SEL_IPV6
2375 #define MCX_RSS_HASH_SEL_V4	MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV4
2376 #define MCX_RSS_HASH_SEL_V6	MCX_HASH_SEL_L3 | MCX_TIR_CTX_HASH_SEL_IPV6
2377 
2378 /*
2379  * There are a few different pieces involved in configuring RSS.
2380  * A Receive Queue Table (RQT) is the indirection table that maps packets to
2381  * different rx queues based on a hash value.  We only create one, because
2382  * we want to scatter any traffic we can apply RSS to across all our rx
2383  * queues.  Anything else will only be delivered to the first rx queue,
2384  * which doesn't require an RQT.
2385  *
2386  * A Transport Interface Receive (TIR) delivers packets to either a single rx
2387  * queue or an RQT, and in the latter case, specifies the set of fields
2388  * hashed, the hash function, and the hash key.  We need one of these for each
2389  * type of RSS traffic - v4 TCP, v6 TCP, v4 UDP, v6 UDP, other v4, other v6,
2390  * and one for non-RSS traffic.
2391  *
2392  * Flow tables hold flow table entries in sequence.  The first entry that
2393  * matches a packet is applied, sending the packet to either another flow
2394  * table or a TIR.  We use one flow table to select packets based on
2395  * destination MAC address, and a second to apply RSS.  The entries in the
2396  * first table send matching packets to the second, and the entries in the
2397  * RSS table send packets to RSS TIRs if possible, or the non-RSS TIR.
2398  *
2399  * The flow table entry that delivers packets to an RSS TIR must include match
2400  * criteria that ensure packets delivered to the TIR include all the fields
2401  * that the TIR hashes on - so for a v4 TCP TIR, the flow table entry must
2402  * only accept v4 TCP packets.  Accordingly, we need flow table entries for
2403  * each TIR.
2404  *
2405  * All of this is a lot more flexible than we need, and we can describe most
2406  * of the stuff we need with a simple array.
2407  *
2408  * An RSS config creates a TIR with hashing enabled on a set of fields,
2409  * pointing to either the first rx queue or the RQT containing all the rx
2410  * queues, and a flow table entry that matches on an ether type and
2411  * optionally an ip proto, that delivers packets to the TIR.
2412  */
2413 static struct mcx_rss_rule {
2414 	int			hash_sel;
2415 	int			flow_group;
2416 	int			ethertype;
2417 	int			ip_proto;
2418 } mcx_rss_config[] = {
2419 	/* udp and tcp for v4/v6 */
2420 	{ MCX_RSS_HASH_SEL_V4_TCP, MCX_FLOW_GROUP_RSS_L4,
2421 	  ETHERTYPE_IP, IPPROTO_TCP },
2422 	{ MCX_RSS_HASH_SEL_V6_TCP, MCX_FLOW_GROUP_RSS_L4,
2423 	  ETHERTYPE_IPV6, IPPROTO_TCP },
2424 	{ MCX_RSS_HASH_SEL_V4_UDP, MCX_FLOW_GROUP_RSS_L4,
2425 	  ETHERTYPE_IP, IPPROTO_UDP },
2426 	{ MCX_RSS_HASH_SEL_V6_UDP, MCX_FLOW_GROUP_RSS_L4,
2427 	  ETHERTYPE_IPV6, IPPROTO_UDP },
2428 
2429 	/* other v4/v6 */
2430 	{ MCX_RSS_HASH_SEL_V4, MCX_FLOW_GROUP_RSS_L3,
2431 	  ETHERTYPE_IP, 0 },
2432 	{ MCX_RSS_HASH_SEL_V6, MCX_FLOW_GROUP_RSS_L3,
2433 	  ETHERTYPE_IPV6, 0 },
2434 
2435 	/* non v4/v6 */
2436 	{ 0, MCX_FLOW_GROUP_RSS_NONE, 0, 0 }
2437 };
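/*
 * A minimal sketch (not the driver's actual mcx_up() logic) of how the
 * rules above can be consumed: one TIR per rule, hashing on hash_sel
 * and pointing at the RQT, or at the first rx queue for the non-RSS
 * rule, plus one flow table entry steering matching traffic to that
 * TIR.  The helper functions match the prototypes later in this file;
 * mcx_rss_setup_sketch, the rqtn parameter, and the flow index
 * accounting (the real code works with per-group index ranges) are
 * illustrative only.
 */
#if 0
static int
mcx_rss_setup_sketch(struct mcx_softc *sc, int rqtn)
{
	struct mcx_rss_rule *rule;
	int i;

	for (i = 0; i < nitems(mcx_rss_config); i++) {
		rule = &mcx_rss_config[i];

		if (rule->hash_sel == 0) {
			/* no hash fields: deliver to the first rx queue */
			if (mcx_create_tir_direct(sc,
			    &sc->sc_queues[0].q_rx, &sc->sc_tir[i]) != 0)
				return (-1);
		} else {
			/* hash on rule->hash_sel, spread across the RQT */
			if (mcx_create_tir_indirect(sc, rqtn,
			    rule->hash_sel, &sc->sc_tir[i]) != 0)
				return (-1);
		}

		/* steer this ethertype/ip proto combination to the TIR */
		if (mcx_set_flow_table_entry_proto(sc, rule->flow_group,
		    i, rule->ethertype, rule->ip_proto,
		    sc->sc_tir[i] | MCX_FLOW_CONTEXT_DEST_TYPE_TIR) != 0)
			return (-1);
	}

	return (0);
}
#endif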
2438 
2439 struct mcx_softc {
2440 	struct device		 sc_dev;
2441 	struct arpcom		 sc_ac;
2442 	struct ifmedia		 sc_media;
2443 	uint64_t		 sc_media_status;
2444 	uint64_t		 sc_media_active;
2445 
2446 	pci_chipset_tag_t	 sc_pc;
2447 	pci_intr_handle_t	 sc_ih;
2448 	void			*sc_ihc;
2449 	pcitag_t		 sc_tag;
2450 
2451 	bus_dma_tag_t		 sc_dmat;
2452 	bus_space_tag_t		 sc_memt;
2453 	bus_space_handle_t	 sc_memh;
2454 	bus_size_t		 sc_mems;
2455 
2456 	struct mcx_dmamem	 sc_cmdq_mem;
2457 	unsigned int		 sc_cmdq_mask;
2458 	unsigned int		 sc_cmdq_size;
2459 
2460 	unsigned int		 sc_cmdq_token;
2461 	struct mutex		 sc_cmdq_mtx;
2462 	struct rwlock		 sc_cmdq_kstat_lk;
2463 	struct rwlock		 sc_cmdq_ioctl_lk;
2464 
2465 	struct mcx_hwmem	 sc_boot_pages;
2466 	struct mcx_hwmem	 sc_init_pages;
2467 	struct mcx_hwmem	 sc_regular_pages;
2468 
2469 	int			 sc_uar;
2470 	int			 sc_pd;
2471 	int			 sc_tdomain;
2472 	uint32_t		 sc_lkey;
2473 	int			 sc_tis;
2474 	int			 sc_tir[nitems(mcx_rss_config)];
2475 	int			 sc_rqt;
2476 
2477 	struct mcx_dmamem	 sc_doorbell_mem;
2478 
2479 	struct mcx_eq		 sc_admin_eq;
2480 	struct mcx_eq		 sc_queue_eq;
2481 
2482 	int			 sc_hardmtu;
2483 	int			 sc_rxbufsz;
2484 
2485 	int			 sc_bf_size;
2486 	int			 sc_max_rqt_size;
2487 
2488 	struct task		 sc_port_change;
2489 
2490 	int			 sc_mac_flow_table_id;
2491 	int			 sc_rss_flow_table_id;
2492 	struct mcx_flow_group	 sc_flow_group[MCX_NUM_FLOW_GROUPS];
2493 	int			 sc_promisc_flow_enabled;
2494 	int			 sc_allmulti_flow_enabled;
2495 	int			 sc_mcast_flow_base;
2496 	int			 sc_extra_mcast;
2497 	uint8_t			 sc_mcast_flows[MCX_NUM_MCAST_FLOWS][ETHER_ADDR_LEN];
2498 
2499 	struct mcx_calibration	 sc_calibration[2];
2500 	unsigned int		 sc_calibration_gen;
2501 	struct timeout		 sc_calibrate;
2502 	uint32_t		 sc_mhz;
2503 	uint32_t		 sc_khz;
2504 
2505 	struct intrmap		*sc_intrmap;
2506 	struct mcx_queues	*sc_queues;
2507 
2508 	int			 sc_mcam_reg;
2509 
2510 #if NKSTAT > 0
2511 	struct kstat		*sc_kstat_ieee8023;
2512 	struct kstat		*sc_kstat_rfc2863;
2513 	struct kstat		*sc_kstat_rfc2819;
2514 	struct kstat		*sc_kstat_rfc3635;
2515 	unsigned int		 sc_kstat_mtmp_count;
2516 	struct kstat		**sc_kstat_mtmp;
2517 #endif
2518 
2519 	struct timecounter	 sc_timecounter;
2520 };
2521 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
2522 
2523 static int	mcx_match(struct device *, void *, void *);
2524 static void	mcx_attach(struct device *, struct device *, void *);
2525 
2526 #if NKSTAT > 0
2527 static void	mcx_kstat_attach(struct mcx_softc *);
2528 #endif
2529 
2530 static void	mcx_timecounter_attach(struct mcx_softc *);
2531 
2532 static int	mcx_version(struct mcx_softc *);
2533 static int	mcx_init_wait(struct mcx_softc *);
2534 static int	mcx_enable_hca(struct mcx_softc *);
2535 static int	mcx_teardown_hca(struct mcx_softc *, uint16_t);
2536 static int	mcx_access_hca_reg(struct mcx_softc *, uint16_t, int, void *,
2537 		    int, enum mcx_cmdq_slot);
2538 static int	mcx_issi(struct mcx_softc *);
2539 static int	mcx_pages(struct mcx_softc *, struct mcx_hwmem *, uint16_t);
2540 static int	mcx_hca_max_caps(struct mcx_softc *);
2541 static int	mcx_hca_set_caps(struct mcx_softc *);
2542 static int	mcx_init_hca(struct mcx_softc *);
2543 static int	mcx_set_driver_version(struct mcx_softc *);
2544 static int	mcx_iff(struct mcx_softc *);
2545 static int	mcx_alloc_uar(struct mcx_softc *, int *);
2546 static int	mcx_alloc_pd(struct mcx_softc *);
2547 static int	mcx_alloc_tdomain(struct mcx_softc *);
2548 static int	mcx_create_eq(struct mcx_softc *, struct mcx_eq *, int,
2549 		    uint64_t, int);
2550 static int	mcx_query_nic_vport_context(struct mcx_softc *);
2551 static int	mcx_query_special_contexts(struct mcx_softc *);
2552 static int	mcx_set_port_mtu(struct mcx_softc *, int);
2553 static int	mcx_create_cq(struct mcx_softc *, struct mcx_cq *, int, int,
2554 		    int);
2555 static int	mcx_destroy_cq(struct mcx_softc *, struct mcx_cq *);
2556 static int	mcx_create_sq(struct mcx_softc *, struct mcx_tx *, int, int,
2557 		    int);
2558 static int	mcx_destroy_sq(struct mcx_softc *, struct mcx_tx *);
2559 static int	mcx_ready_sq(struct mcx_softc *, struct mcx_tx *);
2560 static int	mcx_create_rq(struct mcx_softc *, struct mcx_rx *, int, int);
2561 static int	mcx_destroy_rq(struct mcx_softc *, struct mcx_rx *);
2562 static int	mcx_ready_rq(struct mcx_softc *, struct mcx_rx *);
2563 static int	mcx_create_tir_direct(struct mcx_softc *, struct mcx_rx *,
2564 		    int *);
2565 static int	mcx_create_tir_indirect(struct mcx_softc *, int, uint32_t,
2566 		    int *);
2567 static int	mcx_destroy_tir(struct mcx_softc *, int);
2568 static int	mcx_create_tis(struct mcx_softc *, int *);
2569 static int	mcx_destroy_tis(struct mcx_softc *, int);
2570 static int	mcx_create_rqt(struct mcx_softc *, int, int *, int *);
2571 static int	mcx_destroy_rqt(struct mcx_softc *, int);
2572 static int	mcx_create_flow_table(struct mcx_softc *, int, int, int *);
2573 static int	mcx_set_flow_table_root(struct mcx_softc *, int);
2574 static int	mcx_destroy_flow_table(struct mcx_softc *, int);
2575 static int	mcx_create_flow_group(struct mcx_softc *, int, int, int,
2576 		    int, int, struct mcx_flow_match *);
2577 static int	mcx_destroy_flow_group(struct mcx_softc *, int);
2578 static int	mcx_set_flow_table_entry_mac(struct mcx_softc *, int, int,
2579 		    uint8_t *, uint32_t);
2580 static int	mcx_set_flow_table_entry_proto(struct mcx_softc *, int, int,
2581 		    int, int, uint32_t);
2582 static int	mcx_delete_flow_table_entry(struct mcx_softc *, int, int);
2583 
2584 #if NKSTAT > 0
2585 static int	mcx_query_rq(struct mcx_softc *, struct mcx_rx *, struct mcx_rq_ctx *);
2586 static int	mcx_query_sq(struct mcx_softc *, struct mcx_tx *, struct mcx_sq_ctx *);
2587 static int	mcx_query_cq(struct mcx_softc *, struct mcx_cq *, struct mcx_cq_ctx *);
2588 static int	mcx_query_eq(struct mcx_softc *, struct mcx_eq *, struct mcx_eq_ctx *);
2589 #endif
2590 
2591 #if 0
2592 static int	mcx_dump_flow_table(struct mcx_softc *, int);
2593 static int	mcx_dump_flow_table_entry(struct mcx_softc *, int, int);
2594 static int	mcx_dump_flow_group(struct mcx_softc *, int);
2595 #endif
2596 
2597 
2598 /*
2599 static void	mcx_cmdq_dump(const struct mcx_cmdq_entry *);
2600 static void	mcx_cmdq_mbox_dump(struct mcx_dmamem *, int);
2601 */
2602 static void	mcx_refill(void *);
2603 static int	mcx_process_rx(struct mcx_softc *, struct mcx_rx *,
2604 		    struct mcx_cq_entry *, struct mbuf_list *,
2605 		    const struct mcx_calibration *);
2606 static int	mcx_process_txeof(struct mcx_softc *, struct mcx_tx *,
2607 		    struct mcx_cq_entry *);
2608 static void	mcx_process_cq(struct mcx_softc *, struct mcx_queues *,
2609 		    struct mcx_cq *);
2610 
2611 static void	mcx_arm_cq(struct mcx_softc *, struct mcx_cq *, int);
2612 static void	mcx_arm_eq(struct mcx_softc *, struct mcx_eq *, int);
2613 static int	mcx_admin_intr(void *);
2614 static int	mcx_cq_intr(void *);
2615 
2616 static int	mcx_up(struct mcx_softc *);
2617 static void	mcx_down(struct mcx_softc *);
2618 static int	mcx_ioctl(struct ifnet *, u_long, caddr_t);
2619 static int	mcx_rxrinfo(struct mcx_softc *, struct if_rxrinfo *);
2620 static void	mcx_start(struct ifqueue *);
2621 static void	mcx_watchdog(struct ifnet *);
2622 static void	mcx_media_add_types(struct mcx_softc *);
2623 static void	mcx_media_status(struct ifnet *, struct ifmediareq *);
2624 static int	mcx_media_change(struct ifnet *);
2625 static int	mcx_get_sffpage(struct ifnet *, struct if_sffpage *);
2626 static void	mcx_port_change(void *);
2627 
2628 static void	mcx_calibrate_first(struct mcx_softc *);
2629 static void	mcx_calibrate(void *);
2630 
2631 static inline uint32_t
2632 		mcx_rd(struct mcx_softc *, bus_size_t);
2633 static inline void
2634 		mcx_wr(struct mcx_softc *, bus_size_t, uint32_t);
2635 static inline void
2636 		mcx_bar(struct mcx_softc *, bus_size_t, bus_size_t, int);
2637 
2638 static uint64_t	mcx_timer(struct mcx_softc *);
2639 
2640 static int	mcx_dmamem_alloc(struct mcx_softc *, struct mcx_dmamem *,
2641 		    bus_size_t, u_int align);
2642 static void	mcx_dmamem_zero(struct mcx_dmamem *);
2643 static void	mcx_dmamem_free(struct mcx_softc *, struct mcx_dmamem *);
2644 
2645 static int	mcx_hwmem_alloc(struct mcx_softc *, struct mcx_hwmem *,
2646 		    unsigned int);
2647 static void	mcx_hwmem_free(struct mcx_softc *, struct mcx_hwmem *);
2648 
2649 struct cfdriver mcx_cd = {
2650 	NULL,
2651 	"mcx",
2652 	DV_IFNET,
2653 };
2654 
2655 const struct cfattach mcx_ca = {
2656 	sizeof(struct mcx_softc),
2657 	mcx_match,
2658 	mcx_attach,
2659 };
2660 
2661 static const struct pci_matchid mcx_devices[] = {
2662 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700 },
2663 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27700VF },
2664 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710 },
2665 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27710VF },
2666 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800 },
2667 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT27800VF },
2668 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800 },
2669 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28800VF },
2670 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28908 },
2671 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT28908VF },
2672 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT2892 },
2673 	{ PCI_VENDOR_MELLANOX,	PCI_PRODUCT_MELLANOX_MT2894 },
2674 };
2675 
2676 struct mcx_eth_proto_capability {
2677 	uint64_t	cap_media;
2678 	uint64_t	cap_baudrate;
2679 };
2680 
2681 static const struct mcx_eth_proto_capability mcx_eth_cap_map[] = {
2682 	[MCX_ETHER_CAP_SGMII]		= { IFM_1000_SGMII,	IF_Gbps(1) },
2683 	[MCX_ETHER_CAP_1000_KX]		= { IFM_1000_KX,	IF_Gbps(1) },
2684 	[MCX_ETHER_CAP_10G_CX4]		= { IFM_10G_CX4,	IF_Gbps(10) },
2685 	[MCX_ETHER_CAP_10G_KX4]		= { IFM_10G_KX4,	IF_Gbps(10) },
2686 	[MCX_ETHER_CAP_10G_KR]		= { IFM_10G_KR,		IF_Gbps(10) },
2687 	[MCX_ETHER_CAP_40G_CR4]		= { IFM_40G_CR4,	IF_Gbps(40) },
2688 	[MCX_ETHER_CAP_40G_KR4]		= { IFM_40G_KR4,	IF_Gbps(40) },
2689 	[MCX_ETHER_CAP_10G_CR]		= { IFM_10G_SFP_CU,	IF_Gbps(10) },
2690 	[MCX_ETHER_CAP_10G_SR]		= { IFM_10G_SR,		IF_Gbps(10) },
2691 	[MCX_ETHER_CAP_10G_LR]		= { IFM_10G_LR,		IF_Gbps(10) },
2692 	[MCX_ETHER_CAP_40G_SR4]		= { IFM_40G_SR4,	IF_Gbps(40) },
2693 	[MCX_ETHER_CAP_40G_LR4]		= { IFM_40G_LR4,	IF_Gbps(40) },
2694 	[MCX_ETHER_CAP_50G_SR2]		= { 0 /*IFM_50G_SR2*/,	IF_Gbps(50) },
2695 	[MCX_ETHER_CAP_100G_CR4]	= { IFM_100G_CR4,	IF_Gbps(100) },
2696 	[MCX_ETHER_CAP_100G_SR4]	= { IFM_100G_SR4,	IF_Gbps(100) },
2697 	[MCX_ETHER_CAP_100G_KR4]	= { IFM_100G_KR4,	IF_Gbps(100) },
2698 	[MCX_ETHER_CAP_100G_LR4]	= { IFM_100G_LR4,	IF_Gbps(100) },
2699 	[MCX_ETHER_CAP_25G_CR]		= { IFM_25G_CR,		IF_Gbps(25) },
2700 	[MCX_ETHER_CAP_25G_KR]		= { IFM_25G_KR,		IF_Gbps(25) },
2701 	[MCX_ETHER_CAP_25G_SR]		= { IFM_25G_SR,		IF_Gbps(25) },
2702 	[MCX_ETHER_CAP_50G_CR2]		= { IFM_50G_CR2,	IF_Gbps(50) },
2703 	[MCX_ETHER_CAP_50G_KR2]		= { IFM_50G_KR2,	IF_Gbps(50) },
2704 };
2705 
2706 static const struct mcx_eth_proto_capability mcx_ext_eth_cap_map[] = {
2707 	[MCX_ETHER_EXT_CAP_SGMII_100]	= { IFM_100_FX,		IF_Mbps(100) },
2708 	[MCX_ETHER_EXT_CAP_1000_X]	= { IFM_1000_SX,	IF_Gbps(1) },
2709 	[MCX_ETHER_EXT_CAP_5G_R]	= { IFM_5000_T,		IF_Gbps(5) },
2710 	[MCX_ETHER_EXT_CAP_XAUI]	= { IFM_10G_SFI,	IF_Gbps(10) },
2711 	[MCX_ETHER_EXT_CAP_XLAUI]	= { IFM_40G_XLPPI,	IF_Gbps(40) },
2712 	[MCX_ETHER_EXT_CAP_25G_AUI1]	= { 0 /*IFM_25G_AUI*/,	IF_Gbps(25) },
2713 	[MCX_ETHER_EXT_CAP_50G_AUI2]	= { 0 /*IFM_50G_AUI*/,	IF_Gbps(50) },
2714 	[MCX_ETHER_EXT_CAP_50G_AUI1]	= { 0 /*IFM_50G_AUI*/,	IF_Gbps(50) },
2715 	[MCX_ETHER_EXT_CAP_CAUI4]	= { 0 /*IFM_100G_AUI*/,	IF_Gbps(100) },
2716 	[MCX_ETHER_EXT_CAP_100G_AUI2]	= { 0 /*IFM_100G_AUI*/,	IF_Gbps(100) },
2717 	[MCX_ETHER_EXT_CAP_200G_AUI4]	= { 0 /*IFM_200G_AUI*/,	IF_Gbps(200) },
2718 	[MCX_ETHER_EXT_CAP_400G_AUI8]	= { 0 /*IFM_400G_AUI*/,	IF_Gbps(400) },
2719 };
2720 
2721 static int
2722 mcx_get_id(uint32_t val)
2723 {
2724 	return betoh32(val) & 0x00ffffff;
2725 }
2726 
2727 static int
2728 mcx_match(struct device *parent, void *match, void *aux)
2729 {
2730 	return (pci_matchbyid(aux, mcx_devices, nitems(mcx_devices)));
2731 }
2732 
2733 void
2734 mcx_attach(struct device *parent, struct device *self, void *aux)
2735 {
2736 	struct mcx_softc *sc = (struct mcx_softc *)self;
2737 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2738 	struct pci_attach_args *pa = aux;
2739 	pcireg_t memtype;
2740 	uint32_t r;
2741 	unsigned int cq_stride;
2742 	unsigned int cq_size;
2743 	const char *intrstr;
2744 	int i, msix;
2745 
2746 	sc->sc_pc = pa->pa_pc;
2747 	sc->sc_tag = pa->pa_tag;
2748 	sc->sc_dmat = pa->pa_dmat;
2749 
2750 	/* Map the PCI memory space */
2751 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MCX_HCA_BAR);
2752 	if (pci_mapreg_map(pa, MCX_HCA_BAR, memtype,
2753 	    BUS_SPACE_MAP_PREFETCHABLE, &sc->sc_memt, &sc->sc_memh,
2754 	    NULL, &sc->sc_mems, 0)) {
2755 		printf(": unable to map register memory\n");
2756 		return;
2757 	}
2758 
2759 	if (mcx_version(sc) != 0) {
2760 		/* error printed by mcx_version */
2761 		goto unmap;
2762 	}
2763 
2764 	r = mcx_rd(sc, MCX_CMDQ_ADDR_LO);
2765 	cq_stride = 1 << MCX_CMDQ_LOG_STRIDE(r); /* size of the entries */
2766 	cq_size = 1 << MCX_CMDQ_LOG_SIZE(r); /* number of entries */
2767 	if (cq_size > MCX_MAX_CQE) {
2768 		printf(", command queue size overflow %u\n", cq_size);
2769 		goto unmap;
2770 	}
2771 	if (cq_stride < sizeof(struct mcx_cmdq_entry)) {
2772 		printf(", command queue entry size underflow %u\n", cq_stride);
2773 		goto unmap;
2774 	}
2775 	if (cq_stride * cq_size > MCX_PAGE_SIZE) {
2776 		printf(", command queue page overflow\n");
2777 		goto unmap;
2778 	}
2779 
2780 	if (mcx_dmamem_alloc(sc, &sc->sc_doorbell_mem, MCX_DOORBELL_AREA_SIZE,
2781 	    MCX_PAGE_SIZE) != 0) {
2782 		printf(", unable to allocate doorbell memory\n");
2783 		goto unmap;
2784 	}
2785 
2786 	if (mcx_dmamem_alloc(sc, &sc->sc_cmdq_mem, MCX_PAGE_SIZE,
2787 	    MCX_PAGE_SIZE) != 0) {
2788 		printf(", unable to allocate command queue\n");
2789 		goto dbfree;
2790 	}
2791 
2792 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
2793 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint32_t),
2794 	    BUS_SPACE_BARRIER_WRITE);
2795 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem));
2796 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint32_t),
2797 	    BUS_SPACE_BARRIER_WRITE);
2798 
2799 	if (mcx_init_wait(sc) != 0) {
2800 		printf(", timeout waiting for init\n");
2801 		goto cqfree;
2802 	}
2803 
2804 	sc->sc_cmdq_mask = cq_size - 1;
2805 	sc->sc_cmdq_size = cq_stride;
2806 	rw_init(&sc->sc_cmdq_kstat_lk, "mcxkstat");
2807 	rw_init(&sc->sc_cmdq_ioctl_lk, "mcxioctl");
2808 	mtx_init(&sc->sc_cmdq_mtx, IPL_NET);
2809 
2810 	if (mcx_enable_hca(sc) != 0) {
2811 		/* error printed by mcx_enable_hca */
2812 		goto cqfree;
2813 	}
2814 
2815 	if (mcx_issi(sc) != 0) {
2816 		/* error printed by mcx_issi */
2817 		goto teardown;
2818 	}
2819 
2820 	if (mcx_pages(sc, &sc->sc_boot_pages,
2821 	    htobe16(MCX_CMD_QUERY_PAGES_BOOT)) != 0) {
2822 		/* error printed by mcx_pages */
2823 		goto teardown;
2824 	}
2825 
2826 	if (mcx_hca_max_caps(sc) != 0) {
2827 		/* error printed by mcx_hca_max_caps */
2828 		goto teardown;
2829 	}
2830 
2831 	if (mcx_hca_set_caps(sc) != 0) {
2832 		/* error printed by mcx_hca_set_caps */
2833 		goto teardown;
2834 	}
2835 
2836 	if (mcx_pages(sc, &sc->sc_init_pages,
2837 	    htobe16(MCX_CMD_QUERY_PAGES_INIT)) != 0) {
2838 		/* error printed by mcx_pages */
2839 		goto teardown;
2840 	}
2841 
2842 	if (mcx_init_hca(sc) != 0) {
2843 		/* error printed by mcx_init_hca */
2844 		goto teardown;
2845 	}
2846 
2847 	if (mcx_pages(sc, &sc->sc_regular_pages,
2848 	    htobe16(MCX_CMD_QUERY_PAGES_REGULAR)) != 0) {
2849 		/* error printed by mcx_pages */
2850 		goto teardown;
2851 	}
2852 
2853 	/* apparently not necessary? */
2854 	if (mcx_set_driver_version(sc) != 0) {
2855 		/* error printed by mcx_set_driver_version */
2856 		goto teardown;
2857 	}
2858 
2859 	if (mcx_iff(sc) != 0) {	/* modify nic vport context */
2860 		/* error printed by mcx_iff? */
2861 		goto teardown;
2862 	}
2863 
2864 	if (mcx_alloc_uar(sc, &sc->sc_uar) != 0) {
2865 		/* error printed by mcx_alloc_uar */
2866 		goto teardown;
2867 	}
2868 
2869 	if (mcx_alloc_pd(sc) != 0) {
2870 		/* error printed by mcx_alloc_pd */
2871 		goto teardown;
2872 	}
2873 
2874 	if (mcx_alloc_tdomain(sc) != 0) {
2875 		/* error printed by mcx_alloc_tdomain */
2876 		goto teardown;
2877 	}
2878 
2879 	msix = pci_intr_msix_count(pa);
2880 	if (msix < 2) {
2881 		printf(": not enough msi-x vectors\n");
2882 		goto teardown;
2883 	}
2884 
2885 	/*
2886 	 * PRM makes no mention of msi interrupts, just legacy and msi-x.
2887 	 * mellanox support tells me legacy interrupts are not supported,
2888 	 * so we're stuck with just msi-x.
2889 	 */
2890 	if (pci_intr_map_msix(pa, 0, &sc->sc_ih) != 0) {
2891 		printf(": unable to map interrupt\n");
2892 		goto teardown;
2893 	}
2894 	intrstr = pci_intr_string(sc->sc_pc, sc->sc_ih);
2895 	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
2896 	    IPL_NET | IPL_MPSAFE, mcx_admin_intr, sc, DEVNAME(sc));
2897 	if (sc->sc_ihc == NULL) {
2898 		printf(": unable to establish interrupt");
2899 		if (intrstr != NULL)
2900 			printf(" at %s", intrstr);
2901 		printf("\n");
2902 		goto teardown;
2903 	}
2904 
2905 	if (mcx_create_eq(sc, &sc->sc_admin_eq, sc->sc_uar,
2906 	    (1ull << MCX_EVENT_TYPE_INTERNAL_ERROR) |
2907 	    (1ull << MCX_EVENT_TYPE_PORT_CHANGE) |
2908 	    (1ull << MCX_EVENT_TYPE_CMD_COMPLETION) |
2909 	    (1ull << MCX_EVENT_TYPE_PAGE_REQUEST), 0) != 0) {
2910 		/* error printed by mcx_create_eq */
2911 		goto teardown;
2912 	}
2913 
2914 	if (mcx_query_nic_vport_context(sc) != 0) {
2915 		/* error printed by mcx_query_nic_vport_context */
2916 		goto teardown;
2917 	}
2918 
2919 	if (mcx_query_special_contexts(sc) != 0) {
2920 		/* error printed by mcx_query_special_contexts */
2921 		goto teardown;
2922 	}
2923 
2924 	if (mcx_set_port_mtu(sc, MCX_HARDMTU) != 0) {
2925 		/* error printed by mcx_set_port_mtu */
2926 		goto teardown;
2927 	}
2928 
2929 	msix--; /* admin ops took one */
2930 	sc->sc_intrmap = intrmap_create(&sc->sc_dev, msix, MCX_MAX_QUEUES,
2931 	    INTRMAP_POWEROF2);
2932 	if (sc->sc_intrmap == NULL) {
2933 		printf(": unable to create interrupt map\n");
2934 		goto teardown;
2935 	}
2936 	sc->sc_queues = mallocarray(intrmap_count(sc->sc_intrmap),
2937 	    sizeof(*sc->sc_queues), M_DEVBUF, M_WAITOK|M_ZERO);
2938 	if (sc->sc_queues == NULL) {
2939 		printf(": unable to create queues\n");
2940 		goto intrunmap;
2941 	}
2942 
2943 	printf(", %s, %d queue%s, address %s\n", intrstr,
2944 	    intrmap_count(sc->sc_intrmap),
2945 	    intrmap_count(sc->sc_intrmap) > 1 ? "s" : "",
2946 	    ether_sprintf(sc->sc_ac.ac_enaddr));
2947 
2948 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
2949 	ifp->if_softc = sc;
2950 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
2951 	ifp->if_xflags = IFXF_MPSAFE;
2952 	ifp->if_ioctl = mcx_ioctl;
2953 	ifp->if_qstart = mcx_start;
2954 	ifp->if_watchdog = mcx_watchdog;
2955 	ifp->if_hardmtu = sc->sc_hardmtu;
2956 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
2957 	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_UDPv6 | IFCAP_CSUM_TCPv4 |
2958 	    IFCAP_CSUM_TCPv6;
2959 #if NVLAN > 0
2960 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
2961 #endif
2962 	ifq_init_maxlen(&ifp->if_snd, 1024);
2963 
2964 	ifmedia_init(&sc->sc_media, IFM_IMASK, mcx_media_change,
2965 	    mcx_media_status);
2966 	mcx_media_add_types(sc);
2967 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
2968 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
2969 
2970 	if_attach(ifp);
2971 	ether_ifattach(ifp);
2972 
2973 	if_attach_iqueues(ifp, intrmap_count(sc->sc_intrmap));
2974 	if_attach_queues(ifp, intrmap_count(sc->sc_intrmap));
2975 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
2976 		struct ifiqueue *ifiq = ifp->if_iqs[i];
2977 		struct ifqueue *ifq = ifp->if_ifqs[i];
2978 		struct mcx_queues *q = &sc->sc_queues[i];
2979 		struct mcx_rx *rx = &q->q_rx;
2980 		struct mcx_tx *tx = &q->q_tx;
2981 		pci_intr_handle_t ih;
2982 		int vec;
2983 
2984 		vec = i + 1;
2985 		q->q_sc = sc;
2986 		q->q_index = i;
2987 
2988 		if (mcx_alloc_uar(sc, &q->q_uar) != 0) {
2989 			printf("%s: unable to alloc uar %d\n",
2990 			    DEVNAME(sc), i);
2991 			goto intrdisestablish;
2992 		}
2993 
2994 		if (mcx_create_eq(sc, &q->q_eq, q->q_uar, 0, vec) != 0) {
2995 			printf("%s: unable to create event queue %d\n",
2996 			    DEVNAME(sc), i);
2997 			goto intrdisestablish;
2998 		}
2999 
3000 		rx->rx_softc = sc;
3001 		rx->rx_ifiq = ifiq;
3002 		timeout_set(&rx->rx_refill, mcx_refill, rx);
3003 		ifiq->ifiq_softc = rx;
3004 
3005 		tx->tx_softc = sc;
3006 		tx->tx_ifq = ifq;
3007 		ifq->ifq_softc = tx;
3008 
3009 		if (pci_intr_map_msix(pa, vec, &ih) != 0) {
3010 			printf("%s: unable to map queue interrupt %d\n",
3011 			    DEVNAME(sc), i);
3012 			goto intrdisestablish;
3013 		}
3014 		snprintf(q->q_name, sizeof(q->q_name), "%s:%d",
3015 		    DEVNAME(sc), i);
3016 		q->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
3017 		    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
3018 		    mcx_cq_intr, q, q->q_name);
3019 		if (q->q_ihc == NULL) {
3020 			printf("%s: unable to establish interrupt %d\n",
3021 			    DEVNAME(sc), i);
3022 			goto intrdisestablish;
3023 		}
3024 	}
3025 
3026 	timeout_set(&sc->sc_calibrate, mcx_calibrate, sc);
3027 
3028 	task_set(&sc->sc_port_change, mcx_port_change, sc);
3029 	mcx_port_change(sc);
3030 
3031 	sc->sc_mac_flow_table_id = -1;
3032 	sc->sc_rss_flow_table_id = -1;
3033 	sc->sc_rqt = -1;
3034 	for (i = 0; i < MCX_NUM_FLOW_GROUPS; i++) {
3035 		struct mcx_flow_group *mfg = &sc->sc_flow_group[i];
3036 		mfg->g_id = -1;
3037 		mfg->g_table = -1;
3038 		mfg->g_size = 0;
3039 		mfg->g_start = 0;
3040 	}
3041 	sc->sc_extra_mcast = 0;
3042 	memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows));
3043 
3044 #if NKSTAT > 0
3045 	mcx_kstat_attach(sc);
3046 #endif
3047 	mcx_timecounter_attach(sc);
3048 	return;
3049 
3050 intrdisestablish:
3051 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
3052 		struct mcx_queues *q = &sc->sc_queues[i];
3053 		if (q->q_ihc == NULL)
3054 			continue;
3055 		pci_intr_disestablish(sc->sc_pc, q->q_ihc);
3056 		q->q_ihc = NULL;
3057 	}
3058 	free(sc->sc_queues, M_DEVBUF,
3059 	    intrmap_count(sc->sc_intrmap) * sizeof(*sc->sc_queues));
3060 intrunmap:
3061 	intrmap_destroy(sc->sc_intrmap);
3062 	sc->sc_intrmap = NULL;
3063 teardown:
3064 	mcx_teardown_hca(sc, htobe16(MCX_CMD_TEARDOWN_HCA_GRACEFUL));
3065 	/* error printed by mcx_teardown_hca, and we're already unwinding */
3066 cqfree:
3067 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, MCX_DMA_DVA(&sc->sc_cmdq_mem) >> 32);
3068 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3069 	    BUS_SPACE_BARRIER_WRITE);
3070 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_DMA_DVA(&sc->sc_cmdq_mem) |
3071 	    MCX_CMDQ_INTERFACE_DISABLED);
3072 	mcx_bar(sc, MCX_CMDQ_ADDR_LO, sizeof(uint64_t),
3073 	    BUS_SPACE_BARRIER_WRITE);
3074 
3075 	mcx_wr(sc, MCX_CMDQ_ADDR_HI, 0);
3076 	mcx_bar(sc, MCX_CMDQ_ADDR_HI, sizeof(uint64_t),
3077 	    BUS_SPACE_BARRIER_WRITE);
3078 	mcx_wr(sc, MCX_CMDQ_ADDR_LO, MCX_CMDQ_INTERFACE_DISABLED);
3079 
3080 	mcx_dmamem_free(sc, &sc->sc_cmdq_mem);
3081 dbfree:
3082 	mcx_dmamem_free(sc, &sc->sc_doorbell_mem);
3083 unmap:
3084 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
3085 	sc->sc_mems = 0;
3086 }
3087 
3088 static int
3089 mcx_version(struct mcx_softc *sc)
3090 {
3091 	uint32_t fw0, fw1;
3092 	uint16_t cmdif;
3093 
3094 	fw0 = mcx_rd(sc, MCX_FW_VER);
3095 	fw1 = mcx_rd(sc, MCX_CMDIF_FW_SUBVER);
3096 
3097 	printf(": FW %u.%u.%04u", MCX_FW_VER_MAJOR(fw0),
3098 	    MCX_FW_VER_MINOR(fw0), MCX_FW_VER_SUBMINOR(fw1));
3099 
3100 	cmdif = MCX_CMDIF(fw1);
3101 	if (cmdif != MCX_CMD_IF_SUPPORTED) {
3102 		printf(", unsupported command interface %u\n", cmdif);
3103 		return (-1);
3104 	}
3105 
3106 	return (0);
3107 }
3108 
3109 static int
3110 mcx_init_wait(struct mcx_softc *sc)
3111 {
3112 	unsigned int i;
3113 	uint32_t r;
3114 
3115 	for (i = 0; i < 2000; i++) {
3116 		r = mcx_rd(sc, MCX_STATE);
3117 		if ((r & MCX_STATE_MASK) == MCX_STATE_READY)
3118 			return (0);
3119 
3120 		delay(1000);
3121 		mcx_bar(sc, MCX_STATE, sizeof(uint32_t),
3122 		    BUS_SPACE_BARRIER_READ);
3123 	}
3124 
3125 	return (-1);
3126 }
3127 
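/*
 * Completion of a polled command is detected by watching the ownership
 * bits in the queue entry status byte: software posts the entry owned
 * by hardware, and the firmware flips it back to software when done.
 */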
3128 static uint8_t
3129 mcx_cmdq_poll(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3130     unsigned int msec)
3131 {
3132 	unsigned int i;
3133 
3134 	for (i = 0; i < msec; i++) {
3135 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3136 		    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3137 
3138 		if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
3139 		    MCX_CQ_STATUS_OWN_SW)
3140 			return (0);
3141 
3142 		delay(1000);
3143 	}
3144 
3145 	return (ETIMEDOUT);
3146 }
3147 
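/*
 * The command interface protects queue entries and mailboxes with an
 * 8-bit xor signature.  These helpers mix 64/32/8-bit fields into a
 * 32-bit accumulator and then fold it down to a single byte.
 */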
3148 static uint32_t
3149 mcx_mix_u64(uint32_t xor, uint64_t u64)
3150 {
3151 	xor ^= u64 >> 32;
3152 	xor ^= u64;
3153 
3154 	return (xor);
3155 }
3156 
3157 static uint32_t
3158 mcx_mix_u32(uint32_t xor, uint32_t u32)
3159 {
3160 	xor ^= u32;
3161 
3162 	return (xor);
3163 }
3164 
3165 static uint32_t
3166 mcx_mix_u8(uint32_t xor, uint8_t u8)
3167 {
3168 	xor ^= u8;
3169 
3170 	return (xor);
3171 }
3172 
3173 static uint8_t
3174 mcx_mix_done(uint32_t xor)
3175 {
3176 	xor ^= xor >> 16;
3177 	xor ^= xor >> 8;
3178 
3179 	return (xor);
3180 }
3181 
3182 static uint8_t
3183 mcx_xor(const void *buf, size_t len)
3184 {
3185 	const uint32_t *dwords = buf;
3186 	uint32_t xor = 0xff;
3187 	size_t i;
3188 
3189 	len /= sizeof(*dwords);
3190 
3191 	for (i = 0; i < len; i++)
3192 		xor ^= dwords[i];
3193 
3194 	return (mcx_mix_done(xor));
3195 }
3196 
3197 static uint8_t
3198 mcx_cmdq_token(struct mcx_softc *sc)
3199 {
3200 	uint8_t token;
3201 
3202 	mtx_enter(&sc->sc_cmdq_mtx);
3203 	do {
3204 		token = ++sc->sc_cmdq_token;
3205 	} while (token == 0);
3206 	mtx_leave(&sc->sc_cmdq_mtx);
3207 
3208 	return (token);
3209 }
3210 
3211 static struct mcx_cmdq_entry *
3212 mcx_get_cmdq_entry(struct mcx_softc *sc, enum mcx_cmdq_slot slot)
3213 {
3214 	struct mcx_cmdq_entry *cqe;
3215 
3216 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3217 	cqe += slot;
3218 
3219 	/* make sure the slot isn't running a command already */
3220 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3221 	    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3222 	if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) !=
3223 	    MCX_CQ_STATUS_OWN_SW)
3224 		cqe = NULL;
3225 
3226 	return (cqe);
3227 }
3228 
3229 static void
3230 mcx_cmdq_init(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3231     uint32_t ilen, uint32_t olen, uint8_t token)
3232 {
3233 	memset(cqe, 0, sc->sc_cmdq_size);
3234 
3235 	cqe->cq_type = MCX_CMDQ_TYPE_PCIE;
3236 	htobem32(&cqe->cq_input_length, ilen);
3237 	htobem32(&cqe->cq_output_length, olen);
3238 	cqe->cq_token = token;
3239 	cqe->cq_status = MCX_CQ_STATUS_OWN_HW;
3240 }
3241 
3242 static void
3243 mcx_cmdq_sign(struct mcx_cmdq_entry *cqe)
3244 {
3245 	cqe->cq_signature = ~mcx_xor(cqe, sizeof(*cqe));
3246 }
3247 
3248 static int
3249 mcx_cmdq_verify(const struct mcx_cmdq_entry *cqe)
3250 {
3251 	/* return (mcx_xor(cqe, sizeof(*cqe)) ? -1 :  0); */
3252 	return (0);
3253 }
3254 
3255 static void *
3256 mcx_cmdq_in(struct mcx_cmdq_entry *cqe)
3257 {
3258 	return (&cqe->cq_input_data);
3259 }
3260 
3261 static void *
3262 mcx_cmdq_out(struct mcx_cmdq_entry *cqe)
3263 {
3264 	return (&cqe->cq_output_data);
3265 }
3266 
3267 static void
3268 mcx_cmdq_post(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3269     unsigned int slot)
3270 {
3271 	mcx_cmdq_sign(cqe);
3272 
3273 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem),
3274 	    0, MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_PRERW);
3275 
3276 	mcx_wr(sc, MCX_CMDQ_DOORBELL, 1U << slot);
3277 	mcx_bar(sc, MCX_CMDQ_DOORBELL, sizeof(uint32_t),
3278 	    BUS_SPACE_BARRIER_WRITE);
3279 }
3280 
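/*
 * Run a command to completion.  The poll slot busy-waits on the
 * ownership bits; other slots sleep on the command queue token and
 * recheck ownership on every wakeup or timeout, so completion is
 * detected even if a wakeup is missed.
 */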
3281 static int
3282 mcx_cmdq_exec(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3283     unsigned int slot, unsigned int msec)
3284 {
3285 	int err;
3286 
3287 	if (slot == MCX_CMDQ_SLOT_POLL) {
3288 		mcx_cmdq_post(sc, cqe, slot);
3289 		return (mcx_cmdq_poll(sc, cqe, msec));
3290 	}
3291 
3292 	mtx_enter(&sc->sc_cmdq_mtx);
3293 	mcx_cmdq_post(sc, cqe, slot);
3294 
3295 	err = 0;
3296 	while (err == 0) {
3297 		err = msleep_nsec(&sc->sc_cmdq_token, &sc->sc_cmdq_mtx, 0,
3298 		    "mcxcmd", msec * 1000);
3299 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_cmdq_mem), 0,
3300 		    MCX_DMA_LEN(&sc->sc_cmdq_mem), BUS_DMASYNC_POSTRW);
3301 		if ((cqe->cq_status & MCX_CQ_STATUS_OWN_MASK) ==
3302 		    MCX_CQ_STATUS_OWN_SW) {
3303 			err = 0;
3304 			break;
3305 		}
3306 	}
3307 
3308 	mtx_leave(&sc->sc_cmdq_mtx);
3309 	return (err);
3310 }
3311 
3312 static int
3313 mcx_enable_hca(struct mcx_softc *sc)
3314 {
3315 	struct mcx_cmdq_entry *cqe;
3316 	struct mcx_cmd_enable_hca_in *in;
3317 	struct mcx_cmd_enable_hca_out *out;
3318 	int error;
3319 	uint8_t status;
3320 
3321 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3322 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3323 
3324 	in = mcx_cmdq_in(cqe);
3325 	in->cmd_opcode = htobe16(MCX_CMD_ENABLE_HCA);
3326 	in->cmd_op_mod = htobe16(0);
3327 	in->cmd_function_id = htobe16(0);
3328 
3329 	mcx_cmdq_post(sc, cqe, 0);
3330 
3331 	error = mcx_cmdq_poll(sc, cqe, 1000);
3332 	if (error != 0) {
3333 		printf(", hca enable timeout\n");
3334 		return (-1);
3335 	}
3336 	if (mcx_cmdq_verify(cqe) != 0) {
3337 		printf(", hca enable command corrupt\n");
3338 		return (-1);
3339 	}
3340 
3341 	status = cqe->cq_output_data[0];
3342 	if (status != MCX_CQ_STATUS_OK) {
3343 		printf(", hca enable failed (%x)\n", status);
3344 		return (-1);
3345 	}
3346 
3347 	return (0);
3348 }
3349 
3350 static int
3351 mcx_teardown_hca(struct mcx_softc *sc, uint16_t profile)
3352 {
3353 	struct mcx_cmdq_entry *cqe;
3354 	struct mcx_cmd_teardown_hca_in *in;
3355 	struct mcx_cmd_teardown_hca_out *out;
3356 	int error;
3357 	uint8_t status;
3358 
3359 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3360 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3361 
3362 	in = mcx_cmdq_in(cqe);
3363 	in->cmd_opcode = htobe16(MCX_CMD_TEARDOWN_HCA);
3364 	in->cmd_op_mod = htobe16(0);
3365 	in->cmd_profile = profile;
3366 
3367 	mcx_cmdq_post(sc, cqe, 0);
3368 
3369 	error = mcx_cmdq_poll(sc, cqe, 1000);
3370 	if (error != 0) {
3371 		printf(", hca teardown timeout\n");
3372 		return (-1);
3373 	}
3374 	if (mcx_cmdq_verify(cqe) != 0) {
3375 		printf(", hca teardown command corrupt\n");
3376 		return (-1);
3377 	}
3378 
3379 	status = cqe->cq_output_data[0];
3380 	if (status != MCX_CQ_STATUS_OK) {
3381 		printf(", hca teardown failed (%x)\n", status);
3382 		return (-1);
3383 	}
3384 
3385 	return (0);
3386 }
3387 
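/*
 * Commands whose input or output doesn't fit in the queue entry use a
 * chain of mailboxes.  Each mailbox carries its block number and the
 * command token, and points at the next mailbox in the chain.
 */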
3388 static int
3389 mcx_cmdq_mboxes_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
3390     unsigned int nmb, uint64_t *ptr, uint8_t token)
3391 {
3392 	caddr_t kva;
3393 	uint64_t dva;
3394 	int i;
3395 	int error;
3396 
3397 	error = mcx_dmamem_alloc(sc, mxm,
3398 	    nmb * MCX_CMDQ_MAILBOX_SIZE, MCX_CMDQ_MAILBOX_ALIGN);
3399 	if (error != 0)
3400 		return (error);
3401 
3402 	mcx_dmamem_zero(mxm);
3403 
3404 	dva = MCX_DMA_DVA(mxm);
3405 	kva = MCX_DMA_KVA(mxm);
3406 	for (i = 0; i < nmb; i++) {
3407 		struct mcx_cmdq_mailbox *mbox = (struct mcx_cmdq_mailbox *)kva;
3408 
3409 		/* patch the cqe or mbox pointing at this one */
3410 		htobem64(ptr, dva);
3411 
3412 		/* fill in this mbox */
3413 		htobem32(&mbox->mb_block_number, i);
3414 		mbox->mb_token = token;
3415 
3416 		/* move to the next one */
3417 		ptr = &mbox->mb_next_ptr;
3418 
3419 		dva += MCX_CMDQ_MAILBOX_SIZE;
3420 		kva += MCX_CMDQ_MAILBOX_SIZE;
3421 	}
3422 
3423 	return (0);
3424 }
3425 
3426 static uint32_t
3427 mcx_cmdq_mbox_ctrl_sig(const struct mcx_cmdq_mailbox *mb)
3428 {
3429 	uint32_t xor = 0xff;
3430 
3431 	/* only 3 fields get set, so mix them directly */
3432 	xor = mcx_mix_u64(xor, mb->mb_next_ptr);
3433 	xor = mcx_mix_u32(xor, mb->mb_block_number);
3434 	xor = mcx_mix_u8(xor, mb->mb_token);
3435 
3436 	return (mcx_mix_done(xor));
3437 }
3438 
3439 static void
3440 mcx_cmdq_mboxes_sign(struct mcx_dmamem *mxm, unsigned int nmb)
3441 {
3442 	caddr_t kva;
3443 	int i;
3444 
3445 	kva = MCX_DMA_KVA(mxm);
3446 
3447 	for (i = 0; i < nmb; i++) {
3448 		struct mcx_cmdq_mailbox *mb = (struct mcx_cmdq_mailbox *)kva;
3449 		uint8_t sig = mcx_cmdq_mbox_ctrl_sig(mb);
3450 		mb->mb_ctrl_signature = sig;
3451 		mb->mb_signature = sig ^
3452 		    mcx_xor(mb->mb_data, sizeof(mb->mb_data));
3453 
3454 		kva += MCX_CMDQ_MAILBOX_SIZE;
3455 	}
3456 }
3457 
3458 static void
3459 mcx_cmdq_mboxes_sync(struct mcx_softc *sc, struct mcx_dmamem *mxm, int ops)
3460 {
3461 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(mxm),
3462 	    0, MCX_DMA_LEN(mxm), ops);
3463 }
3464 
3465 static struct mcx_cmdq_mailbox *
3466 mcx_cq_mbox(struct mcx_dmamem *mxm, unsigned int i)
3467 {
3468 	caddr_t kva;
3469 
3470 	kva = MCX_DMA_KVA(mxm);
3471 	kva += i * MCX_CMDQ_MAILBOX_SIZE;
3472 
3473 	return ((struct mcx_cmdq_mailbox *)kva);
3474 }
3475 
3476 static inline void *
3477 mcx_cq_mbox_data(struct mcx_cmdq_mailbox *mb)
3478 {
3479 	return (&mb->mb_data);
3480 }
3481 
3482 static void
3483 mcx_cmdq_mboxes_copyin(struct mcx_dmamem *mxm, unsigned int nmb,
3484     void *b, size_t len)
3485 {
3486 	caddr_t buf = b;
3487 	struct mcx_cmdq_mailbox *mb;
3488 	int i;
3489 
3490 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3491 	for (i = 0; i < nmb; i++) {
3492 
3493 		memcpy(mb->mb_data, buf, min(sizeof(mb->mb_data), len));
3494 
3495 		if (sizeof(mb->mb_data) >= len)
3496 			break;
3497 
3498 		buf += sizeof(mb->mb_data);
3499 		len -= sizeof(mb->mb_data);
3500 		mb++;
3501 	}
3502 }
3503 
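/*
 * Illustrative example: mcx_cmdq_mboxes_copyin() scatters a flat
 * buffer across consecutive mailbox data areas.  For a hypothetical
 * length of 1.5 * MCX_CMDQ_MAILBOX_DATASIZE bytes, mailbox 0 takes a
 * full data area, mailbox 1 takes the remaining half, and the loop
 * breaks early once the residual fits in a single mailbox.
 */
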
3504 static void
3505 mcx_cmdq_mboxes_pas(struct mcx_dmamem *mxm, int offset, int npages,
3506     struct mcx_dmamem *buf)
3507 {
3508 	uint64_t *pas;
3509 	int mbox, mbox_pages, i;
3510 
3511 	mbox = offset / MCX_CMDQ_MAILBOX_DATASIZE;
3512 	offset %= MCX_CMDQ_MAILBOX_DATASIZE;
3513 
3514 	pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3515 	pas += (offset / sizeof(*pas));
3516 	mbox_pages = (MCX_CMDQ_MAILBOX_DATASIZE - offset) / sizeof(*pas);
3517 	for (i = 0; i < npages; i++) {
3518 		if (i == mbox_pages) {
3519 			mbox++;
3520 			pas = mcx_cq_mbox_data(mcx_cq_mbox(mxm, mbox));
3521 			mbox_pages += MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas);
3522 		}
3523 		*pas = htobe64(MCX_DMA_DVA(buf) + (i * MCX_PAGE_SIZE));
3524 		pas++;
3525 	}
3526 }
3527 
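/*
 * Illustrative example: mcx_cmdq_mboxes_pas() writes the page address
 * array starting at a byte offset into the mailbox chain, so a command
 * context can occupy the start of mailbox 0.  Assuming a hypothetical
 * offset of 0x10, mailbox 0 holds (MCX_CMDQ_MAILBOX_DATASIZE - 0x10) /
 * sizeof(uint64_t) addresses and each following mailbox holds another
 * MCX_CMDQ_MAILBOX_DATASIZE / sizeof(uint64_t) of them.
 */
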
3528 static void
3529 mcx_cmdq_mboxes_copyout(struct mcx_dmamem *mxm, int nmb, void *b, size_t len)
3530 {
3531 	caddr_t buf = b;
3532 	struct mcx_cmdq_mailbox *mb;
3533 	int i;
3534 
3535 	mb = (struct mcx_cmdq_mailbox *)MCX_DMA_KVA(mxm);
3536 	for (i = 0; i < nmb; i++) {
3537 		memcpy(buf, mb->mb_data, min(sizeof(mb->mb_data), len));
3538 
3539 		if (sizeof(mb->mb_data) >= len)
3540 			break;
3541 
3542 		buf += sizeof(mb->mb_data);
3543 		len -= sizeof(mb->mb_data);
3544 		mb++;
3545 	}
3546 }
3547 
3548 static void
3549 mcx_cq_mboxes_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
3550 {
3551 	mcx_dmamem_free(sc, mxm);
3552 }
3553 
3554 #if 0
3555 static void
3556 mcx_cmdq_dump(const struct mcx_cmdq_entry *cqe)
3557 {
3558 	unsigned int i;
3559 
3560 	printf(" type %02x, ilen %u, iptr %016llx", cqe->cq_type,
3561 	    bemtoh32(&cqe->cq_input_length), bemtoh64(&cqe->cq_input_ptr));
3562 
3563 	printf(", idata ");
3564 	for (i = 0; i < sizeof(cqe->cq_input_data); i++)
3565 		printf("%02x", cqe->cq_input_data[i]);
3566 
3567 	printf(", odata ");
3568 	for (i = 0; i < sizeof(cqe->cq_output_data); i++)
3569 		printf("%02x", cqe->cq_output_data[i]);
3570 
3571 	printf(", optr %016llx, olen %u, token %02x, sig %02x, status %02x",
3572 	    bemtoh64(&cqe->cq_output_ptr), bemtoh32(&cqe->cq_output_length),
3573 	    cqe->cq_token, cqe->cq_signature, cqe->cq_status);
3574 }
3575 
3576 static void
3577 mcx_cmdq_mbox_dump(struct mcx_dmamem *mboxes, int num)
3578 {
3579 	int i, j;
3580 	uint8_t *d;
3581 
3582 	for (i = 0; i < num; i++) {
3583 		struct mcx_cmdq_mailbox *mbox;
3584 		mbox = mcx_cq_mbox(mboxes, i);
3585 
3586 		d = mcx_cq_mbox_data(mbox);
3587 		for (j = 0; j < MCX_CMDQ_MAILBOX_DATASIZE; j++) {
3588 			if (j != 0 && (j % 16 == 0))
3589 				printf("\n");
3590 			printf("%.2x ", d[j]);
3591 		}
3592 	}
3593 }
3594 #endif
3595 
3596 static int
3597 mcx_access_hca_reg(struct mcx_softc *sc, uint16_t reg, int op, void *data,
3598     int len, enum mcx_cmdq_slot slot)
3599 {
3600 	struct mcx_dmamem mxm;
3601 	struct mcx_cmdq_entry *cqe;
3602 	struct mcx_cmd_access_reg_in *in;
3603 	struct mcx_cmd_access_reg_out *out;
3604 	uint8_t token = mcx_cmdq_token(sc);
3605 	int error, nmb;
3606 
3607 	cqe = mcx_get_cmdq_entry(sc, slot);
3608 	if (cqe == NULL)
3609 		return (-1);
3610 
3611 	mcx_cmdq_init(sc, cqe, sizeof(*in) + len, sizeof(*out) + len,
3612 	    token);
3613 
3614 	in = mcx_cmdq_in(cqe);
3615 	in->cmd_opcode = htobe16(MCX_CMD_ACCESS_REG);
3616 	in->cmd_op_mod = htobe16(op);
3617 	in->cmd_register_id = htobe16(reg);
3618 
3619 	nmb = howmany(len, MCX_CMDQ_MAILBOX_DATASIZE);
3620 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3621 	    &cqe->cq_output_ptr, token) != 0) {
3622 		printf(", unable to allocate access reg mailboxen\n");
3623 		return (-1);
3624 	}
3625 	cqe->cq_input_ptr = cqe->cq_output_ptr;
3626 	mcx_cmdq_mboxes_copyin(&mxm, nmb, data, len);
3627 	mcx_cmdq_mboxes_sign(&mxm, nmb);
3628 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3629 
3630 	error = mcx_cmdq_exec(sc, cqe, slot, 1000);
3631 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3632 
3633 	if (error != 0) {
3634 		printf("%s: access reg (%s %x) timeout\n", DEVNAME(sc),
3635 		    (op == MCX_REG_OP_WRITE ? "write" : "read"), reg);
3636 		goto free;
3637 	}
3638 	error = mcx_cmdq_verify(cqe);
3639 	if (error != 0) {
3640 		printf("%s: access reg (%s %x) reply corrupt\n",
3641 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3642 		    reg);
3643 		goto free;
3644 	}
3645 
3646 	out = mcx_cmdq_out(cqe);
3647 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3648 		printf("%s: access reg (%s %x) failed (%x, %.6x)\n",
3649 		    DEVNAME(sc), (op == MCX_REG_OP_WRITE ? "write" : "read"),
3650 		    reg, out->cmd_status, betoh32(out->cmd_syndrome));
3651 		error = -1;
3652 		goto free;
3653 	}
3654 
3655 	mcx_cmdq_mboxes_copyout(&mxm, nmb, data, len);
3656 free:
3657 	mcx_dmamem_free(sc, &mxm);
3658 
3659 	return (error);
3660 }
3661 
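/*
 * Editorial note: mcx_access_hca_reg() above is the canonical shape of
 * a mailbox-carrying command in this file: build the cqe, allocate and
 * fill the mailbox chain, sign it, sync for the device, execute, sync
 * back, verify the cqe, check the status byte, then copy the reply out
 * and free the chain.
 */
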
3662 static int
3663 mcx_set_issi(struct mcx_softc *sc, struct mcx_cmdq_entry *cqe,
3664     unsigned int slot)
3665 {
3666 	struct mcx_cmd_set_issi_in *in;
3667 	struct mcx_cmd_set_issi_out *out;
3668 	uint8_t status;
3669 
3670 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3671 
3672 	in = mcx_cmdq_in(cqe);
3673 	in->cmd_opcode = htobe16(MCX_CMD_SET_ISSI);
3674 	in->cmd_op_mod = htobe16(0);
3675 	in->cmd_current_issi = htobe16(MCX_ISSI);
3676 
3677 	mcx_cmdq_post(sc, cqe, slot);
3678 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0)
3679 		return (-1);
3680 	if (mcx_cmdq_verify(cqe) != 0)
3681 		return (-1);
3682 
3683 	status = cqe->cq_output_data[0];
3684 	if (status != MCX_CQ_STATUS_OK)
3685 		return (-1);
3686 
3687 	return (0);
3688 }
3689 
3690 static int
3691 mcx_issi(struct mcx_softc *sc)
3692 {
3693 	struct mcx_dmamem mxm;
3694 	struct mcx_cmdq_entry *cqe;
3695 	struct mcx_cmd_query_issi_in *in;
3696 	struct mcx_cmd_query_issi_il_out *out;
3697 	struct mcx_cmd_query_issi_mb_out *mb;
3698 	uint8_t token = mcx_cmdq_token(sc);
3699 	uint8_t status;
3700 	int error;
3701 
3702 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3703 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mb), token);
3704 
3705 	in = mcx_cmdq_in(cqe);
3706 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_ISSI);
3707 	in->cmd_op_mod = htobe16(0);
3708 
3709 	CTASSERT(sizeof(*mb) <= MCX_CMDQ_MAILBOX_DATASIZE);
3710 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
3711 	    &cqe->cq_output_ptr, token) != 0) {
3712 		printf(", unable to allocate query issi mailbox\n");
3713 		return (-1);
3714 	}
3715 	mcx_cmdq_mboxes_sign(&mxm, 1);
3716 
3717 	mcx_cmdq_post(sc, cqe, 0);
3718 	error = mcx_cmdq_poll(sc, cqe, 1000);
3719 	if (error != 0) {
3720 		printf(", query issi timeout\n");
3721 		goto free;
3722 	}
3723 	error = mcx_cmdq_verify(cqe);
3724 	if (error != 0) {
3725 		printf(", query issi reply corrupt\n");
3726 		goto free;
3727 	}
3728 
3729 	status = cqe->cq_output_data[0];
3730 	switch (status) {
3731 	case MCX_CQ_STATUS_OK:
3732 		break;
3733 	case MCX_CQ_STATUS_BAD_OPCODE:
3734 		/* use ISSI 0 */
3735 		goto free;
3736 	default:
3737 		printf(", query issi failed (%x)\n", status);
3738 		error = -1;
3739 		goto free;
3740 	}
3741 
3742 	out = mcx_cmdq_out(cqe);
3743 	if (out->cmd_current_issi == htobe16(MCX_ISSI)) {
3744 		/* use ISSI 1 */
3745 		goto free;
3746 	}
3747 
3748 	/* don't need to read cqe anymore, can be used for SET ISSI */
3749 
3750 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
3751 	CTASSERT(MCX_ISSI < NBBY);
3752 	 /* XXX math is hard */
3753 	if (!ISSET(mb->cmd_supported_issi[79], 1 << MCX_ISSI)) {
3754 		/* use ISSI 0 */
3755 		goto free;
3756 	}
3757 
3758 	if (mcx_set_issi(sc, cqe, 0) != 0) {
3759 		/* ignore the error, just use ISSI 0 */
3760 	} else {
3761 		/* use ISSI 1 */
3762 	}
3763 
3764 free:
3765 	mcx_cq_mboxes_free(sc, &mxm);
3766 	return (error);
3767 }
3768 
3769 static int
3770 mcx_query_pages(struct mcx_softc *sc, uint16_t type,
3771     int32_t *npages, uint16_t *func_id)
3772 {
3773 	struct mcx_cmdq_entry *cqe;
3774 	struct mcx_cmd_query_pages_in *in;
3775 	struct mcx_cmd_query_pages_out *out;
3776 
3777 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3778 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
3779 
3780 	in = mcx_cmdq_in(cqe);
3781 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_PAGES);
3782 	in->cmd_op_mod = type;
3783 
3784 	mcx_cmdq_post(sc, cqe, 0);
3785 	if (mcx_cmdq_poll(sc, cqe, 1000) != 0) {
3786 		printf(", query pages timeout\n");
3787 		return (-1);
3788 	}
3789 	if (mcx_cmdq_verify(cqe) != 0) {
3790 		printf(", query pages reply corrupt\n");
3791 		return (-1);
3792 	}
3793 
3794 	out = mcx_cmdq_out(cqe);
3795 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
3796 		printf(", query pages failed (%x)\n", out->cmd_status);
3797 		return (-1);
3798 	}
3799 
3800 	*func_id = out->cmd_func_id;
3801 	*npages = bemtoh32(&out->cmd_num_pages);
3802 
3803 	return (0);
3804 }
3805 
3806 struct bus_dma_iter {
3807 	bus_dmamap_t		i_map;
3808 	bus_size_t		i_offset;
3809 	unsigned int		i_index;
3810 };
3811 
3812 static void
3813 bus_dma_iter_init(struct bus_dma_iter *i, bus_dmamap_t map)
3814 {
3815 	i->i_map = map;
3816 	i->i_offset = 0;
3817 	i->i_index = 0;
3818 }
3819 
3820 static bus_addr_t
3821 bus_dma_iter_addr(struct bus_dma_iter *i)
3822 {
3823 	return (i->i_map->dm_segs[i->i_index].ds_addr + i->i_offset);
3824 }
3825 
3826 static void
3827 bus_dma_iter_add(struct bus_dma_iter *i, bus_size_t size)
3828 {
3829 	bus_dma_segment_t *seg = i->i_map->dm_segs + i->i_index;
3830 	bus_size_t diff;
3831 
3832 	do {
3833 		diff = seg->ds_len - i->i_offset;
3834 		if (size < diff)
3835 			break;
3836 
3837 		size -= diff;
3838 
3839 		seg++;
3840 
3841 		i->i_offset = 0;
3842 		i->i_index++;
3843 	} while (size > 0);
3844 
3845 	i->i_offset += size;
3846 }
3847 
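#if 0
/*
 * Illustrative sketch, not part of the driver: bus_dma_iter walks a
 * loaded DMA map in fixed-size steps without assuming the segments are
 * page-aligned or page-sized.  A hypothetical caller that needs the
 * device address of every page looks roughly like mcx_add_pages()
 * below:
 */
static void
example_walk_pages(bus_dmamap_t map, unsigned int npages)
{
	struct bus_dma_iter iter;
	bus_addr_t dva;
	unsigned int i;

	bus_dma_iter_init(&iter, map);
	for (i = 0; i < npages; i++) {
		dva = bus_dma_iter_addr(&iter);
		/* hand dva to the device here, e.g. htobem64(&pas[i], dva) */
		bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
	}
}
#endif
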
3848 static int
3849 mcx_add_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t func_id)
3850 {
3851 	struct mcx_dmamem mxm;
3852 	struct mcx_cmdq_entry *cqe;
3853 	struct mcx_cmd_manage_pages_in *in;
3854 	struct mcx_cmd_manage_pages_out *out;
3855 	unsigned int paslen, nmb, i, j, npages;
3856 	struct bus_dma_iter iter;
3857 	uint64_t *pas;
3858 	uint8_t status;
3859 	uint8_t token = mcx_cmdq_token(sc);
3860 	int error;
3861 
3862 	npages = mhm->mhm_npages;
3863 
3864 	paslen = sizeof(*pas) * npages;
3865 	nmb = howmany(paslen, MCX_CMDQ_MAILBOX_DATASIZE);
3866 
3867 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3868 	mcx_cmdq_init(sc, cqe, sizeof(*in) + paslen, sizeof(*out), token);
3869 
3870 	in = mcx_cmdq_in(cqe);
3871 	in->cmd_opcode = htobe16(MCX_CMD_MANAGE_PAGES);
3872 	in->cmd_op_mod = htobe16(MCX_CMD_MANAGE_PAGES_ALLOC_SUCCESS);
3873 	in->cmd_func_id = func_id;
3874 	htobem32(&in->cmd_input_num_entries, npages);
3875 
3876 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, nmb,
3877 	    &cqe->cq_input_ptr, token) != 0) {
3878 		printf(", unable to allocate manage pages mailboxen\n");
3879 		return (-1);
3880 	}
3881 
3882 	bus_dma_iter_init(&iter, mhm->mhm_map);
3883 	for (i = 0; i < nmb; i++) {
3884 		unsigned int lim;
3885 
3886 		pas = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, i));
3887 		lim = min(MCX_CMDQ_MAILBOX_DATASIZE / sizeof(*pas), npages);
3888 
3889 		for (j = 0; j < lim; j++) {
3890 			htobem64(&pas[j], bus_dma_iter_addr(&iter));
3891 			bus_dma_iter_add(&iter, MCX_PAGE_SIZE);
3892 		}
3893 
3894 		npages -= lim;
3895 	}
3896 
3897 	mcx_cmdq_mboxes_sign(&mxm, nmb);
3898 
3899 	mcx_cmdq_post(sc, cqe, 0);
3900 	error = mcx_cmdq_poll(sc, cqe, 1000);
3901 	if (error != 0) {
3902 		printf(", manage pages timeout\n");
3903 		goto free;
3904 	}
3905 	error = mcx_cmdq_verify(cqe);
3906 	if (error != 0) {
3907 		printf(", manage pages reply corrupt\n");
3908 		goto free;
3909 	}
3910 
3911 	status = cqe->cq_output_data[0];
3912 	if (status != MCX_CQ_STATUS_OK) {
3913 		printf(", manage pages failed (%x)\n", status);
3914 		error = -1;
3915 		goto free;
3916 	}
3917 
3918 free:
3919 	mcx_dmamem_free(sc, &mxm);
3920 
3921 	return (error);
3922 }
3923 
3924 static int
3925 mcx_pages(struct mcx_softc *sc, struct mcx_hwmem *mhm, uint16_t type)
3926 {
3927 	int32_t npages;
3928 	uint16_t func_id;
3929 
3930 	if (mcx_query_pages(sc, type, &npages, &func_id) != 0) {
3931 		/* error printed by mcx_query_pages */
3932 		return (-1);
3933 	}
3934 
3935 	if (npages < 1)
3936 		return (0);
3937 
3938 	if (mcx_hwmem_alloc(sc, mhm, npages) != 0) {
3939 		printf(", unable to allocate hwmem\n");
3940 		return (-1);
3941 	}
3942 
3943 	if (mcx_add_pages(sc, mhm, func_id) != 0) {
3944 		printf(", unable to add hwmem\n");
3945 		goto free;
3946 	}
3947 
3948 	return (0);
3949 
3950 free:
3951 	mcx_hwmem_free(sc, mhm);
3952 
3953 	return (-1);
3954 }
3955 
3956 static int
3957 mcx_hca_max_caps(struct mcx_softc *sc)
3958 {
3959 	struct mcx_dmamem mxm;
3960 	struct mcx_cmdq_entry *cqe;
3961 	struct mcx_cmd_query_hca_cap_in *in;
3962 	struct mcx_cmd_query_hca_cap_out *out;
3963 	struct mcx_cmdq_mailbox *mb;
3964 	struct mcx_cap_device *hca;
3965 	uint8_t status;
3966 	uint8_t token = mcx_cmdq_token(sc);
3967 	int error;
3968 
3969 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
3970 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
3971 	    token);
3972 
3973 	in = mcx_cmdq_in(cqe);
3974 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
3975 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_MAX |
3976 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
3977 
3978 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
3979 	    &cqe->cq_output_ptr, token) != 0) {
3980 		printf(", unable to allocate query hca caps mailboxen\n");
3981 		return (-1);
3982 	}
3983 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
3984 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
3985 
3986 	mcx_cmdq_post(sc, cqe, 0);
3987 	error = mcx_cmdq_poll(sc, cqe, 1000);
3988 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
3989 
3990 	if (error != 0) {
3991 		printf(", query hca caps timeout\n");
3992 		goto free;
3993 	}
3994 	error = mcx_cmdq_verify(cqe);
3995 	if (error != 0) {
3996 		printf(", query hca caps reply corrupt\n");
3997 		goto free;
3998 	}
3999 
4000 	status = cqe->cq_output_data[0];
4001 	if (status != MCX_CQ_STATUS_OK) {
4002 		printf(", query hca caps failed (%x)\n", status);
4003 		error = -1;
4004 		goto free;
4005 	}
4006 
4007 	mb = mcx_cq_mbox(&mxm, 0);
4008 	hca = mcx_cq_mbox_data(mb);
4009 
4010 	if ((hca->port_type & MCX_CAP_DEVICE_PORT_TYPE)
4011 	    != MCX_CAP_DEVICE_PORT_TYPE_ETH) {
4012 		printf(", not in ethernet mode\n");
4013 		error = -1;
4014 		goto free;
4015 	}
4016 	if (hca->log_pg_sz > PAGE_SHIFT) {
4017 		printf(", minimum system page shift %u is too large\n",
4018 		    hca->log_pg_sz);
4019 		error = -1;
4020 		goto free;
4021 	}
4022 	/*
4023 	 * blueflame register is split into two buffers, and we must alternate
4024 	 * between the two of them.
4025 	 */
4026 	sc->sc_bf_size = (1 << hca->log_bf_reg_size) / 2;
4027 	sc->sc_max_rqt_size = (1 << hca->log_max_rqt_size);
4028 
4029 	if (hca->local_ca_ack_delay & MCX_CAP_DEVICE_MCAM_REG)
4030 		sc->sc_mcam_reg = 1;
4031 
4032 	sc->sc_mhz = bemtoh32(&hca->device_frequency_mhz);
4033 	sc->sc_khz = bemtoh32(&hca->device_frequency_khz);
4034 
4035 free:
4036 	mcx_dmamem_free(sc, &mxm);
4037 
4038 	return (error);
4039 }
4040 
4041 static int
4042 mcx_hca_set_caps(struct mcx_softc *sc)
4043 {
4044 	struct mcx_dmamem mxm;
4045 	struct mcx_cmdq_entry *cqe;
4046 	struct mcx_cmd_query_hca_cap_in *in;
4047 	struct mcx_cmd_query_hca_cap_out *out;
4048 	struct mcx_cmdq_mailbox *mb;
4049 	struct mcx_cap_device *hca;
4050 	uint8_t status;
4051 	uint8_t token = mcx_cmdq_token(sc);
4052 	int error;
4053 
4054 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4055 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + MCX_HCA_CAP_LEN,
4056 	    token);
4057 
4058 	in = mcx_cmdq_in(cqe);
4059 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_HCA_CAP);
4060 	in->cmd_op_mod = htobe16(MCX_CMD_QUERY_HCA_CAP_CURRENT |
4061 	    MCX_CMD_QUERY_HCA_CAP_DEVICE);
4062 
4063 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, MCX_HCA_CAP_NMAILBOXES,
4064 	    &cqe->cq_output_ptr, token) != 0) {
4065 		printf(", unable to allocate query hca caps mailboxen\n");
4066 		return (-1);
4067 	}
4068 	mcx_cmdq_mboxes_sign(&mxm, MCX_HCA_CAP_NMAILBOXES);
4069 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_PRERW);
4070 
4071 	mcx_cmdq_post(sc, cqe, 0);
4072 	error = mcx_cmdq_poll(sc, cqe, 1000);
4073 	mcx_cmdq_mboxes_sync(sc, &mxm, BUS_DMASYNC_POSTRW);
4074 
4075 	if (error != 0) {
4076 		printf(", query hca caps timeout\n");
4077 		goto free;
4078 	}
4079 	error = mcx_cmdq_verify(cqe);
4080 	if (error != 0) {
4081 		printf(", query hca caps reply corrupt\n");
4082 		goto free;
4083 	}
4084 
4085 	status = cqe->cq_output_data[0];
4086 	if (status != MCX_CQ_STATUS_OK) {
4087 		printf(", query hca caps failed (%x)\n", status);
4088 		error = -1;
4089 		goto free;
4090 	}
4091 
4092 	mb = mcx_cq_mbox(&mxm, 0);
4093 	hca = mcx_cq_mbox_data(mb);
4094 
4095 	hca->log_pg_sz = PAGE_SHIFT;
4096 
4097 free:
4098 	mcx_dmamem_free(sc, &mxm);
4099 
4100 	return (error);
4101 }
4102 
4103 
4104 static int
4105 mcx_init_hca(struct mcx_softc *sc)
4106 {
4107 	struct mcx_cmdq_entry *cqe;
4108 	struct mcx_cmd_init_hca_in *in;
4109 	struct mcx_cmd_init_hca_out *out;
4110 	int error;
4111 	uint8_t status;
4112 
4113 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4114 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4115 
4116 	in = mcx_cmdq_in(cqe);
4117 	in->cmd_opcode = htobe16(MCX_CMD_INIT_HCA);
4118 	in->cmd_op_mod = htobe16(0);
4119 
4120 	mcx_cmdq_post(sc, cqe, 0);
4121 
4122 	error = mcx_cmdq_poll(sc, cqe, 1000);
4123 	if (error != 0) {
4124 		printf(", hca init timeout\n");
4125 		return (-1);
4126 	}
4127 	if (mcx_cmdq_verify(cqe) != 0) {
4128 		printf(", hca init command corrupt\n");
4129 		return (-1);
4130 	}
4131 
4132 	status = cqe->cq_output_data[0];
4133 	if (status != MCX_CQ_STATUS_OK) {
4134 		printf(", hca init failed (%x)\n", status);
4135 		return (-1);
4136 	}
4137 
4138 	return (0);
4139 }
4140 
4141 static int
4142 mcx_set_driver_version(struct mcx_softc *sc)
4143 {
4144 	struct mcx_dmamem mxm;
4145 	struct mcx_cmdq_entry *cqe;
4146 	struct mcx_cmd_set_driver_version_in *in;
4147 	struct mcx_cmd_set_driver_version_out *out;
4148 	int error;
4149 	int token;
4150 	uint8_t status;
4151 
4152 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4153 	token = mcx_cmdq_token(sc);
4154 	mcx_cmdq_init(sc, cqe, sizeof(*in) +
4155 	    sizeof(struct mcx_cmd_set_driver_version), sizeof(*out), token);
4156 
4157 	in = mcx_cmdq_in(cqe);
4158 	in->cmd_opcode = htobe16(MCX_CMD_SET_DRIVER_VERSION);
4159 	in->cmd_op_mod = htobe16(0);
4160 
4161 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4162 	    &cqe->cq_input_ptr, token) != 0) {
4163 		printf(", unable to allocate set driver version mailboxen\n");
4164 		return (-1);
4165 	}
4166 	strlcpy(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)),
4167 	    "OpenBSD,mcx,1.000.000000", MCX_CMDQ_MAILBOX_DATASIZE);
4168 
4169 	mcx_cmdq_mboxes_sign(&mxm, 1);
4170 	mcx_cmdq_post(sc, cqe, 0);
4171 
4172 	error = mcx_cmdq_poll(sc, cqe, 1000);
4173 	if (error != 0) {
4174 		printf(", set driver version timeout\n");
4175 		goto free;
4176 	}
4177 	if (mcx_cmdq_verify(cqe) != 0) {
4178 		printf(", set driver version command corrupt\n");
4179 		goto free;
4180 	}
4181 
4182 	status = cqe->cq_output_data[0];
4183 	if (status != MCX_CQ_STATUS_OK) {
4184 		printf(", set driver version failed (%x)\n", status);
4185 		error = -1;
4186 		goto free;
4187 	}
4188 
4189 free:
4190 	mcx_dmamem_free(sc, &mxm);
4191 
4192 	return (error);
4193 }
4194 
4195 static int
4196 mcx_iff(struct mcx_softc *sc)
4197 {
4198 	struct ifnet *ifp = &sc->sc_ac.ac_if;
4199 	struct mcx_dmamem mxm;
4200 	struct mcx_cmdq_entry *cqe;
4201 	struct mcx_cmd_modify_nic_vport_context_in *in;
4202 	struct mcx_cmd_modify_nic_vport_context_out *out;
4203 	struct mcx_nic_vport_ctx *ctx;
4204 	int error;
4205 	int token;
4206 	int insize;
4207 	uint32_t dest;
4208 
4209 	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
4210 	    sc->sc_rss_flow_table_id;
4211 
4212 	/* enable or disable the promisc flow */
4213 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
4214 		if (sc->sc_promisc_flow_enabled == 0) {
4215 			mcx_set_flow_table_entry_mac(sc,
4216 			    MCX_FLOW_GROUP_PROMISC, 0, NULL, dest);
4217 			sc->sc_promisc_flow_enabled = 1;
4218 		}
4219 	} else if (sc->sc_promisc_flow_enabled != 0) {
4220 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
4221 		sc->sc_promisc_flow_enabled = 0;
4222 	}
4223 
4224 	/* enable or disable the all-multicast flow */
4225 	if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
4226 		if (sc->sc_allmulti_flow_enabled == 0) {
4227 			uint8_t mcast[ETHER_ADDR_LEN];
4228 
4229 			memset(mcast, 0, sizeof(mcast));
4230 			mcast[0] = 0x01;
4231 			mcx_set_flow_table_entry_mac(sc,
4232 			    MCX_FLOW_GROUP_ALLMULTI, 0, mcast, dest);
4233 			sc->sc_allmulti_flow_enabled = 1;
4234 		}
4235 	} else if (sc->sc_allmulti_flow_enabled != 0) {
4236 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
4237 		sc->sc_allmulti_flow_enabled = 0;
4238 	}
4239 
4240 	insize = sizeof(struct mcx_nic_vport_ctx) + 240;
4241 
4242 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4243 	token = mcx_cmdq_token(sc);
4244 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4245 
4246 	in = mcx_cmdq_in(cqe);
4247 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_NIC_VPORT_CONTEXT);
4248 	in->cmd_op_mod = htobe16(0);
4249 	in->cmd_field_select = htobe32(
4250 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_PROMISC |
4251 	    MCX_CMD_MODIFY_NIC_VPORT_CONTEXT_FIELD_MTU);
4252 
4253 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1, &cqe->cq_input_ptr, token) != 0) {
4254 		printf(", unable to allocate modify "
4255 		    "nic vport context mailboxen\n");
4256 		return (-1);
4257 	}
4258 	ctx = (struct mcx_nic_vport_ctx *)
4259 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 240);
4260 	ctx->vp_mtu = htobe32(sc->sc_hardmtu);
4261 	/*
4262 	 * always leave promisc-all enabled on the vport since we
4263 	 * can't give it a vlan list, and we're already doing multicast
4264 	 * filtering in the flow table.
4265 	 */
4266 	ctx->vp_flags = htobe16(MCX_NIC_VPORT_CTX_PROMISC_ALL);
4267 
4268 	mcx_cmdq_mboxes_sign(&mxm, 1);
4269 	mcx_cmdq_post(sc, cqe, 0);
4270 
4271 	error = mcx_cmdq_poll(sc, cqe, 1000);
4272 	if (error != 0) {
4273 		printf(", modify nic vport context timeout\n");
4274 		goto free;
4275 	}
4276 	if (mcx_cmdq_verify(cqe) != 0) {
4277 		printf(", modify nic vport context command corrupt\n");
4278 		goto free;
4279 	}
4280 
4281 	out = mcx_cmdq_out(cqe);
4282 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4283 		printf(", modify nic vport context failed (%x, %x)\n",
4284 		    out->cmd_status, betoh32(out->cmd_syndrome));
4285 		error = -1;
4286 		goto free;
4287 	}
4288 
4289 free:
4290 	mcx_dmamem_free(sc, &mxm);
4291 
4292 	return (error);
4293 }
4294 
4295 static int
4296 mcx_alloc_uar(struct mcx_softc *sc, int *uar)
4297 {
4298 	struct mcx_cmdq_entry *cqe;
4299 	struct mcx_cmd_alloc_uar_in *in;
4300 	struct mcx_cmd_alloc_uar_out *out;
4301 	int error;
4302 
4303 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4304 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4305 
4306 	in = mcx_cmdq_in(cqe);
4307 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_UAR);
4308 	in->cmd_op_mod = htobe16(0);
4309 
4310 	mcx_cmdq_post(sc, cqe, 0);
4311 
4312 	error = mcx_cmdq_poll(sc, cqe, 1000);
4313 	if (error != 0) {
4314 		printf(", alloc uar timeout\n");
4315 		return (-1);
4316 	}
4317 	if (mcx_cmdq_verify(cqe) != 0) {
4318 		printf(", alloc uar command corrupt\n");
4319 		return (-1);
4320 	}
4321 
4322 	out = mcx_cmdq_out(cqe);
4323 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4324 		printf(", alloc uar failed (%x)\n", out->cmd_status);
4325 		return (-1);
4326 	}
4327 
4328 	*uar = mcx_get_id(out->cmd_uar);
4329 	return (0);
4330 }
4331 
4332 static int
4333 mcx_create_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar,
4334     uint64_t events, int vector)
4335 {
4336 	struct mcx_cmdq_entry *cqe;
4337 	struct mcx_dmamem mxm;
4338 	struct mcx_cmd_create_eq_in *in;
4339 	struct mcx_cmd_create_eq_mb_in *mbin;
4340 	struct mcx_cmd_create_eq_out *out;
4341 	struct mcx_eq_entry *eqe;
4342 	int error;
4343 	uint64_t *pas;
4344 	int insize, npages, paslen, i, token;
4345 
4346 	eq->eq_cons = 0;
4347 
4348 	npages = howmany((1 << MCX_LOG_EQ_SIZE) * sizeof(struct mcx_eq_entry),
4349 	    MCX_PAGE_SIZE);
4350 	paslen = npages * sizeof(*pas);
4351 	insize = sizeof(struct mcx_cmd_create_eq_mb_in) + paslen;
4352 
4353 	if (mcx_dmamem_alloc(sc, &eq->eq_mem, npages * MCX_PAGE_SIZE,
4354 	    MCX_PAGE_SIZE) != 0) {
4355 		printf(", unable to allocate event queue memory\n");
4356 		return (-1);
4357 	}
4358 
4359 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
4360 	for (i = 0; i < (1 << MCX_LOG_EQ_SIZE); i++) {
4361 		eqe[i].eq_owner = MCX_EQ_ENTRY_OWNER_INIT;
4362 	}
4363 
4364 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4365 	token = mcx_cmdq_token(sc);
4366 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4367 
4368 	in = mcx_cmdq_in(cqe);
4369 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_EQ);
4370 	in->cmd_op_mod = htobe16(0);
4371 
4372 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4373 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4374 	    &cqe->cq_input_ptr, token) != 0) {
4375 		printf(", unable to allocate create eq mailboxen\n");
4376 		goto free_eq;
4377 	}
4378 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4379 	mbin->cmd_eq_ctx.eq_uar_size = htobe32(
4380 	    (MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | uar);
4381 	mbin->cmd_eq_ctx.eq_intr = vector;
4382 	mbin->cmd_event_bitmask = htobe64(events);
4383 
4384 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4385 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
4386 
4387 	/* physical addresses follow the mailbox in data */
4388 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &eq->eq_mem);
4389 	mcx_cmdq_mboxes_sign(&mxm, howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE));
4390 	mcx_cmdq_post(sc, cqe, 0);
4391 
4392 	error = mcx_cmdq_poll(sc, cqe, 1000);
4393 	if (error != 0) {
4394 		printf(", create eq timeout\n");
4395 		goto free_mxm;
4396 	}
4397 	if (mcx_cmdq_verify(cqe) != 0) {
4398 		printf(", create eq command corrupt\n");
4399 		goto free_mxm;
4400 	}
4401 
4402 	out = mcx_cmdq_out(cqe);
4403 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4404 		printf(", create eq failed (%x, %x)\n", out->cmd_status,
4405 		    betoh32(out->cmd_syndrome));
4406 		goto free_mxm;
4407 	}
4408 
4409 	eq->eq_n = mcx_get_id(out->cmd_eqn);
4410 
4411 	mcx_dmamem_free(sc, &mxm);
4412 
4413 	mcx_arm_eq(sc, eq, uar);
4414 
4415 	return (0);
4416 
4417 free_mxm:
4418 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
4419 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
4420 	mcx_dmamem_free(sc, &mxm);
4421 free_eq:
4422 	mcx_dmamem_free(sc, &eq->eq_mem);
4423 	return (-1);
4424 }
4425 
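/*
 * Editorial note: eq_uar_size in mcx_create_eq() packs two values into
 * one word, the log2 queue size in the high bits (via
 * MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) and the UAR page number in the low
 * bits, so e.g. a hypothetical uar of 3 yields
 * htobe32((MCX_LOG_EQ_SIZE << MCX_EQ_CTX_LOG_EQ_SIZE_SHIFT) | 3).
 */
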
4426 static int
4427 mcx_alloc_pd(struct mcx_softc *sc)
4428 {
4429 	struct mcx_cmdq_entry *cqe;
4430 	struct mcx_cmd_alloc_pd_in *in;
4431 	struct mcx_cmd_alloc_pd_out *out;
4432 	int error;
4433 
4434 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4435 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4436 
4437 	in = mcx_cmdq_in(cqe);
4438 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_PD);
4439 	in->cmd_op_mod = htobe16(0);
4440 
4441 	mcx_cmdq_post(sc, cqe, 0);
4442 
4443 	error = mcx_cmdq_poll(sc, cqe, 1000);
4444 	if (error != 0) {
4445 		printf(", alloc pd timeout\n");
4446 		return (-1);
4447 	}
4448 	if (mcx_cmdq_verify(cqe) != 0) {
4449 		printf(", alloc pd command corrupt\n");
4450 		return (-1);
4451 	}
4452 
4453 	out = mcx_cmdq_out(cqe);
4454 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4455 		printf(", alloc pd failed (%x)\n", out->cmd_status);
4456 		return (-1);
4457 	}
4458 
4459 	sc->sc_pd = mcx_get_id(out->cmd_pd);
4460 	return (0);
4461 }
4462 
4463 static int
4464 mcx_alloc_tdomain(struct mcx_softc *sc)
4465 {
4466 	struct mcx_cmdq_entry *cqe;
4467 	struct mcx_cmd_alloc_td_in *in;
4468 	struct mcx_cmd_alloc_td_out *out;
4469 	int error;
4470 
4471 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4472 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4473 
4474 	in = mcx_cmdq_in(cqe);
4475 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_TRANSPORT_DOMAIN);
4476 	in->cmd_op_mod = htobe16(0);
4477 
4478 	mcx_cmdq_post(sc, cqe, 0);
4479 
4480 	error = mcx_cmdq_poll(sc, cqe, 1000);
4481 	if (error != 0) {
4482 		printf(", alloc transport domain timeout\n");
4483 		return (-1);
4484 	}
4485 	if (mcx_cmdq_verify(cqe) != 0) {
4486 		printf(", alloc transport domain command corrupt\n");
4487 		return (-1);
4488 	}
4489 
4490 	out = mcx_cmdq_out(cqe);
4491 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4492 		printf(", alloc transport domain failed (%x)\n",
4493 		    out->cmd_status);
4494 		return (-1);
4495 	}
4496 
4497 	sc->sc_tdomain = mcx_get_id(out->cmd_tdomain);
4498 	return (0);
4499 }
4500 
4501 static int
4502 mcx_query_nic_vport_context(struct mcx_softc *sc)
4503 {
4504 	struct mcx_dmamem mxm;
4505 	struct mcx_cmdq_entry *cqe;
4506 	struct mcx_cmd_query_nic_vport_context_in *in;
4507 	struct mcx_cmd_query_nic_vport_context_out *out;
4508 	struct mcx_nic_vport_ctx *ctx;
4509 	uint8_t *addr;
4510 	int error, token, i;
4511 
4512 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4513 	token = mcx_cmdq_token(sc);
4514 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx), token);
4515 
4516 	in = mcx_cmdq_in(cqe);
4517 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_NIC_VPORT_CONTEXT);
4518 	in->cmd_op_mod = htobe16(0);
4519 	in->cmd_allowed_list_type = 0;
4520 
4521 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4522 	    &cqe->cq_output_ptr, token) != 0) {
4523 		printf(", unable to allocate "
4524 		    "query nic vport context mailboxen\n");
4525 		return (-1);
4526 	}
4527 	mcx_cmdq_mboxes_sign(&mxm, 1);
4528 	mcx_cmdq_post(sc, cqe, 0);
4529 
4530 	error = mcx_cmdq_poll(sc, cqe, 1000);
4531 	if (error != 0) {
4532 		printf(", query nic vport context timeout\n");
4533 		goto free;
4534 	}
4535 	if (mcx_cmdq_verify(cqe) != 0) {
4536 		printf(", query nic vport context command corrupt\n");
4537 		goto free;
4538 	}
4539 
4540 	out = mcx_cmdq_out(cqe);
4541 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4542 		printf(", query nic vport context failed (%x, %x)\n",
4543 		    out->cmd_status, betoh32(out->cmd_syndrome));
4544 		error = -1;
4545 		goto free;
4546 	}
4547 
4548 	ctx = (struct mcx_nic_vport_ctx *)
4549 	    mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4550 	addr = (uint8_t *)&ctx->vp_perm_addr;
4551 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
4552 		sc->sc_ac.ac_enaddr[i] = addr[i + 2];
4553 	}
4554 free:
4555 	mcx_dmamem_free(sc, &mxm);
4556 
4557 	return (error);
4558 }
4559 
4560 static int
4561 mcx_query_special_contexts(struct mcx_softc *sc)
4562 {
4563 	struct mcx_cmdq_entry *cqe;
4564 	struct mcx_cmd_query_special_ctx_in *in;
4565 	struct mcx_cmd_query_special_ctx_out *out;
4566 	int error;
4567 
4568 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4569 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
4570 
4571 	in = mcx_cmdq_in(cqe);
4572 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SPECIAL_CONTEXTS);
4573 	in->cmd_op_mod = htobe16(0);
4574 
4575 	mcx_cmdq_post(sc, cqe, 0);
4576 
4577 	error = mcx_cmdq_poll(sc, cqe, 1000);
4578 	if (error != 0) {
4579 		printf(", query special contexts timeout\n");
4580 		return (-1);
4581 	}
4582 	if (mcx_cmdq_verify(cqe) != 0) {
4583 		printf(", query special contexts command corrupt\n");
4584 		return (-1);
4585 	}
4586 
4587 	out = mcx_cmdq_out(cqe);
4588 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4589 		printf(", query special contexts failed (%x)\n",
4590 		    out->cmd_status);
4591 		return (-1);
4592 	}
4593 
4594 	sc->sc_lkey = betoh32(out->cmd_resd_lkey);
4595 	return (0);
4596 }
4597 
4598 static int
4599 mcx_set_port_mtu(struct mcx_softc *sc, int mtu)
4600 {
4601 	struct mcx_reg_pmtu pmtu;
4602 	int error;
4603 
4604 	/* read max mtu */
4605 	memset(&pmtu, 0, sizeof(pmtu));
4606 	pmtu.rp_local_port = 1;
4607 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_READ, &pmtu,
4608 	    sizeof(pmtu), MCX_CMDQ_SLOT_POLL);
4609 	if (error != 0) {
4610 		printf(", unable to get port MTU\n");
4611 		return error;
4612 	}
4613 
4614 	mtu = min(mtu, betoh16(pmtu.rp_max_mtu));
4615 	pmtu.rp_admin_mtu = htobe16(mtu);
4616 	error = mcx_access_hca_reg(sc, MCX_REG_PMTU, MCX_REG_OP_WRITE, &pmtu,
4617 	    sizeof(pmtu), MCX_CMDQ_SLOT_POLL);
4618 	if (error != 0) {
4619 		printf(", unable to set port MTU\n");
4620 		return error;
4621 	}
4622 
4623 	sc->sc_hardmtu = mtu;
4624 	sc->sc_rxbufsz = roundup(mtu + ETHER_ALIGN, sizeof(long));
4625 	return 0;
4626 }
4627 
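/*
 * Editorial note: the receive buffer size computed above is the
 * negotiated MTU plus ETHER_ALIGN, rounded up to a multiple of
 * sizeof(long) so the payload behind the 2-byte alignment pad stays
 * naturally aligned; e.g. a hypothetical 1500-byte MTU on a 64-bit
 * system gives roundup(1502, 8) == 1504.
 */
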
4628 static int
4629 mcx_create_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar, int db, int eqn)
4630 {
4631 	struct mcx_cmdq_entry *cmde;
4632 	struct mcx_cq_entry *cqe;
4633 	struct mcx_dmamem mxm;
4634 	struct mcx_cmd_create_cq_in *in;
4635 	struct mcx_cmd_create_cq_mb_in *mbin;
4636 	struct mcx_cmd_create_cq_out *out;
4637 	int error;
4638 	uint64_t *pas;
4639 	int insize, npages, paslen, i, token;
4640 
4641 	cq->cq_doorbell = MCX_CQ_DOORBELL_BASE + (MCX_CQ_DOORBELL_STRIDE * db);
4642 
4643 	npages = howmany((1 << MCX_LOG_CQ_SIZE) * sizeof(struct mcx_cq_entry),
4644 	    MCX_PAGE_SIZE);
4645 	paslen = npages * sizeof(*pas);
4646 	insize = sizeof(struct mcx_cmd_create_cq_mb_in) + paslen;
4647 
4648 	if (mcx_dmamem_alloc(sc, &cq->cq_mem, npages * MCX_PAGE_SIZE,
4649 	    MCX_PAGE_SIZE) != 0) {
4650 		printf("%s: unable to allocate completion queue memory\n",
4651 		    DEVNAME(sc));
4652 		return (-1);
4653 	}
4654 	cqe = MCX_DMA_KVA(&cq->cq_mem);
4655 	for (i = 0; i < (1 << MCX_LOG_CQ_SIZE); i++) {
4656 		cqe[i].cq_opcode_owner = MCX_CQ_ENTRY_FLAG_OWNER;
4657 	}
4658 
4659 	cmde = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4660 	token = mcx_cmdq_token(sc);
4661 	mcx_cmdq_init(sc, cmde, sizeof(*in) + insize, sizeof(*out), token);
4662 
4663 	in = mcx_cmdq_in(cmde);
4664 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_CQ);
4665 	in->cmd_op_mod = htobe16(0);
4666 
4667 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4668 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4669 	    &cmde->cq_input_ptr, token) != 0) {
4670 		printf("%s: unable to allocate create cq mailboxen\n",
4671 		    DEVNAME(sc));
4672 		goto free_cq;
4673 	}
4674 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4675 	mbin->cmd_cq_ctx.cq_uar_size = htobe32(
4676 	    (MCX_LOG_CQ_SIZE << MCX_CQ_CTX_LOG_CQ_SIZE_SHIFT) | uar);
4677 	mbin->cmd_cq_ctx.cq_eqn = htobe32(eqn);
4678 	mbin->cmd_cq_ctx.cq_period_max_count = htobe32(
4679 	    (MCX_CQ_MOD_PERIOD << MCX_CQ_CTX_PERIOD_SHIFT) |
4680 	    MCX_CQ_MOD_COUNTER);
4681 	mbin->cmd_cq_ctx.cq_doorbell = htobe64(
4682 	    MCX_DMA_DVA(&sc->sc_doorbell_mem) + cq->cq_doorbell);
4683 
4684 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4685 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
4686 
4687 	/* physical addresses follow the mailbox in data */
4688 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin), npages, &cq->cq_mem);
4689 	mcx_cmdq_post(sc, cmde, 0);
4690 
4691 	error = mcx_cmdq_poll(sc, cmde, 1000);
4692 	if (error != 0) {
4693 		printf("%s: create cq timeout\n", DEVNAME(sc));
4694 		goto free_mxm;
4695 	}
4696 	if (mcx_cmdq_verify(cmde) != 0) {
4697 		printf("%s: create cq command corrupt\n", DEVNAME(sc));
4698 		goto free_mxm;
4699 	}
4700 
4701 	out = mcx_cmdq_out(cmde);
4702 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4703 		printf("%s: create cq failed (%x, %x)\n", DEVNAME(sc),
4704 		    out->cmd_status, betoh32(out->cmd_syndrome));
4705 		goto free_mxm;
4706 	}
4707 
4708 	cq->cq_n = mcx_get_id(out->cmd_cqn);
4709 	cq->cq_cons = 0;
4710 	cq->cq_count = 0;
4711 
4712 	mcx_dmamem_free(sc, &mxm);
4713 
4714 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4715 	    cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4716 	    BUS_DMASYNC_PREWRITE);
4717 
4718 	mcx_arm_cq(sc, cq, uar);
4719 
4720 	return (0);
4721 
4722 free_mxm:
4723 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4724 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4725 	mcx_dmamem_free(sc, &mxm);
4726 free_cq:
4727 	mcx_dmamem_free(sc, &cq->cq_mem);
4728 	return (-1);
4729 }
4730 
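/*
 * Editorial note: cq_period_max_count in mcx_create_cq() packs the
 * moderation period into the high bits (via MCX_CQ_CTX_PERIOD_SHIFT)
 * and the completion count threshold into the low bits, so the device
 * raises an event after MCX_CQ_MOD_PERIOD time units or
 * MCX_CQ_MOD_COUNTER completions, whichever comes first.
 */
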
4731 static int
4732 mcx_destroy_cq(struct mcx_softc *sc, struct mcx_cq *cq)
4733 {
4734 	struct mcx_cmdq_entry *cqe;
4735 	struct mcx_cmd_destroy_cq_in *in;
4736 	struct mcx_cmd_destroy_cq_out *out;
4737 	int error;
4738 	int token;
4739 
4740 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4741 	token = mcx_cmdq_token(sc);
4742 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4743 
4744 	in = mcx_cmdq_in(cqe);
4745 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_CQ);
4746 	in->cmd_op_mod = htobe16(0);
4747 	in->cmd_cqn = htobe32(cq->cq_n);
4748 
4749 	mcx_cmdq_post(sc, cqe, 0);
4750 	error = mcx_cmdq_poll(sc, cqe, 1000);
4751 	if (error != 0) {
4752 		printf("%s: destroy cq timeout\n", DEVNAME(sc));
4753 		return error;
4754 	}
4755 	if (mcx_cmdq_verify(cqe) != 0) {
4756 		printf("%s: destroy cq command corrupt\n", DEVNAME(sc));
4757 		return -1;
4758 	}
4759 
4760 	out = mcx_cmdq_out(cqe);
4761 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4762 		printf("%s: destroy cq failed (%x, %x)\n", DEVNAME(sc),
4763 		    out->cmd_status, betoh32(out->cmd_syndrome));
4764 		return -1;
4765 	}
4766 
4767 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4768 	    cq->cq_doorbell, sizeof(struct mcx_cq_doorbell),
4769 	    BUS_DMASYNC_POSTWRITE);
4770 
4771 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
4772 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
4773 	mcx_dmamem_free(sc, &cq->cq_mem);
4774 
4775 	cq->cq_n = 0;
4776 	cq->cq_cons = 0;
4777 	cq->cq_count = 0;
4778 	return 0;
4779 }
4780 
4781 static int
4782 mcx_create_rq(struct mcx_softc *sc, struct mcx_rx *rx, int db, int cqn)
4783 {
4784 	struct mcx_cmdq_entry *cqe;
4785 	struct mcx_dmamem mxm;
4786 	struct mcx_cmd_create_rq_in *in;
4787 	struct mcx_cmd_create_rq_out *out;
4788 	struct mcx_rq_ctx *mbin;
4789 	int error;
4790 	uint64_t *pas;
4791 	uint32_t rq_flags;
4792 	int insize, npages, paslen, token;
4793 
4794 	rx->rx_doorbell = MCX_WQ_DOORBELL_BASE +
4795 	    (db * MCX_WQ_DOORBELL_STRIDE);
4796 
4797 	npages = howmany((1 << MCX_LOG_RQ_SIZE) * sizeof(struct mcx_rq_entry),
4798 	    MCX_PAGE_SIZE);
4799 	paslen = npages * sizeof(*pas);
4800 	insize = 0x10 + sizeof(struct mcx_rq_ctx) + paslen;
4801 
4802 	if (mcx_dmamem_alloc(sc, &rx->rx_rq_mem, npages * MCX_PAGE_SIZE,
4803 	    MCX_PAGE_SIZE) != 0) {
4804 		printf("%s: unable to allocate receive queue memory\n",
4805 		    DEVNAME(sc));
4806 		return (-1);
4807 	}
4808 
4809 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4810 	token = mcx_cmdq_token(sc);
4811 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize, sizeof(*out), token);
4812 
4813 	in = mcx_cmdq_in(cqe);
4814 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQ);
4815 	in->cmd_op_mod = htobe16(0);
4816 
4817 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
4818 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
4819 	    &cqe->cq_input_ptr, token) != 0) {
4820 		printf("%s: unable to allocate create rq mailboxen\n",
4821 		    DEVNAME(sc));
4822 		goto free_rq;
4823 	}
4824 	mbin = (struct mcx_rq_ctx *)
4825 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
4826 	rq_flags = MCX_RQ_CTX_RLKEY;
4827 #if NVLAN == 0
4828 	rq_flags |= MCX_RQ_CTX_VLAN_STRIP_DIS;
4829 #endif
4830 	mbin->rq_flags = htobe32(rq_flags);
4831 	mbin->rq_cqn = htobe32(cqn);
4832 	mbin->rq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
4833 	mbin->rq_wq.wq_pd = htobe32(sc->sc_pd);
4834 	mbin->rq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
4835 	    rx->rx_doorbell);
4836 	mbin->rq_wq.wq_log_stride = htobe16(4);
4837 	mbin->rq_wq.wq_log_size = MCX_LOG_RQ_SIZE;
4838 
4839 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4840 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
4841 
4842 	/* physical addresses follow the mailbox in data */
4843 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10, npages, &rx->rx_rq_mem);
4844 	mcx_cmdq_post(sc, cqe, 0);
4845 
4846 	error = mcx_cmdq_poll(sc, cqe, 1000);
4847 	if (error != 0) {
4848 		printf("%s: create rq timeout\n", DEVNAME(sc));
4849 		goto free_mxm;
4850 	}
4851 	if (mcx_cmdq_verify(cqe) != 0) {
4852 		printf("%s: create rq command corrupt\n", DEVNAME(sc));
4853 		goto free_mxm;
4854 	}
4855 
4856 	out = mcx_cmdq_out(cqe);
4857 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4858 		printf("%s: create rq failed (%x, %x)\n", DEVNAME(sc),
4859 		    out->cmd_status, betoh32(out->cmd_syndrome));
4860 		goto free_mxm;
4861 	}
4862 
4863 	rx->rx_rqn = mcx_get_id(out->cmd_rqn);
4864 
4865 	mcx_dmamem_free(sc, &mxm);
4866 
4867 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4868 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
4869 
4870 	return (0);
4871 
4872 free_mxm:
4873 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4874 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
4875 	mcx_dmamem_free(sc, &mxm);
4876 free_rq:
4877 	mcx_dmamem_free(sc, &rx->rx_rq_mem);
4878 	return (-1);
4879 }
4880 
4881 static int
4882 mcx_ready_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4883 {
4884 	struct mcx_cmdq_entry *cqe;
4885 	struct mcx_dmamem mxm;
4886 	struct mcx_cmd_modify_rq_in *in;
4887 	struct mcx_cmd_modify_rq_mb_in *mbin;
4888 	struct mcx_cmd_modify_rq_out *out;
4889 	int error;
4890 	int token;
4891 
4892 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4893 	token = mcx_cmdq_token(sc);
4894 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
4895 	    sizeof(*out), token);
4896 
4897 	in = mcx_cmdq_in(cqe);
4898 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_RQ);
4899 	in->cmd_op_mod = htobe16(0);
4900 	in->cmd_rq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | rx->rx_rqn);
4901 
4902 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
4903 	    &cqe->cq_input_ptr, token) != 0) {
4904 		printf("%s: unable to allocate modify rq mailbox\n",
4905 		    DEVNAME(sc));
4906 		return (-1);
4907 	}
4908 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
4909 	mbin->cmd_rq_ctx.rq_flags = htobe32(
4910 	    MCX_QUEUE_STATE_RDY << MCX_RQ_CTX_STATE_SHIFT);
4911 
4912 	mcx_cmdq_mboxes_sign(&mxm, 1);
4913 	mcx_cmdq_post(sc, cqe, 0);
4914 	error = mcx_cmdq_poll(sc, cqe, 1000);
4915 	if (error != 0) {
4916 		printf("%s: modify rq timeout\n", DEVNAME(sc));
4917 		goto free;
4918 	}
4919 	if (mcx_cmdq_verify(cqe) != 0) {
4920 		printf("%s: modify rq command corrupt\n", DEVNAME(sc));
4921 		goto free;
4922 	}
4923 
4924 	out = mcx_cmdq_out(cqe);
4925 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4926 		printf("%s: modify rq failed (%x, %x)\n", DEVNAME(sc),
4927 		    out->cmd_status, betoh32(out->cmd_syndrome));
4928 		error = -1;
4929 		goto free;
4930 	}
4931 
4932 free:
4933 	mcx_dmamem_free(sc, &mxm);
4934 	return (error);
4935 }
4936 
4937 static int
4938 mcx_destroy_rq(struct mcx_softc *sc, struct mcx_rx *rx)
4939 {
4940 	struct mcx_cmdq_entry *cqe;
4941 	struct mcx_cmd_destroy_rq_in *in;
4942 	struct mcx_cmd_destroy_rq_out *out;
4943 	int error;
4944 	int token;
4945 
4946 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4947 	token = mcx_cmdq_token(sc);
4948 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
4949 
4950 	in = mcx_cmdq_in(cqe);
4951 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQ);
4952 	in->cmd_op_mod = htobe16(0);
4953 	in->cmd_rqn = htobe32(rx->rx_rqn);
4954 
4955 	mcx_cmdq_post(sc, cqe, 0);
4956 	error = mcx_cmdq_poll(sc, cqe, 1000);
4957 	if (error != 0) {
4958 		printf("%s: destroy rq timeout\n", DEVNAME(sc));
4959 		return error;
4960 	}
4961 	if (mcx_cmdq_verify(cqe) != 0) {
4962 		printf("%s: destroy rq command corrupt\n", DEVNAME(sc));
4963 		return -1;
4964 	}
4965 
4966 	out = mcx_cmdq_out(cqe);
4967 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
4968 		printf("%s: destroy rq failed (%x, %x)\n", DEVNAME(sc),
4969 		    out->cmd_status, betoh32(out->cmd_syndrome));
4970 		return -1;
4971 	}
4972 
4973 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
4974 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
4975 
4976 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
4977 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
4978 	mcx_dmamem_free(sc, &rx->rx_rq_mem);
4979 
4980 	rx->rx_rqn = 0;
4981 	return 0;
4982 }
4983 
4984 static int
4985 mcx_create_tir_direct(struct mcx_softc *sc, struct mcx_rx *rx, int *tirn)
4986 {
4987 	struct mcx_cmdq_entry *cqe;
4988 	struct mcx_dmamem mxm;
4989 	struct mcx_cmd_create_tir_in *in;
4990 	struct mcx_cmd_create_tir_mb_in *mbin;
4991 	struct mcx_cmd_create_tir_out *out;
4992 	int error;
4993 	int token;
4994 
4995 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
4996 	token = mcx_cmdq_token(sc);
4997 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
4998 	    sizeof(*out), token);
4999 
5000 	in = mcx_cmdq_in(cqe);
5001 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
5002 	in->cmd_op_mod = htobe16(0);
5003 
5004 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5005 	    &cqe->cq_input_ptr, token) != 0) {
5006 		printf("%s: unable to allocate create tir mailbox\n",
5007 		    DEVNAME(sc));
5008 		return (-1);
5009 	}
5010 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5011 	/* leave disp_type = 0, so packets get sent to the inline rqn */
5012 	mbin->cmd_inline_rqn = htobe32(rx->rx_rqn);
5013 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
5014 
5015 	mcx_cmdq_post(sc, cqe, 0);
5016 	error = mcx_cmdq_poll(sc, cqe, 1000);
5017 	if (error != 0) {
5018 		printf("%s: create tir timeout\n", DEVNAME(sc));
5019 		goto free;
5020 	}
5021 	if (mcx_cmdq_verify(cqe) != 0) {
5022 		printf("%s: create tir command corrupt\n", DEVNAME(sc));
5023 		goto free;
5024 	}
5025 
5026 	out = mcx_cmdq_out(cqe);
5027 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5028 		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
5029 		    out->cmd_status, betoh32(out->cmd_syndrome));
5030 		error = -1;
5031 		goto free;
5032 	}
5033 
5034 	*tirn = mcx_get_id(out->cmd_tirn);
5035 free:
5036 	mcx_dmamem_free(sc, &mxm);
5037 	return (error);
5038 }
5039 
5040 static int
5041 mcx_create_tir_indirect(struct mcx_softc *sc, int rqtn, uint32_t hash_sel,
5042     int *tirn)
5043 {
5044 	struct mcx_cmdq_entry *cqe;
5045 	struct mcx_dmamem mxm;
5046 	struct mcx_cmd_create_tir_in *in;
5047 	struct mcx_cmd_create_tir_mb_in *mbin;
5048 	struct mcx_cmd_create_tir_out *out;
5049 	int error;
5050 	int token;
5051 
5052 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5053 	token = mcx_cmdq_token(sc);
5054 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5055 	    sizeof(*out), token);
5056 
5057 	in = mcx_cmdq_in(cqe);
5058 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIR);
5059 	in->cmd_op_mod = htobe16(0);
5060 
5061 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5062 	    &cqe->cq_input_ptr, token) != 0) {
5063 		printf("%s: unable to allocate create tir mailbox\n",
5064 		    DEVNAME(sc));
5065 		return (-1);
5066 	}
5067 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5068 	mbin->cmd_disp_type = htobe32(MCX_TIR_CTX_DISP_TYPE_INDIRECT
5069 	    << MCX_TIR_CTX_DISP_TYPE_SHIFT);
5070 	mbin->cmd_indir_table = htobe32(rqtn);
5071 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain |
5072 	    MCX_TIR_CTX_HASH_TOEPLITZ << MCX_TIR_CTX_HASH_SHIFT);
5073 	mbin->cmd_rx_hash_sel_outer = htobe32(hash_sel);
5074 	stoeplitz_to_key(&mbin->cmd_rx_hash_key,
5075 	    sizeof(mbin->cmd_rx_hash_key));
5076 
5077 	mcx_cmdq_post(sc, cqe, 0);
5078 	error = mcx_cmdq_poll(sc, cqe, 1000);
5079 	if (error != 0) {
5080 		printf("%s: create tir timeout\n", DEVNAME(sc));
5081 		goto free;
5082 	}
5083 	if (mcx_cmdq_verify(cqe) != 0) {
5084 		printf("%s: create tir command corrupt\n", DEVNAME(sc));
5085 		goto free;
5086 	}
5087 
5088 	out = mcx_cmdq_out(cqe);
5089 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5090 		printf("%s: create tir failed (%x, %x)\n", DEVNAME(sc),
5091 		    out->cmd_status, betoh32(out->cmd_syndrome));
5092 		error = -1;
5093 		goto free;
5094 	}
5095 
5096 	*tirn = mcx_get_id(out->cmd_tirn);
5097 free:
5098 	mcx_dmamem_free(sc, &mxm);
5099 	return (error);
5100 }
5101 
5102 static int
5103 mcx_destroy_tir(struct mcx_softc *sc, int tirn)
5104 {
5105 	struct mcx_cmdq_entry *cqe;
5106 	struct mcx_cmd_destroy_tir_in *in;
5107 	struct mcx_cmd_destroy_tir_out *out;
5108 	int error;
5109 	int token;
5110 
5111 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5112 	token = mcx_cmdq_token(sc);
5113 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5114 
5115 	in = mcx_cmdq_in(cqe);
5116 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIR);
5117 	in->cmd_op_mod = htobe16(0);
5118 	in->cmd_tirn = htobe32(tirn);
5119 
5120 	mcx_cmdq_post(sc, cqe, 0);
5121 	error = mcx_cmdq_poll(sc, cqe, 1000);
5122 	if (error != 0) {
5123 		printf("%s: destroy tir timeout\n", DEVNAME(sc));
5124 		return error;
5125 	}
5126 	if (mcx_cmdq_verify(cqe) != 0) {
5127 		printf("%s: destroy tir command corrupt\n", DEVNAME(sc));
5128 		return -1;
5129 	}
5130 
5131 	out = mcx_cmdq_out(cqe);
5132 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5133 		printf("%s: destroy tir failed (%x, %x)\n", DEVNAME(sc),
5134 		    out->cmd_status, betoh32(out->cmd_syndrome));
5135 		return -1;
5136 	}
5137 
5138 	return (0);
5139 }
5140 
5141 static int
5142 mcx_create_sq(struct mcx_softc *sc, struct mcx_tx *tx, int uar, int db,
5143     int cqn)
5144 {
5145 	struct mcx_cmdq_entry *cqe;
5146 	struct mcx_dmamem mxm;
5147 	struct mcx_cmd_create_sq_in *in;
5148 	struct mcx_sq_ctx *mbin;
5149 	struct mcx_cmd_create_sq_out *out;
5150 	int error;
5151 	uint64_t *pas;
5152 	int insize, npages, paslen, token;
5153 
5154 	tx->tx_doorbell = MCX_WQ_DOORBELL_BASE +
5155 	    (db * MCX_WQ_DOORBELL_STRIDE) + 4;
5156 
5157 	npages = howmany((1 << MCX_LOG_SQ_SIZE) * sizeof(struct mcx_sq_entry),
5158 	    MCX_PAGE_SIZE);
5159 	paslen = npages * sizeof(*pas);
5160 	insize = sizeof(struct mcx_sq_ctx) + paslen;
5161 
5162 	if (mcx_dmamem_alloc(sc, &tx->tx_sq_mem, npages * MCX_PAGE_SIZE,
5163 	    MCX_PAGE_SIZE) != 0) {
5164 		printf("%s: unable to allocate send queue memory\n",
5165 		    DEVNAME(sc));
5166 		return (-1);
5167 	}
5168 
5169 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5170 	token = mcx_cmdq_token(sc);
5171 	mcx_cmdq_init(sc, cqe, sizeof(*in) + insize + paslen, sizeof(*out),
5172 	    token);
5173 
5174 	in = mcx_cmdq_in(cqe);
5175 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_SQ);
5176 	in->cmd_op_mod = htobe16(0);
5177 
5178 	if (mcx_cmdq_mboxes_alloc(sc, &mxm,
5179 	    howmany(insize, MCX_CMDQ_MAILBOX_DATASIZE),
5180 	    &cqe->cq_input_ptr, token) != 0) {
5181 		printf("%s: unable to allocate create sq mailboxen\n",
5182 		    DEVNAME(sc));
5183 		goto free_sq;
5184 	}
5185 	mbin = (struct mcx_sq_ctx *)
5186 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0))) + 0x10);
5187 	mbin->sq_flags = htobe32(MCX_SQ_CTX_RLKEY |
5188 	    (1 << MCX_SQ_CTX_MIN_WQE_INLINE_SHIFT));
5189 	mbin->sq_cqn = htobe32(cqn);
5190 	mbin->sq_tis_lst_sz = htobe32(1 << MCX_SQ_CTX_TIS_LST_SZ_SHIFT);
5191 	mbin->sq_tis_num = htobe32(sc->sc_tis);
5192 	mbin->sq_wq.wq_type = MCX_WQ_CTX_TYPE_CYCLIC;
5193 	mbin->sq_wq.wq_pd = htobe32(sc->sc_pd);
5194 	mbin->sq_wq.wq_uar_page = htobe32(uar);
5195 	mbin->sq_wq.wq_doorbell = htobe64(MCX_DMA_DVA(&sc->sc_doorbell_mem) +
5196 	    tx->tx_doorbell);
5197 	mbin->sq_wq.wq_log_stride = htobe16(MCX_LOG_SQ_ENTRY_SIZE);
5198 	mbin->sq_wq.wq_log_size = MCX_LOG_SQ_SIZE;
5199 
5200 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5201 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
5202 
5203 	/* physical addresses follow the mailbox in data */
5204 	mcx_cmdq_mboxes_pas(&mxm, sizeof(*mbin) + 0x10,
5205 	    npages, &tx->tx_sq_mem);
5206 	mcx_cmdq_post(sc, cqe, 0);
5207 
5208 	error = mcx_cmdq_poll(sc, cqe, 1000);
5209 	if (error != 0) {
5210 		printf("%s: create sq timeout\n", DEVNAME(sc));
5211 		goto free_mxm;
5212 	}
5213 	if (mcx_cmdq_verify(cqe) != 0) {
5214 		printf("%s: create sq command corrupt\n", DEVNAME(sc));
5215 		goto free_mxm;
5216 	}
5217 
5218 	out = mcx_cmdq_out(cqe);
5219 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5220 		printf("%s: create sq failed (%x, %x)\n", DEVNAME(sc),
5221 		    out->cmd_status, betoh32(out->cmd_syndrome));
5222 		goto free_mxm;
5223 	}
5224 
5225 	tx->tx_uar = uar;
5226 	tx->tx_sqn = mcx_get_id(out->cmd_sqn);
5227 
5228 	mcx_dmamem_free(sc, &mxm);
5229 
5230 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5231 	    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
5232 
5233 	return (0);
5234 
5235 free_mxm:
5236 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5237 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5238 	mcx_dmamem_free(sc, &mxm);
5239 free_sq:
5240 	mcx_dmamem_free(sc, &tx->tx_sq_mem);
5241 	return (-1);
5242 }
5243 
5244 static int
5245 mcx_destroy_sq(struct mcx_softc *sc, struct mcx_tx *tx)
5246 {
5247 	struct mcx_cmdq_entry *cqe;
5248 	struct mcx_cmd_destroy_sq_in *in;
5249 	struct mcx_cmd_destroy_sq_out *out;
5250 	int error;
5251 	int token;
5252 
5253 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5254 	token = mcx_cmdq_token(sc);
5255 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5256 
5257 	in = mcx_cmdq_in(cqe);
5258 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_SQ);
5259 	in->cmd_op_mod = htobe16(0);
5260 	in->cmd_sqn = htobe32(tx->tx_sqn);
5261 
5262 	mcx_cmdq_post(sc, cqe, 0);
5263 	error = mcx_cmdq_poll(sc, cqe, 1000);
5264 	if (error != 0) {
5265 		printf("%s: destroy sq timeout\n", DEVNAME(sc));
5266 		return error;
5267 	}
5268 	if (mcx_cmdq_verify(cqe) != 0) {
5269 		printf("%s: destroy sq command corrupt\n", DEVNAME(sc));
5270 		return -1;
5271 	}
5272 
5273 	out = mcx_cmdq_out(cqe);
5274 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5275 		printf("%s: destroy sq failed (%x, %x)\n", DEVNAME(sc),
5276 		    out->cmd_status, betoh32(out->cmd_syndrome));
5277 		return -1;
5278 	}
5279 
5280 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
5281 	    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
5282 
5283 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
5284 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
5285 	mcx_dmamem_free(sc, &tx->tx_sq_mem);
5286 
5287 	tx->tx_sqn = 0;
5288 	return 0;
5289 }
5290 
5291 static int
5292 mcx_ready_sq(struct mcx_softc *sc, struct mcx_tx *tx)
5293 {
5294 	struct mcx_cmdq_entry *cqe;
5295 	struct mcx_dmamem mxm;
5296 	struct mcx_cmd_modify_sq_in *in;
5297 	struct mcx_cmd_modify_sq_mb_in *mbin;
5298 	struct mcx_cmd_modify_sq_out *out;
5299 	int error;
5300 	int token;
5301 
5302 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5303 	token = mcx_cmdq_token(sc);
5304 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5305 	    sizeof(*out), token);
5306 
5307 	in = mcx_cmdq_in(cqe);
5308 	in->cmd_opcode = htobe16(MCX_CMD_MODIFY_SQ);
5309 	in->cmd_op_mod = htobe16(0);
5310 	in->cmd_sq_state = htobe32((MCX_QUEUE_STATE_RST << 28) | tx->tx_sqn);
5311 
5312 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5313 	    &cqe->cq_input_ptr, token) != 0) {
5314 		printf("%s: unable to allocate modify sq mailbox\n",
5315 		    DEVNAME(sc));
5316 		return (-1);
5317 	}
5318 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5319 	mbin->cmd_sq_ctx.sq_flags = htobe32(
5320 	    MCX_QUEUE_STATE_RDY << MCX_SQ_CTX_STATE_SHIFT);
5321 
5322 	mcx_cmdq_mboxes_sign(&mxm, 1);
5323 	mcx_cmdq_post(sc, cqe, 0);
5324 	error = mcx_cmdq_poll(sc, cqe, 1000);
5325 	if (error != 0) {
5326 		printf("%s: modify sq timeout\n", DEVNAME(sc));
5327 		goto free;
5328 	}
5329 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
5330 		printf("%s: modify sq command corrupt\n", DEVNAME(sc));
5331 		goto free;
5332 	}
5333 
5334 	out = mcx_cmdq_out(cqe);
5335 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5336 		printf("%s: modify sq failed (%x, %x)\n", DEVNAME(sc),
5337 		    out->cmd_status, betoh32(out->cmd_syndrome));
5338 		error = -1;
5339 		goto free;
5340 	}
5341 
5342 free:
5343 	mcx_dmamem_free(sc, &mxm);
5344 	return (error);
5345 }
5346 
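/*
 * a TIS ("transport interface send") ties send queues to the
 * transport domain; mcx_create_sq() points each sq at it via
 * sq_tis_num.
 */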
5347 static int
5348 mcx_create_tis(struct mcx_softc *sc, int *tis)
5349 {
5350 	struct mcx_cmdq_entry *cqe;
5351 	struct mcx_dmamem mxm;
5352 	struct mcx_cmd_create_tis_in *in;
5353 	struct mcx_cmd_create_tis_mb_in *mbin;
5354 	struct mcx_cmd_create_tis_out *out;
5355 	int error;
5356 	int token;
5357 
5358 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5359 	token = mcx_cmdq_token(sc);
5360 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5361 	    sizeof(*out), token);
5362 
5363 	in = mcx_cmdq_in(cqe);
5364 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_TIS);
5365 	in->cmd_op_mod = htobe16(0);
5366 
5367 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5368 	    &cqe->cq_input_ptr, token) != 0) {
5369 		printf("%s: unable to allocate create tis mailbox\n",
5370 		    DEVNAME(sc));
5371 		return (-1);
5372 	}
5373 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5374 	mbin->cmd_tdomain = htobe32(sc->sc_tdomain);
5375 
5376 	mcx_cmdq_mboxes_sign(&mxm, 1);
5377 	mcx_cmdq_post(sc, cqe, 0);
5378 	error = mcx_cmdq_poll(sc, cqe, 1000);
5379 	if (error != 0) {
5380 		printf("%s: create tis timeout\n", DEVNAME(sc));
5381 		goto free;
5382 	}
5383 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
5384 		printf("%s: create tis command corrupt\n", DEVNAME(sc));
5385 		goto free;
5386 	}
5387 
5388 	out = mcx_cmdq_out(cqe);
5389 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5390 		printf("%s: create tis failed (%x, %x)\n", DEVNAME(sc),
5391 		    out->cmd_status, betoh32(out->cmd_syndrome));
5392 		error = -1;
5393 		goto free;
5394 	}
5395 
5396 	*tis = mcx_get_id(out->cmd_tisn);
5397 free:
5398 	mcx_dmamem_free(sc, &mxm);
5399 	return (error);
5400 }
5401 
5402 static int
5403 mcx_destroy_tis(struct mcx_softc *sc, int tis)
5404 {
5405 	struct mcx_cmdq_entry *cqe;
5406 	struct mcx_cmd_destroy_tis_in *in;
5407 	struct mcx_cmd_destroy_tis_out *out;
5408 	int error;
5409 	int token;
5410 
5411 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5412 	token = mcx_cmdq_token(sc);
5413 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5414 
5415 	in = mcx_cmdq_in(cqe);
5416 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_TIS);
5417 	in->cmd_op_mod = htobe16(0);
5418 	in->cmd_tisn = htobe32(tis);
5419 
5420 	mcx_cmdq_post(sc, cqe, 0);
5421 	error = mcx_cmdq_poll(sc, cqe, 1000);
5422 	if (error != 0) {
5423 		printf("%s: destroy tis timeout\n", DEVNAME(sc));
5424 		return error;
5425 	}
5426 	if (mcx_cmdq_verify(cqe) != 0) {
5427 		printf("%s: destroy tis command corrupt\n", DEVNAME(sc));
5428 		return -1;
5429 	}
5430 
5431 	out = mcx_cmdq_out(cqe);
5432 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5433 		printf("%s: destroy tis failed (%x, %x)\n", DEVNAME(sc),
5434 		    out->cmd_status, betoh32(out->cmd_syndrome));
5435 		return -1;
5436 	}
5437 
5438 	return 0;
5439 }
5440 
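/*
 * an RQT is the RSS indirection table: the list of RQ numbers that
 * hashed receive traffic is spread across. the (big endian) rq
 * numbers follow the rqt context in the mailbox.
 */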
5441 static int
5442 mcx_create_rqt(struct mcx_softc *sc, int size, int *rqns, int *rqt)
5443 {
5444 	struct mcx_cmdq_entry *cqe;
5445 	struct mcx_dmamem mxm;
5446 	struct mcx_cmd_create_rqt_in *in;
5447 	struct mcx_cmd_create_rqt_mb_in *mbin;
5448 	struct mcx_cmd_create_rqt_out *out;
5449 	struct mcx_rqt_ctx *rqt_ctx;
5450 	int *rqtn;
5451 	int error;
5452 	int token;
5453 	int i;
5454 
5455 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5456 	token = mcx_cmdq_token(sc);
5457 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) +
5458 	    (size * sizeof(int)), sizeof(*out), token);
5459 
5460 	in = mcx_cmdq_in(cqe);
5461 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_RQT);
5462 	in->cmd_op_mod = htobe16(0);
5463 
5464 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5465 	    &cqe->cq_input_ptr, token) != 0) {
5466 		printf("%s: unable to allocate create rqt mailbox\n",
5467 		    DEVNAME(sc));
5468 		return (-1);
5469 	}
5470 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5471 	rqt_ctx = &mbin->cmd_rqt;
5472 	rqt_ctx->cmd_rqt_max_size = htobe16(sc->sc_max_rqt_size);
5473 	rqt_ctx->cmd_rqt_actual_size = htobe16(size);
5474 
5475 	/* rqt list follows the rqt context */
5476 	rqtn = (int *)(rqt_ctx + 1);
5477 	for (i = 0; i < size; i++) {
5478 		rqtn[i] = htobe32(rqns[i]);
5479 	}
5480 
5481 	mcx_cmdq_mboxes_sign(&mxm, 1);
5482 	mcx_cmdq_post(sc, cqe, 0);
5483 	error = mcx_cmdq_poll(sc, cqe, 1000);
5484 	if (error != 0) {
5485 		printf("%s: create rqt timeout\n", DEVNAME(sc));
5486 		goto free;
5487 	}
5488 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
5489 		printf("%s: create rqt command corrupt\n", DEVNAME(sc));
5490 		goto free;
5491 	}
5492 
5493 	out = mcx_cmdq_out(cqe);
5494 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5495 		printf("%s: create rqt failed (%x, %x)\n", DEVNAME(sc),
5496 		    out->cmd_status, betoh32(out->cmd_syndrome));
5497 		error = -1;
5498 		goto free;
5499 	}
5500 
5501 	*rqt = mcx_get_id(out->cmd_rqtn);
5503 free:
5504 	mcx_dmamem_free(sc, &mxm);
5505 	return (error);
5506 }
5507 
5508 static int
5509 mcx_destroy_rqt(struct mcx_softc *sc, int rqt)
5510 {
5511 	struct mcx_cmdq_entry *cqe;
5512 	struct mcx_cmd_destroy_rqt_in *in;
5513 	struct mcx_cmd_destroy_rqt_out *out;
5514 	int error;
5515 	int token;
5516 
5517 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5518 	token = mcx_cmdq_token(sc);
5519 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), token);
5520 
5521 	in = mcx_cmdq_in(cqe);
5522 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_RQT);
5523 	in->cmd_op_mod = htobe16(0);
5524 	in->cmd_rqtn = htobe32(rqt);
5525 
5526 	mcx_cmdq_post(sc, cqe, 0);
5527 	error = mcx_cmdq_poll(sc, cqe, 1000);
5528 	if (error != 0) {
5529 		printf("%s: destroy rqt timeout\n", DEVNAME(sc));
5530 		return error;
5531 	}
5532 	if (mcx_cmdq_verify(cqe) != 0) {
5533 		printf("%s: destroy rqt command corrupt\n", DEVNAME(sc));
5534 		return -1;
5535 	}
5536 
5537 	out = mcx_cmdq_out(cqe);
5538 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5539 		printf("%s: destroy rqt failed (%x, %x)\n", DEVNAME(sc),
5540 		    out->cmd_status, betoh32(out->cmd_syndrome));
5541 		return -1;
5542 	}
5543 
5544 	return 0;
5545 }
5546 
5547 #if 0
5548 static int
5549 mcx_alloc_flow_counter(struct mcx_softc *sc, int i)
5550 {
5551 	struct mcx_cmdq_entry *cqe;
5552 	struct mcx_cmd_alloc_flow_counter_in *in;
5553 	struct mcx_cmd_alloc_flow_counter_out *out;
5554 	int error;
5555 
5556 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5557 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out), mcx_cmdq_token(sc));
5558 
5559 	in = mcx_cmdq_in(cqe);
5560 	in->cmd_opcode = htobe16(MCX_CMD_ALLOC_FLOW_COUNTER);
5561 	in->cmd_op_mod = htobe16(0);
5562 
5563 	mcx_cmdq_post(sc, cqe, 0);
5564 
5565 	error = mcx_cmdq_poll(sc, cqe, 1000);
5566 	if (error != 0) {
5567 		printf("%s: alloc flow counter timeout\n", DEVNAME(sc));
5568 		return (-1);
5569 	}
5570 	if (mcx_cmdq_verify(cqe) != 0) {
5571 		printf("%s: alloc flow counter command corrupt\n", DEVNAME(sc));
5572 		return (-1);
5573 	}
5574 
5575 	out = (struct mcx_cmd_alloc_flow_counter_out *)cqe->cq_output_data;
5576 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5577 		printf("%s: alloc flow counter failed (%x)\n", DEVNAME(sc),
5578 		    out->cmd_status);
5579 		return (-1);
5580 	}
5581 
5582 	sc->sc_flow_counter_id[i]  = betoh16(out->cmd_flow_counter_id);
5583 	printf("flow counter id %d = %d\n", i, sc->sc_flow_counter_id[i]);
5584 
5585 	return (0);
5586 }
5587 #endif
5588 
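/*
 * rx flow tables are carved up into flow groups. each group has a
 * single match criteria mask, and the entries inside it supply the
 * match values and a destination (a TIR, or another flow table).
 */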
5589 static int
5590 mcx_create_flow_table(struct mcx_softc *sc, int log_size, int level,
5591     int *flow_table_id)
5592 {
5593 	struct mcx_cmdq_entry *cqe;
5594 	struct mcx_dmamem mxm;
5595 	struct mcx_cmd_create_flow_table_in *in;
5596 	struct mcx_cmd_create_flow_table_mb_in *mbin;
5597 	struct mcx_cmd_create_flow_table_out *out;
5598 	int error;
5599 	int token;
5600 
5601 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5602 	token = mcx_cmdq_token(sc);
5603 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5604 	    sizeof(*out), token);
5605 
5606 	in = mcx_cmdq_in(cqe);
5607 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_TABLE);
5608 	in->cmd_op_mod = htobe16(0);
5609 
5610 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5611 	    &cqe->cq_input_ptr, token) != 0) {
5612 		printf("%s: unable to allocate create flow table mailbox\n",
5613 		    DEVNAME(sc));
5614 		return (-1);
5615 	}
5616 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5617 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5618 	mbin->cmd_ctx.ft_log_size = log_size;
5619 	mbin->cmd_ctx.ft_level = level;
5620 
5621 	mcx_cmdq_mboxes_sign(&mxm, 1);
5622 	mcx_cmdq_post(sc, cqe, 0);
5623 	error = mcx_cmdq_poll(sc, cqe, 1000);
5624 	if (error != 0) {
5625 		printf("%s: create flow table timeout\n", DEVNAME(sc));
5626 		goto free;
5627 	}
5628 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
5629 		printf("%s: create flow table command corrupt\n", DEVNAME(sc));
5630 		goto free;
5631 	}
5632 
5633 	out = mcx_cmdq_out(cqe);
5634 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5635 		printf("%s: create flow table failed (%x, %x)\n", DEVNAME(sc),
5636 		    out->cmd_status, betoh32(out->cmd_syndrome));
5637 		error = -1;
5638 		goto free;
5639 	}
5640 
5641 	*flow_table_id = mcx_get_id(out->cmd_table_id);
5642 free:
5643 	mcx_dmamem_free(sc, &mxm);
5644 	return (error);
5645 }
5646 
5647 static int
5648 mcx_set_flow_table_root(struct mcx_softc *sc, int flow_table_id)
5649 {
5650 	struct mcx_cmdq_entry *cqe;
5651 	struct mcx_dmamem mxm;
5652 	struct mcx_cmd_set_flow_table_root_in *in;
5653 	struct mcx_cmd_set_flow_table_root_mb_in *mbin;
5654 	struct mcx_cmd_set_flow_table_root_out *out;
5655 	int error;
5656 	int token;
5657 
5658 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5659 	token = mcx_cmdq_token(sc);
5660 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
5661 	    sizeof(*out), token);
5662 
5663 	in = mcx_cmdq_in(cqe);
5664 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ROOT);
5665 	in->cmd_op_mod = htobe16(0);
5666 
5667 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5668 	    &cqe->cq_input_ptr, token) != 0) {
5669 		printf("%s: unable to allocate set flow table root mailbox\n",
5670 		    DEVNAME(sc));
5671 		return (-1);
5672 	}
5673 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5674 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5675 	mbin->cmd_table_id = htobe32(flow_table_id);
5676 
5677 	mcx_cmdq_mboxes_sign(&mxm, 1);
5678 	mcx_cmdq_post(sc, cqe, 0);
5679 	error = mcx_cmdq_poll(sc, cqe, 1000);
5680 	if (error != 0) {
5681 		printf("%s: set flow table root timeout\n", DEVNAME(sc));
5682 		goto free;
5683 	}
5684 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
5685 		printf("%s: set flow table root command corrupt\n",
5686 		    DEVNAME(sc));
5687 		goto free;
5688 	}
5689 
5690 	out = mcx_cmdq_out(cqe);
5691 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5692 		printf("%s: set flow table root failed (%x, %x)\n",
5693 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
5694 		error = -1;
5695 		goto free;
5696 	}
5697 
5698 free:
5699 	mcx_dmamem_free(sc, &mxm);
5700 	return (error);
5701 }
5702 
5703 static int
5704 mcx_destroy_flow_table(struct mcx_softc *sc, int flow_table_id)
5705 {
5706 	struct mcx_cmdq_entry *cqe;
5707 	struct mcx_dmamem mxm;
5708 	struct mcx_cmd_destroy_flow_table_in *in;
5709 	struct mcx_cmd_destroy_flow_table_mb_in *mb;
5710 	struct mcx_cmd_destroy_flow_table_out *out;
5711 	int error;
5712 	int token;
5713 
5714 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5715 	token = mcx_cmdq_token(sc);
5716 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
5717 
5718 	in = mcx_cmdq_in(cqe);
5719 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_TABLE);
5720 	in->cmd_op_mod = htobe16(0);
5721 
5722 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
5723 	    &cqe->cq_input_ptr, token) != 0) {
5724 		printf("%s: unable to allocate destroy flow table mailbox\n",
5725 		    DEVNAME(sc));
5726 		return (-1);
5727 	}
5728 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5729 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5730 	mb->cmd_table_id = htobe32(flow_table_id);
5731 
5732 	mcx_cmdq_mboxes_sign(&mxm, 1);
5733 	mcx_cmdq_post(sc, cqe, 0);
5734 	error = mcx_cmdq_poll(sc, cqe, 1000);
5735 	if (error != 0) {
5736 		printf("%s: destroy flow table timeout\n", DEVNAME(sc));
5737 		goto free;
5738 	}
5739 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
5740 		printf("%s: destroy flow table command corrupt\n",
5741 		    DEVNAME(sc));
5742 		goto free;
5743 	}
5744 
5745 	out = mcx_cmdq_out(cqe);
5746 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5747 		printf("%s: destroy flow table failed (%x, %x)\n", DEVNAME(sc),
5748 		    out->cmd_status, betoh32(out->cmd_syndrome));
5749 		error = -1;
5750 		goto free;
5751 	}
5752 
5753 free:
5754 	mcx_dmamem_free(sc, &mxm);
5755 	return (error);
5756 }
5757 
5758 
5759 static int
5760 mcx_create_flow_group(struct mcx_softc *sc, int flow_table_id, int group,
5761     int start, int size, int match_enable, struct mcx_flow_match *match)
5762 {
5763 	struct mcx_cmdq_entry *cqe;
5764 	struct mcx_dmamem mxm;
5765 	struct mcx_cmd_create_flow_group_in *in;
5766 	struct mcx_cmd_create_flow_group_mb_in *mbin;
5767 	struct mcx_cmd_create_flow_group_out *out;
5768 	struct mcx_flow_group *mfg;
5769 	int error;
5770 	int token;
5771 
5772 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5773 	token = mcx_cmdq_token(sc);
5774 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
5775 	    token);
5776 
5777 	in = mcx_cmdq_in(cqe);
5778 	in->cmd_opcode = htobe16(MCX_CMD_CREATE_FLOW_GROUP);
5779 	in->cmd_op_mod = htobe16(0);
5780 
5781 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5782 	    != 0) {
5783 		printf("%s: unable to allocate create flow group mailbox\n",
5784 		    DEVNAME(sc));
5785 		return (-1);
5786 	}
5787 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5788 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5789 	mbin->cmd_table_id = htobe32(flow_table_id);
5790 	mbin->cmd_start_flow_index = htobe32(start);
5791 	mbin->cmd_end_flow_index = htobe32(start + (size - 1));
5792 
5793 	mbin->cmd_match_criteria_enable = match_enable;
5794 	memcpy(&mbin->cmd_match_criteria, match, sizeof(*match));
5795 
5796 	mcx_cmdq_mboxes_sign(&mxm, 2);
5797 	mcx_cmdq_post(sc, cqe, 0);
5798 	error = mcx_cmdq_poll(sc, cqe, 1000);
5799 	if (error != 0) {
5800 		printf("%s: create flow group timeout\n", DEVNAME(sc));
5801 		goto free;
5802 	}
5803 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
5804 		printf("%s: create flow group command corrupt\n", DEVNAME(sc));
5805 		goto free;
5806 	}
5807 
5808 	out = mcx_cmdq_out(cqe);
5809 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5810 		printf("%s: create flow group failed (%x, %x)\n", DEVNAME(sc),
5811 		    out->cmd_status, betoh32(out->cmd_syndrome));
5812 		error = -1;
5813 		goto free;
5814 	}
5815 
5816 	mfg = &sc->sc_flow_group[group];
5817 	mfg->g_id = mcx_get_id(out->cmd_group_id);
5818 	mfg->g_table = flow_table_id;
5819 	mfg->g_start = start;
5820 	mfg->g_size = size;
5821 
5822 free:
5823 	mcx_dmamem_free(sc, &mxm);
5824 	return (error);
5825 }
5826 
5827 static int
5828 mcx_destroy_flow_group(struct mcx_softc *sc, int group)
5829 {
5830 	struct mcx_cmdq_entry *cqe;
5831 	struct mcx_dmamem mxm;
5832 	struct mcx_cmd_destroy_flow_group_in *in;
5833 	struct mcx_cmd_destroy_flow_group_mb_in *mb;
5834 	struct mcx_cmd_destroy_flow_group_out *out;
5835 	struct mcx_flow_group *mfg;
5836 	int error;
5837 	int token;
5838 
5839 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5840 	token = mcx_cmdq_token(sc);
5841 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mb), sizeof(*out), token);
5842 
5843 	in = mcx_cmdq_in(cqe);
5844 	in->cmd_opcode = htobe16(MCX_CMD_DESTROY_FLOW_GROUP);
5845 	in->cmd_op_mod = htobe16(0);
5846 
5847 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
5848 	    &cqe->cq_input_ptr, token) != 0) {
5849 		printf("%s: unable to allocate destroy flow group mailbox\n",
5850 		    DEVNAME(sc));
5851 		return (-1);
5852 	}
5853 	mb = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5854 	mb->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5855 	mfg = &sc->sc_flow_group[group];
5856 	mb->cmd_table_id = htobe32(mfg->g_table);
5857 	mb->cmd_group_id = htobe32(mfg->g_id);
5858 
5859 	mcx_cmdq_mboxes_sign(&mxm, 2);
5860 	mcx_cmdq_post(sc, cqe, 0);
5861 	error = mcx_cmdq_poll(sc, cqe, 1000);
5862 	if (error != 0) {
5863 		printf("%s: destroy flow group timeout\n", DEVNAME(sc));
5864 		goto free;
5865 	}
5866 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
5867 		printf("%s: destroy flow group command corrupt\n", DEVNAME(sc));
5868 		goto free;
5869 	}
5870 
5871 	out = mcx_cmdq_out(cqe);
5872 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5873 		printf("%s: destroy flow group failed (%x, %x)\n", DEVNAME(sc),
5874 		    out->cmd_status, betoh32(out->cmd_syndrome));
5875 		error = -1;
5876 		goto free;
5877 	}
5878 
5879 	mfg->g_id = -1;
5880 	mfg->g_table = -1;
5881 	mfg->g_size = 0;
5882 	mfg->g_start = 0;
5883 free:
5884 	mcx_dmamem_free(sc, &mxm);
5885 	return (error);
5886 }
5887 
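/*
 * a flow context is bigger than a single command mailbox, so entries
 * are written through two mailboxes and the destination list lands
 * partway into the second one (see the offset comment below).
 */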
5888 static int
5889 mcx_set_flow_table_entry_mac(struct mcx_softc *sc, int group, int index,
5890     uint8_t *macaddr, uint32_t dest)
5891 {
5892 	struct mcx_cmdq_entry *cqe;
5893 	struct mcx_dmamem mxm;
5894 	struct mcx_cmd_set_flow_table_entry_in *in;
5895 	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
5896 	struct mcx_cmd_set_flow_table_entry_out *out;
5897 	struct mcx_flow_group *mfg;
5898 	uint32_t *pdest;
5899 	int error;
5900 	int token;
5901 
5902 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5903 	token = mcx_cmdq_token(sc);
5904 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
5905 	    sizeof(*out), token);
5906 
5907 	in = mcx_cmdq_in(cqe);
5908 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
5909 	in->cmd_op_mod = htobe16(0);
5910 
5911 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5912 	    != 0) {
5913 		printf("%s: unable to allocate set flow table entry mailbox\n",
5914 		    DEVNAME(sc));
5915 		return (-1);
5916 	}
5917 
5918 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5919 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5920 
5921 	mfg = &sc->sc_flow_group[group];
5922 	mbin->cmd_table_id = htobe32(mfg->g_table);
5923 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
5924 	mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
5925 
5926 	/* flow context ends at offset 0x330, 0x130 into the second mbox */
5927 	pdest = (uint32_t *)
5928 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
5929 	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
5930 	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
5931 	*pdest = htobe32(dest);
5932 
5933 	/* the only thing we match on at the moment is the dest mac address */
5934 	if (macaddr != NULL) {
5935 		memcpy(mbin->cmd_flow_ctx.fc_match_value.mc_dest_mac, macaddr,
5936 		    ETHER_ADDR_LEN);
5937 	}
5938 
5939 	mcx_cmdq_mboxes_sign(&mxm, 2);
5940 	mcx_cmdq_post(sc, cqe, 0);
5941 	error = mcx_cmdq_poll(sc, cqe, 1000);
5942 	if (error != 0) {
5943 		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
5944 		goto free;
5945 	}
5946 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
5947 		printf("%s: set flow table entry command corrupt\n",
5948 		    DEVNAME(sc));
5949 		goto free;
5950 	}
5951 
5952 	out = mcx_cmdq_out(cqe);
5953 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
5954 		printf("%s: set flow table entry failed (%x, %x)\n",
5955 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
5956 		error = -1;
5957 		goto free;
5958 	}
5959 
5960 free:
5961 	mcx_dmamem_free(sc, &mxm);
5962 	return (error);
5963 }
5964 
5965 static int
5966 mcx_set_flow_table_entry_proto(struct mcx_softc *sc, int group, int index,
5967     int ethertype, int ip_proto, uint32_t dest)
5968 {
5969 	struct mcx_cmdq_entry *cqe;
5970 	struct mcx_dmamem mxm;
5971 	struct mcx_cmd_set_flow_table_entry_in *in;
5972 	struct mcx_cmd_set_flow_table_entry_mb_in *mbin;
5973 	struct mcx_cmd_set_flow_table_entry_out *out;
5974 	struct mcx_flow_group *mfg;
5975 	uint32_t *pdest;
5976 	int error;
5977 	int token;
5978 
5979 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
5980 	token = mcx_cmdq_token(sc);
5981 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin) + sizeof(*pdest),
5982 	    sizeof(*out), token);
5983 
5984 	in = mcx_cmdq_in(cqe);
5985 	in->cmd_opcode = htobe16(MCX_CMD_SET_FLOW_TABLE_ENTRY);
5986 	in->cmd_op_mod = htobe16(0);
5987 
5988 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2, &cqe->cq_input_ptr, token)
5989 	    != 0) {
5990 		printf("%s: unable to allocate set flow table entry mailbox\n",
5991 		    DEVNAME(sc));
5992 		return (-1);
5993 	}
5994 
5995 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
5996 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
5997 
5998 	mfg = &sc->sc_flow_group[group];
5999 	mbin->cmd_table_id = htobe32(mfg->g_table);
6000 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
6001 	mbin->cmd_flow_ctx.fc_group_id = htobe32(mfg->g_id);
6002 
6003 	/* flow context ends at offset 0x330, 0x130 into the second mbox */
6004 	pdest = (uint32_t *)
6005 	    (((char *)mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1))) + 0x130);
6006 	mbin->cmd_flow_ctx.fc_action = htobe32(MCX_FLOW_CONTEXT_ACTION_FORWARD);
6007 	mbin->cmd_flow_ctx.fc_dest_list_size = htobe32(1);
6008 	*pdest = htobe32(dest);
6009 
6010 	mbin->cmd_flow_ctx.fc_match_value.mc_ethertype = htobe16(ethertype);
6011 	mbin->cmd_flow_ctx.fc_match_value.mc_ip_proto = ip_proto;
6012 
6013 	mcx_cmdq_mboxes_sign(&mxm, 2);
6014 	mcx_cmdq_post(sc, cqe, 0);
6015 	error = mcx_cmdq_poll(sc, cqe, 1000);
6016 	if (error != 0) {
6017 		printf("%s: set flow table entry timeout\n", DEVNAME(sc));
6018 		goto free;
6019 	}
6020 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
6021 		printf("%s: set flow table entry command corrupt\n",
6022 		    DEVNAME(sc));
6023 		goto free;
6024 	}
6025 
6026 	out = mcx_cmdq_out(cqe);
6027 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6028 		printf("%s: set flow table entry failed (%x, %x)\n",
6029 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
6030 		error = -1;
6031 		goto free;
6032 	}
6033 
6034 free:
6035 	mcx_dmamem_free(sc, &mxm);
6036 	return (error);
6037 }
6038 
6039 static int
6040 mcx_delete_flow_table_entry(struct mcx_softc *sc, int group, int index)
6041 {
6042 	struct mcx_cmdq_entry *cqe;
6043 	struct mcx_dmamem mxm;
6044 	struct mcx_cmd_delete_flow_table_entry_in *in;
6045 	struct mcx_cmd_delete_flow_table_entry_mb_in *mbin;
6046 	struct mcx_cmd_delete_flow_table_entry_out *out;
6047 	struct mcx_flow_group *mfg;
6048 	int error;
6049 	int token;
6050 
6051 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6052 	token = mcx_cmdq_token(sc);
6053 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out),
6054 	    token);
6055 
6056 	in = mcx_cmdq_in(cqe);
6057 	in->cmd_opcode = htobe16(MCX_CMD_DELETE_FLOW_TABLE_ENTRY);
6058 	in->cmd_op_mod = htobe16(0);
6059 
6060 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6061 	    &cqe->cq_input_ptr, token) != 0) {
6062 		printf("%s: unable to allocate "
6063 		    "delete flow table entry mailbox\n", DEVNAME(sc));
6064 		return (-1);
6065 	}
6066 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6067 	mbin->cmd_table_type = MCX_FLOW_TABLE_TYPE_RX;
6068 
6069 	mfg = &sc->sc_flow_group[group];
6070 	mbin->cmd_table_id = htobe32(mfg->g_table);
6071 	mbin->cmd_flow_index = htobe32(mfg->g_start + index);
6072 
6073 	mcx_cmdq_mboxes_sign(&mxm, 2);
6074 	mcx_cmdq_post(sc, cqe, 0);
6075 	error = mcx_cmdq_poll(sc, cqe, 1000);
6076 	if (error != 0) {
6077 		printf("%s: delete flow table entry timeout\n", DEVNAME(sc));
6078 		goto free;
6079 	}
6080 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
6081 		printf("%s: delete flow table entry command corrupt\n",
6082 		    DEVNAME(sc));
6083 		goto free;
6084 	}
6085 
6086 	out = mcx_cmdq_out(cqe);
6087 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6088 		printf("%s: delete flow table entry %d:%d failed (%x, %x)\n",
6089 		    DEVNAME(sc), group, index, out->cmd_status,
6090 		    betoh32(out->cmd_syndrome));
6091 		error = -1;
6092 		goto free;
6093 	}
6094 
6095 free:
6096 	mcx_dmamem_free(sc, &mxm);
6097 	return (error);
6098 }
6099 
6100 #if 0
6101 int
6102 mcx_dump_flow_table(struct mcx_softc *sc, int flow_table_id)
6103 {
6104 	struct mcx_dmamem mxm;
6105 	struct mcx_cmdq_entry *cqe;
6106 	struct mcx_cmd_query_flow_table_in *in;
6107 	struct mcx_cmd_query_flow_table_mb_in *mbin;
6108 	struct mcx_cmd_query_flow_table_out *out;
6109 	struct mcx_cmd_query_flow_table_mb_out *mbout;
6110 	uint8_t token = mcx_cmdq_token(sc);
6111 	int error;
6112 	int i;
6113 	uint8_t *dump;
6114 
6115 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6116 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6117 	    sizeof(*out) + sizeof(*mbout) + 16, token);
6118 
6119 	in = mcx_cmdq_in(cqe);
6120 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE);
6121 	in->cmd_op_mod = htobe16(0);
6122 
6123 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6124 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE);
6125 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6126 	    &cqe->cq_output_ptr, token) != 0) {
6127 		printf(", unable to allocate query flow table mailboxes\n");
6128 		return (-1);
6129 	}
6130 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6131 
6132 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6133 	mbin->cmd_table_type = 0;
6134 	mbin->cmd_table_id = htobe32(flow_table_id);
6135 
6136 	mcx_cmdq_mboxes_sign(&mxm, 1);
6137 
6138 	mcx_cmdq_post(sc, cqe, 0);
6139 	error = mcx_cmdq_poll(sc, cqe, 1000);
6140 	if (error != 0) {
6141 		printf("%s: query flow table timeout\n", DEVNAME(sc));
6142 		goto free;
6143 	}
6144 	error = mcx_cmdq_verify(cqe);
6145 	if (error != 0) {
6146 		printf("%s: query flow table reply corrupt\n", DEVNAME(sc));
6147 		goto free;
6148 	}
6149 
6150 	out = mcx_cmdq_out(cqe);
6151 	switch (out->cmd_status) {
6152 	case MCX_CQ_STATUS_OK:
6153 		break;
6154 	default:
6155 		printf("%s: query flow table failed (%x/%x)\n", DEVNAME(sc),
6156 		    out->cmd_status, betoh32(out->cmd_syndrome));
6157 		error = -1;
6158 		goto free;
6159 	}
6160 
6161 	mbout = (struct mcx_cmd_query_flow_table_mb_out *)
6162 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6163 	dump = (uint8_t *)mbout + 8;
6164 	for (i = 0; i < sizeof(struct mcx_flow_table_ctx); i++) {
6165 		printf("%.2x ", dump[i]);
6166 		if (i % 16 == 15)
6167 			printf("\n");
6168 	}
6169 free:
6170 	mcx_cq_mboxes_free(sc, &mxm);
6171 	return (error);
6172 }
6173 int
6174 mcx_dump_flow_table_entry(struct mcx_softc *sc, int flow_table_id, int index)
6175 {
6176 	struct mcx_dmamem mxm;
6177 	struct mcx_cmdq_entry *cqe;
6178 	struct mcx_cmd_query_flow_table_entry_in *in;
6179 	struct mcx_cmd_query_flow_table_entry_mb_in *mbin;
6180 	struct mcx_cmd_query_flow_table_entry_out *out;
6181 	struct mcx_cmd_query_flow_table_entry_mb_out *mbout;
6182 	uint8_t token = mcx_cmdq_token(sc);
6183 	int error;
6184 	int i;
6185 	uint8_t *dump;
6186 
6187 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6188 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6189 	    sizeof(*out) + sizeof(*mbout) + 16, token);
6190 
6191 	in = mcx_cmdq_in(cqe);
6192 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_TABLE_ENTRY);
6193 	in->cmd_op_mod = htobe16(0);
6194 
6195 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6196 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6197 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6198 	    &cqe->cq_output_ptr, token) != 0) {
6199 		printf(", unable to allocate "
6200 		    "query flow table entry mailboxes\n");
6201 		return (-1);
6202 	}
6203 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6204 
6205 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6206 	mbin->cmd_table_type = 0;
6207 	mbin->cmd_table_id = htobe32(flow_table_id);
6208 	mbin->cmd_flow_index = htobe32(index);
6209 
6210 	mcx_cmdq_mboxes_sign(&mxm, 1);
6211 
6212 	mcx_cmdq_post(sc, cqe, 0);
6213 	error = mcx_cmdq_poll(sc, cqe, 1000);
6214 	if (error != 0) {
6215 		printf("%s: query flow table entry timeout\n", DEVNAME(sc));
6216 		goto free;
6217 	}
6218 	error = mcx_cmdq_verify(cqe);
6219 	if (error != 0) {
6220 		printf("%s: query flow table entry reply corrupt\n",
6221 		    DEVNAME(sc));
6222 		goto free;
6223 	}
6224 
6225 	out = mcx_cmdq_out(cqe);
6226 	switch (out->cmd_status) {
6227 	case MCX_CQ_STATUS_OK:
6228 		break;
6229 	default:
6230 		printf("%s: query flow table entry failed (%x/%x)\n",
6231 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
6232 		error = -1;
6233 		goto free;
6234 	}
6235 
6236 	mbout = (struct mcx_cmd_query_flow_table_entry_mb_out *)
6237 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6238 	dump = (uint8_t *)mbout;
6239 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6240 		printf("%.2x ", dump[i]);
6241 		if (i % 16 == 15)
6242 			printf("\n");
6243 	}
6244 
6245 free:
6246 	mcx_cq_mboxes_free(sc, &mxm);
6247 	return (error);
6248 }
6249 
6250 int
6251 mcx_dump_flow_group(struct mcx_softc *sc, int flow_table_id)
6252 {
6253 	struct mcx_dmamem mxm;
6254 	struct mcx_cmdq_entry *cqe;
6255 	struct mcx_cmd_query_flow_group_in *in;
6256 	struct mcx_cmd_query_flow_group_mb_in *mbin;
6257 	struct mcx_cmd_query_flow_group_out *out;
6258 	struct mcx_cmd_query_flow_group_mb_out *mbout;
6259 	uint8_t token = mcx_cmdq_token(sc);
6260 	int error;
6261 	int i;
6262 	uint8_t *dump;
6263 
6264 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6265 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6266 	    sizeof(*out) + sizeof(*mbout) + 16, token);
6267 
6268 	in = mcx_cmdq_in(cqe);
6269 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_GROUP);
6270 	in->cmd_op_mod = htobe16(0);
6271 
6272 	CTASSERT(sizeof(*mbin) <= MCX_CMDQ_MAILBOX_DATASIZE);
6273 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6274 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6275 	    &cqe->cq_output_ptr, token) != 0) {
6276 		printf(", unable to allocate query flow group mailboxes\n");
6277 		return (-1);
6278 	}
6279 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6280 
6281 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6282 	mbin->cmd_table_type = 0;
6283 	mbin->cmd_table_id = htobe32(flow_table_id);
6284 	mbin->cmd_group_id = htobe32(sc->sc_flow_group_id);
6285 
6286 	mcx_cmdq_mboxes_sign(&mxm, 1);
6287 
6288 	mcx_cmdq_post(sc, cqe, 0);
6289 	error = mcx_cmdq_poll(sc, cqe, 1000);
6290 	if (error != 0) {
6291 		printf("%s: query flow group timeout\n", DEVNAME(sc));
6292 		goto free;
6293 	}
6294 	error = mcx_cmdq_verify(cqe);
6295 	if (error != 0) {
6296 		printf("%s: query flow group reply corrupt\n", DEVNAME(sc));
6297 		goto free;
6298 	}
6299 
6300 	out = mcx_cmdq_out(cqe);
6301 	switch (out->cmd_status) {
6302 	case MCX_CQ_STATUS_OK:
6303 		break;
6304 	default:
6305 		printf("%s: query flow group failed (%x/%x)\n", DEVNAME(sc),
6306 		    out->cmd_status, betoh32(out->cmd_syndrome));
6307 		error = -1;
6308 		goto free;
6309 	}
6310 
6311 	mbout = (struct mcx_cmd_query_flow_group_mb_out *)
6312 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6313 	dump = (uint8_t *)mbout;
6314 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6315 		printf("%.2x ", dump[i]);
6316 		if (i % 16 == 15)
6317 			printf("\n");
6318 	}
6319 	dump = (uint8_t *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 1)));
6320 	for (i = 0; i < MCX_CMDQ_MAILBOX_DATASIZE; i++) {
6321 		printf("%.2x ", dump[i]);
6322 		if (i % 16 == 15)
6323 			printf("\n");
6324 	}
6325 
6326 free:
6327 	mcx_cq_mboxes_free(sc, &mxm);
6328 	return (error);
6329 }
6330 
6331 static int
6332 mcx_dump_counters(struct mcx_softc *sc)
6333 {
6334 	struct mcx_dmamem mxm;
6335 	struct mcx_cmdq_entry *cqe;
6336 	struct mcx_cmd_query_vport_counters_in *in;
6337 	struct mcx_cmd_query_vport_counters_mb_in *mbin;
6338 	struct mcx_cmd_query_vport_counters_out *out;
6339 	struct mcx_nic_vport_counters *counters;
6340 	int error, token;
6341 
6342 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6343 	token = mcx_cmdq_token(sc);
6344 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin),
6345 	    sizeof(*out) + sizeof(*counters), token);
6346 
6347 	in = mcx_cmdq_in(cqe);
6348 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_VPORT_COUNTERS);
6349 	in->cmd_op_mod = htobe16(0);
6350 
6351 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6352 	    &cqe->cq_output_ptr, token) != 0) {
6353 		printf(", unable to allocate "
6354 		    "query nic vport counters mailboxen\n");
6355 		return (-1);
6356 	}
6357 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6358 
6359 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6360 	mbin->cmd_clear = 0x80;
6361 
6362 	mcx_cmdq_mboxes_sign(&mxm, 1);
6363 	mcx_cmdq_post(sc, cqe, 0);
6364 
6365 	error = mcx_cmdq_poll(sc, cqe, 1000);
6366 	if (error != 0) {
6367 		printf("%s: query nic vport counters timeout\n", DEVNAME(sc));
6368 		goto free;
6369 	}
6370 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
6371 		printf("%s: query nic vport counters command corrupt\n",
6372 		    DEVNAME(sc));
6373 		goto free;
6374 	}
6375 
6376 	out = mcx_cmdq_out(cqe);
6377 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6378 		printf("%s: query nic vport counters failed (%x, %x)\n",
6379 		    DEVNAME(sc), out->cmd_status, betoh32(out->cmd_syndrome));
6380 		error = -1;
6381 		goto free;
6382 	}
6383 
6384 	counters = (struct mcx_nic_vport_counters *)
6385 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6386 	if (counters->rx_bcast.packets + counters->tx_bcast.packets +
6387 	    counters->rx_ucast.packets + counters->tx_ucast.packets +
6388 	    counters->rx_err.packets + counters->tx_err.packets)
6389 		printf("%s: err %llx/%llx uc %llx/%llx bc %llx/%llx\n",
6390 		    DEVNAME(sc),
6391 		    betoh64(counters->tx_err.packets),
6392 		    betoh64(counters->rx_err.packets),
6393 		    betoh64(counters->tx_ucast.packets),
6394 		    betoh64(counters->rx_ucast.packets),
6395 		    betoh64(counters->tx_bcast.packets),
6396 		    betoh64(counters->rx_bcast.packets));
6397 free:
6398 	mcx_dmamem_free(sc, &mxm);
6399 
6400 	return (error);
6401 }
6402 
6403 static int
6404 mcx_dump_flow_counter(struct mcx_softc *sc, int index, const char *what)
6405 {
6406 	struct mcx_dmamem mxm;
6407 	struct mcx_cmdq_entry *cqe;
6408 	struct mcx_cmd_query_flow_counter_in *in;
6409 	struct mcx_cmd_query_flow_counter_mb_in *mbin;
6410 	struct mcx_cmd_query_flow_counter_out *out;
6411 	struct mcx_counter *counters;
6412 	int error, token;
6413 
6414 	cqe = MCX_DMA_KVA(&sc->sc_cmdq_mem);
6415 	token = mcx_cmdq_token(sc);
6416 	mcx_cmdq_init(sc, cqe, sizeof(*in) + sizeof(*mbin), sizeof(*out) +
6417 	    sizeof(*counters), token);
6418 
6419 	in = mcx_cmdq_in(cqe);
6420 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_FLOW_COUNTER);
6421 	in->cmd_op_mod = htobe16(0);
6422 
6423 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 1,
6424 	    &cqe->cq_output_ptr, token) != 0) {
6425 		printf(", unable to allocate query flow counter mailboxen\n");
6426 		return (-1);
6427 	}
6428 	cqe->cq_input_ptr = cqe->cq_output_ptr;
6429 	mbin = mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0));
6430 	mbin->cmd_flow_counter_id = htobe16(sc->sc_flow_counter_id[index]);
6431 	mbin->cmd_clear = 0x80;
6432 
6433 	mcx_cmdq_mboxes_sign(&mxm, 1);
6434 	mcx_cmdq_post(sc, cqe, 0);
6435 
6436 	error = mcx_cmdq_poll(sc, cqe, 1000);
6437 	if (error != 0) {
6438 		printf("%s: query flow counter timeout\n", DEVNAME(sc));
6439 		goto free;
6440 	}
6441 	if ((error = mcx_cmdq_verify(cqe)) != 0) {
6442 		printf("%s: query flow counter command corrupt\n", DEVNAME(sc));
6443 		goto free;
6444 	}
6445 
6446 	out = mcx_cmdq_out(cqe);
6447 	if (out->cmd_status != MCX_CQ_STATUS_OK) {
6448 		printf("%s: query flow counter failed (%x, %x)\n", DEVNAME(sc),
6449 		    out->cmd_status, betoh32(out->cmd_syndrome));
6450 		error = -1;
6451 		goto free;
6452 	}
6453 
6454 	counters = (struct mcx_counter *)
6455 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6456 	if (counters->packets)
6457 		printf("%s: %s inflow %llx\n", DEVNAME(sc), what,
6458 		    betoh64(counters->packets));
6459 free:
6460 	mcx_dmamem_free(sc, &mxm);
6461 
6462 	return (error);
6463 }
6464 
6465 #endif
6466 
6467 #if NKSTAT > 0
6468 
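/*
 * the query commands below run from kstat context, so they use the
 * dedicated MCX_CMDQ_SLOT_KSTAT command queue slot rather than the
 * polling slot used during attach.
 */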
6469 int
6470 mcx_query_rq(struct mcx_softc *sc, struct mcx_rx *rx, struct mcx_rq_ctx *rq_ctx)
6471 {
6472 	struct mcx_dmamem mxm;
6473 	struct mcx_cmdq_entry *cqe;
6474 	struct mcx_cmd_query_rq_in *in;
6475 	struct mcx_cmd_query_rq_out *out;
6476 	struct mcx_cmd_query_rq_mb_out *mbout;
6477 	uint8_t token = mcx_cmdq_token(sc);
6478 	int error;
6479 
6480 	cqe = mcx_get_cmdq_entry(sc, MCX_CMDQ_SLOT_KSTAT);
6481 	if (cqe == NULL)
6482 		return (-1);
6483 
6484 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6485 	    token);
6486 
6487 	in = mcx_cmdq_in(cqe);
6488 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_RQ);
6489 	in->cmd_op_mod = htobe16(0);
6490 	in->cmd_rqn = htobe32(rx->rx_rqn);
6491 
6492 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6493 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6494 	    &cqe->cq_output_ptr, token) != 0) {
6495 		printf("%s: unable to allocate query rq mailboxes\n", DEVNAME(sc));
6496 		return (-1);
6497 	}
6498 
6499 	mcx_cmdq_mboxes_sign(&mxm, 1);
6500 
6501 	error = mcx_cmdq_exec(sc, cqe, MCX_CMDQ_SLOT_KSTAT, 1000);
6502 	if (error != 0) {
6503 		printf("%s: query rq timeout\n", DEVNAME(sc));
6504 		goto free;
6505 	}
6506 	error = mcx_cmdq_verify(cqe);
6507 	if (error != 0) {
6508 		printf("%s: query rq reply corrupt\n", DEVNAME(sc));
6509 		goto free;
6510 	}
6511 
6512 	out = mcx_cmdq_out(cqe);
6513 	switch (out->cmd_status) {
6514 	case MCX_CQ_STATUS_OK:
6515 		break;
6516 	default:
6517 		printf("%s: query rq failed (%x/%x)\n", DEVNAME(sc),
6518 		    out->cmd_status, betoh32(out->cmd_syndrome));
6519 		error = -1;
6520 		goto free;
6521 	}
6522 
6523 	mbout = (struct mcx_cmd_query_rq_mb_out *)
6524 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6525 	memcpy(rq_ctx, &mbout->cmd_ctx, sizeof(*rq_ctx));
6526 
6527 free:
6528 	mcx_cq_mboxes_free(sc, &mxm);
6529 	return (error);
6530 }
6531 
6532 int
6533 mcx_query_sq(struct mcx_softc *sc, struct mcx_tx *tx, struct mcx_sq_ctx *sq_ctx)
6534 {
6535 	struct mcx_dmamem mxm;
6536 	struct mcx_cmdq_entry *cqe;
6537 	struct mcx_cmd_query_sq_in *in;
6538 	struct mcx_cmd_query_sq_out *out;
6539 	struct mcx_cmd_query_sq_mb_out *mbout;
6540 	uint8_t token = mcx_cmdq_token(sc);
6541 	int error;
6542 
6543 	cqe = mcx_get_cmdq_entry(sc, MCX_CMDQ_SLOT_KSTAT);
6544 	if (cqe == NULL)
6545 		return (-1);
6546 
6547 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*mbout) + 16,
6548 	    token);
6549 
6550 	in = mcx_cmdq_in(cqe);
6551 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_SQ);
6552 	in->cmd_op_mod = htobe16(0);
6553 	in->cmd_sqn = htobe32(tx->tx_sqn);
6554 
6555 	CTASSERT(sizeof(*mbout) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6556 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6557 	    &cqe->cq_output_ptr, token) != 0) {
6558 		printf("%s: unable to allocate query sq mailboxes\n", DEVNAME(sc));
6559 		return (-1);
6560 	}
6561 
6562 	mcx_cmdq_mboxes_sign(&mxm, 1);
6563 
6564 	error = mcx_cmdq_exec(sc, cqe, MCX_CMDQ_SLOT_KSTAT, 1000);
6565 	if (error != 0) {
6566 		printf("%s: query sq timeout\n", DEVNAME(sc));
6567 		goto free;
6568 	}
6569 	error = mcx_cmdq_verify(cqe);
6570 	if (error != 0) {
6571 		printf("%s: query sq reply corrupt\n", DEVNAME(sc));
6572 		goto free;
6573 	}
6574 
6575 	out = mcx_cmdq_out(cqe);
6576 	switch (out->cmd_status) {
6577 	case MCX_CQ_STATUS_OK:
6578 		break;
6579 	default:
6580 		printf("%s: query sq failed (%x/%x)\n", DEVNAME(sc),
6581 		    out->cmd_status, betoh32(out->cmd_syndrome));
6582 		error = -1;
6583 		goto free;
6584 	}
6585 
6586 	mbout = (struct mcx_cmd_query_sq_mb_out *)
6587 	    (mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6588 	memcpy(sq_ctx, &mbout->cmd_ctx, sizeof(*sq_ctx));
6589 
6590 free:
6591 	mcx_cq_mboxes_free(sc, &mxm);
6592 	return (error);
6593 }
6594 
6595 int
6596 mcx_query_cq(struct mcx_softc *sc, struct mcx_cq *cq, struct mcx_cq_ctx *cq_ctx)
6597 {
6598 	struct mcx_dmamem mxm;
6599 	struct mcx_cmdq_entry *cqe;
6600 	struct mcx_cmd_query_cq_in *in;
6601 	struct mcx_cmd_query_cq_out *out;
6602 	struct mcx_cq_ctx *ctx;
6603 	uint8_t token = mcx_cmdq_token(sc);
6604 	int error;
6605 
6606 	cqe = mcx_get_cmdq_entry(sc, MCX_CMDQ_SLOT_KSTAT);
6607 	if (cqe == NULL)
6608 		return (-1);
6609 
6610 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
6611 	    token);
6612 
6613 	in = mcx_cmdq_in(cqe);
6614 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_CQ);
6615 	in->cmd_op_mod = htobe16(0);
6616 	in->cmd_cqn = htobe32(cq->cq_n);
6617 
6618 	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6619 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6620 	    &cqe->cq_output_ptr, token) != 0) {
6621 		printf("%s: unable to allocate query cq mailboxes\n", DEVNAME(sc));
6622 		return (-1);
6623 	}
6624 
6625 	mcx_cmdq_mboxes_sign(&mxm, 1);
6626 
6627 	error = mcx_cmdq_exec(sc, cqe, MCX_CMDQ_SLOT_KSTAT, 1000);
6628 	if (error != 0) {
6629 		printf("%s: query cq timeout\n", DEVNAME(sc));
6630 		goto free;
6631 	}
6632 	error = mcx_cmdq_verify(cqe);
6633 	if (error != 0) {
6634 		printf("%s: query cq reply corrupt\n", DEVNAME(sc));
6635 		goto free;
6636 	}
6637 
6638 	out = mcx_cmdq_out(cqe);
6639 	switch (out->cmd_status) {
6640 	case MCX_CQ_STATUS_OK:
6641 		break;
6642 	default:
6643 		printf("%s: query cq failed (%x/%x)\n", DEVNAME(sc),
6644 		    out->cmd_status, betoh32(out->cmd_syndrome));
6645 		error = -1;
6646 		goto free;
6647 	}
6648 
6649 	ctx = (struct mcx_cq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6650 	memcpy(cq_ctx, ctx, sizeof(*cq_ctx));
6651 free:
6652 	mcx_cq_mboxes_free(sc, &mxm);
6653 	return (error);
6654 }
6655 
6656 int
6657 mcx_query_eq(struct mcx_softc *sc, struct mcx_eq *eq, struct mcx_eq_ctx *eq_ctx)
6658 {
6659 	struct mcx_dmamem mxm;
6660 	struct mcx_cmdq_entry *cqe;
6661 	struct mcx_cmd_query_eq_in *in;
6662 	struct mcx_cmd_query_eq_out *out;
6663 	struct mcx_eq_ctx *ctx;
6664 	uint8_t token = mcx_cmdq_token(sc);
6665 	int error;
6666 
6667 	cqe = mcx_get_cmdq_entry(sc, MCX_CMDQ_SLOT_KSTAT);
6668 	if (cqe == NULL)
6669 		return (-1);
6670 
6671 	mcx_cmdq_init(sc, cqe, sizeof(*in), sizeof(*out) + sizeof(*ctx) + 16,
6672 	    token);
6673 
6674 	in = mcx_cmdq_in(cqe);
6675 	in->cmd_opcode = htobe16(MCX_CMD_QUERY_EQ);
6676 	in->cmd_op_mod = htobe16(0);
6677 	in->cmd_eqn = htobe32(eq->eq_n);
6678 
6679 	CTASSERT(sizeof(*ctx) <= MCX_CMDQ_MAILBOX_DATASIZE*2);
6680 	if (mcx_cmdq_mboxes_alloc(sc, &mxm, 2,
6681 	    &cqe->cq_output_ptr, token) != 0) {
6682 		printf("%s: unable to allocate query eq mailboxes\n", DEVNAME(sc));
6683 		return (-1);
6684 	}
6685 
6686 	mcx_cmdq_mboxes_sign(&mxm, 1);
6687 
6688 	error = mcx_cmdq_exec(sc, cqe, MCX_CMDQ_SLOT_KSTAT, 1000);
6689 	if (error != 0) {
6690 		printf("%s: query eq timeout\n", DEVNAME(sc));
6691 		goto free;
6692 	}
6693 	error = mcx_cmdq_verify(cqe);
6694 	if (error != 0) {
6695 		printf("%s: query eq reply corrupt\n", DEVNAME(sc));
6696 		goto free;
6697 	}
6698 
6699 	out = mcx_cmdq_out(cqe);
6700 	switch (out->cmd_status) {
6701 	case MCX_CQ_STATUS_OK:
6702 		break;
6703 	default:
6704 		printf("%s: query eq failed (%x/%x)\n", DEVNAME(sc),
6705 		    out->cmd_status, betoh32(out->cmd_syndrome));
6706 		error = -1;
6707 		goto free;
6708 	}
6709 
6710 	ctx = (struct mcx_eq_ctx *)(mcx_cq_mbox_data(mcx_cq_mbox(&mxm, 0)));
6711 	memcpy(eq_ctx, ctx, sizeof(*eq_ctx));
6712 free:
6713 	mcx_cq_mboxes_free(sc, &mxm);
6714 	return (error);
6715 }
6716 
6717 #endif /* NKSTAT > 0 */
6718 
6719 static inline unsigned int
6720 mcx_rx_fill_slots(struct mcx_softc *sc, struct mcx_rx *rx, uint nslots)
6721 {
6722 	struct mcx_rq_entry *ring, *rqe;
6723 	struct mcx_slot *ms;
6724 	struct mbuf *m;
6725 	uint slot, p, fills;
6726 
6727 	ring = MCX_DMA_KVA(&rx->rx_rq_mem);
6728 	p = rx->rx_prod;
6729 
6730 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
6731 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_POSTWRITE);
6732 
6733 	for (fills = 0; fills < nslots; fills++) {
6734 		slot = p % (1 << MCX_LOG_RQ_SIZE);
6735 
6736 		ms = &rx->rx_slots[slot];
6737 		rqe = &ring[slot];
6738 
6739 		m = MCLGETL(NULL, M_DONTWAIT, sc->sc_rxbufsz);
6740 		if (m == NULL)
6741 			break;
6742 
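		/*
		 * point m_data at the last sc_rxbufsz bytes of the
		 * cluster, then shift by ETHER_ALIGN so the payload
		 * lands with its IP header 4 byte aligned.
		 */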
6743 		m->m_data += (m->m_ext.ext_size - sc->sc_rxbufsz);
6744 		m->m_data += ETHER_ALIGN;
6745 		m->m_len = m->m_pkthdr.len = sc->sc_hardmtu;
6746 
6747 		if (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
6748 		    BUS_DMA_NOWAIT) != 0) {
6749 			m_freem(m);
6750 			break;
6751 		}
6752 		ms->ms_m = m;
6753 
6754 		htobem32(&rqe->rqe_byte_count, ms->ms_map->dm_segs[0].ds_len);
6755 		htobem64(&rqe->rqe_addr, ms->ms_map->dm_segs[0].ds_addr);
6756 		htobem32(&rqe->rqe_lkey, sc->sc_lkey);
6757 
6758 		p++;
6759 	}
6760 
6761 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&rx->rx_rq_mem),
6762 	    0, MCX_DMA_LEN(&rx->rx_rq_mem), BUS_DMASYNC_PREWRITE);
6763 
6764 	rx->rx_prod = p;
6765 
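	/*
	 * publish the new producer counter through the doorbell
	 * record; the device reads it from memory, so only
	 * bus_dmamap_sync() ordering is needed here, not a uar write.
	 */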
6766 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6767 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
6768 	htobem32(MCX_DMA_OFF(&sc->sc_doorbell_mem, rx->rx_doorbell),
6769 	    p & MCX_WQ_DOORBELL_MASK);
6770 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6771 	    rx->rx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
6772 
6773 	return (nslots - fills);
6774 }
6775 
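/*
 * if_rxr_get() reserves ring slots for us to fill; whatever
 * mcx_rx_fill_slots() could not fill is handed back to the counter
 * with if_rxr_put().
 */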
6776 int
6777 mcx_rx_fill(struct mcx_softc *sc, struct mcx_rx *rx)
6778 {
6779 	u_int slots;
6780 
6781 	slots = if_rxr_get(&rx->rx_rxr, (1 << MCX_LOG_RQ_SIZE));
6782 	if (slots == 0)
6783 		return (1);
6784 
6785 	slots = mcx_rx_fill_slots(sc, rx, slots);
6786 	if_rxr_put(&rx->rx_rxr, slots);
6787 	return (0);
6788 }
6789 
6790 void
6791 mcx_refill(void *xrx)
6792 {
6793 	struct mcx_rx *rx = xrx;
6794 	struct mcx_softc *sc = rx->rx_softc;
6795 
6796 	mcx_rx_fill(sc, rx);
6797 
6798 	if (if_rxr_inuse(&rx->rx_rxr) == 0)
6799 		timeout_add(&rx->rx_refill, 1);
6800 }
6801 
6802 static int
6803 mcx_process_txeof(struct mcx_softc *sc, struct mcx_tx *tx,
6804     struct mcx_cq_entry *cqe)
6805 {
6806 	struct mcx_slot *ms;
6807 	bus_dmamap_t map;
6808 	int slot, slots;
6809 
6810 	slot = betoh16(cqe->cq_wqe_count) % (1 << MCX_LOG_SQ_SIZE);
6811 
6812 	ms = &tx->tx_slots[slot];
6813 	map = ms->ms_map;
6814 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
6815 	    BUS_DMASYNC_POSTWRITE);
6816 
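	/*
	 * the first sq slot holds the ctrl and eth segments plus one
	 * data segment; each further MCX_SQ_SEGS_PER_SLOT segments
	 * take another slot, so (nsegs + 2) / MCX_SQ_SEGS_PER_SLOT
	 * rounds the remaining segments up to whole slots.
	 */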
6817 	slots = 1;
6818 	if (map->dm_nsegs > 1)
6819 		slots += (map->dm_nsegs+2) / MCX_SQ_SEGS_PER_SLOT;
6820 
6821 	bus_dmamap_unload(sc->sc_dmat, map);
6822 	m_freem(ms->ms_m);
6823 	ms->ms_m = NULL;
6824 
6825 	return (slots);
6826 }
6827 
6828 static void
6829 mcx_calibrate_first(struct mcx_softc *sc)
6830 {
6831 	struct mcx_calibration *c = &sc->sc_calibration[0];
6832 	int s;
6833 
6834 	sc->sc_calibration_gen = 0;
6835 
6836 	s = splhigh(); /* crit_enter? */
6837 	c->c_ubase = nsecuptime();
6838 	c->c_tbase = mcx_timer(sc);
6839 	splx(s);
6840 	c->c_ratio = 0;
6841 
6842 #ifdef notyet
6843 	timeout_add_sec(&sc->sc_calibrate, MCX_CALIBRATE_FIRST);
6844 #endif
6845 }
6846 
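/*
 * c_ratio is nanoseconds per device timer tick as a fixed point
 * number with MCX_TIMESTAMP_SHIFT fractional bits, so converting a
 * timestamp delta to nanoseconds is
 * (ticks * c_ratio) >> MCX_TIMESTAMP_SHIFT, as in mcx_process_rx().
 */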
6847 #define MCX_TIMESTAMP_SHIFT 24
6848 
6849 static void
6850 mcx_calibrate(void *arg)
6851 {
6852 	struct mcx_softc *sc = arg;
6853 	struct mcx_calibration *nc, *pc;
6854 	uint64_t udiff, tdiff;
6855 	unsigned int gen;
6856 	int s;
6857 
6858 	if (!ISSET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING))
6859 		return;
6860 
6861 	timeout_add_sec(&sc->sc_calibrate, MCX_CALIBRATE_NORMAL);
6862 
6863 	gen = sc->sc_calibration_gen;
6864 	pc = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];
6865 	gen++;
6866 	nc = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];
6867 
6868 	nc->c_uptime = pc->c_ubase;
6869 	nc->c_timestamp = pc->c_tbase;
6870 
6871 	s = splhigh(); /* crit_enter? */
6872 	nc->c_ubase = nsecuptime();
6873 	nc->c_tbase = mcx_timer(sc);
6874 	splx(s);
6875 
6876 	udiff = nc->c_ubase - nc->c_uptime;
6877 	tdiff = nc->c_tbase - nc->c_timestamp;
6878 
6879 	/*
6880 	 * udiff is the wall clock time between calibration ticks,
6881 	 * which should be 32 seconds or 32 billion nanoseconds. if
6882 	 * we squint, 1 billion nanoseconds is kind of like a 32 bit
6883 	 * number, so 32 billion should still have a lot of high bits
6884 	 * spare. we use this space by shifting the nanoseconds up
6885 	 * 24 bits so we have a nice big number to divide by the
6886 	 * number of mcx timer ticks.
6887 	 */
6888 	nc->c_ratio = (udiff << MCX_TIMESTAMP_SHIFT) / tdiff;
6889 
6890 	membar_producer();
6891 	sc->sc_calibration_gen = gen;
6892 }
6893 
6894 static int
6895 mcx_process_rx(struct mcx_softc *sc, struct mcx_rx *rx,
6896     struct mcx_cq_entry *cqe, struct mbuf_list *ml,
6897     const struct mcx_calibration *c)
6898 {
6899 	struct mcx_slot *ms;
6900 	struct mbuf *m;
6901 	uint32_t flags, len;
6902 	int slot;
6903 
6904 	len = bemtoh32(&cqe->cq_byte_cnt);
6905 	slot = betoh16(cqe->cq_wqe_count) % (1 << MCX_LOG_RQ_SIZE);
6906 
6907 	ms = &rx->rx_slots[slot];
6908 	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, len, BUS_DMASYNC_POSTREAD);
6909 	bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
6910 
6911 	m = ms->ms_m;
6912 	ms->ms_m = NULL;
6913 
6914 	m->m_pkthdr.len = m->m_len = len;
6915 
6916 	if (cqe->cq_rx_hash_type) {
6917 		m->m_pkthdr.ph_flowid = betoh32(cqe->cq_rx_hash);
6918 		m->m_pkthdr.csum_flags |= M_FLOWID;
6919 	}
6920 
6921 	flags = bemtoh32(&cqe->cq_flags);
6922 	if (flags & MCX_CQ_ENTRY_FLAGS_L3_OK)
6923 		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
6924 	if (flags & MCX_CQ_ENTRY_FLAGS_L4_OK)
6925 		m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
6926 		    M_UDP_CSUM_IN_OK;
6927 #if NVLAN > 0
6928 	if (flags & MCX_CQ_ENTRY_FLAGS_CV) {
6929 		m->m_pkthdr.ether_vtag = (flags &
6930 		    MCX_CQ_ENTRY_FLAGS_VLAN_MASK);
6931 		m->m_flags |= M_VLANTAG;
6932 	}
6933 #endif
6934 
6935 #ifdef notyet
6936 	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_LINK0) && c->c_ratio) {
6937 		uint64_t t = bemtoh64(&cqe->cq_timestamp);
6938 		t -= c->c_timestamp;
6939 		t *= c->c_ratio;
6940 		t >>= MCX_TIMESTAMP_SHIFT;
6941 		t += c->c_uptime;
6942 
6943 		m->m_pkthdr.ph_timestamp = t;
6944 		SET(m->m_pkthdr.csum_flags, M_TIMESTAMP);
6945 	}
6946 #endif
6947 
6948 	ml_enqueue(ml, m);
6949 
6950 	return (1);
6951 }
6952 
6953 static struct mcx_cq_entry *
6954 mcx_next_cq_entry(struct mcx_softc *sc, struct mcx_cq *cq)
6955 {
6956 	struct mcx_cq_entry *cqe;
6957 	int next;
6958 
6959 	cqe = (struct mcx_cq_entry *)MCX_DMA_KVA(&cq->cq_mem);
6960 	next = cq->cq_cons % (1 << MCX_LOG_CQ_SIZE);
6961 
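	/*
	 * the hardware flips the owner bit in each entry on every lap
	 * around the ring, so an entry is fresh when its owner bit
	 * matches the lap parity bit of the unmasked consumer counter.
	 */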
6962 	if ((cqe[next].cq_opcode_owner & MCX_CQ_ENTRY_FLAG_OWNER) ==
6963 	    ((cq->cq_cons >> MCX_LOG_CQ_SIZE) & 1)) {
6964 		return (&cqe[next]);
6965 	}
6966 
6967 	return (NULL);
6968 }
6969 
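/*
 * rearm the cq: update the doorbell record with the consumer index,
 * then write the arm command (sequence number and consumer index in
 * the high word, cq number in the low word) to the cq doorbell in
 * the uar page.
 */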
6970 static void
6971 mcx_arm_cq(struct mcx_softc *sc, struct mcx_cq *cq, int uar)
6972 {
6973 	struct mcx_cq_doorbell *db;
6974 	bus_size_t offset;
6975 	uint32_t val;
6976 	uint64_t uval;
6977 
6978 	val = ((cq->cq_count) & 3) << MCX_CQ_DOORBELL_ARM_CMD_SN_SHIFT;
6979 	val |= (cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
6980 
6981 	db = MCX_DMA_OFF(&sc->sc_doorbell_mem, cq->cq_doorbell);
6982 
6983 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6984 	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_POSTWRITE);
6985 
6986 	htobem32(&db->db_update_ci, cq->cq_cons & MCX_CQ_DOORBELL_ARM_CI_MASK);
6987 	htobem32(&db->db_arm_ci, val);
6988 
6989 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
6990 	    cq->cq_doorbell, sizeof(*db), BUS_DMASYNC_PREWRITE);
6991 
6992 	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_CQ_DOORBELL;
6993 
6994 	uval = (uint64_t)val << 32;
6995 	uval |= cq->cq_n;
6996 
6997 	bus_space_write_raw_8(sc->sc_memt, sc->sc_memh, offset, htobe64(uval));
6998 	mcx_bar(sc, offset, sizeof(uval), BUS_SPACE_BARRIER_WRITE);
6999 }
7000 
7001 void
7002 mcx_process_cq(struct mcx_softc *sc, struct mcx_queues *q, struct mcx_cq *cq)
7003 {
7004 	struct mcx_rx *rx = &q->q_rx;
7005 	struct mcx_tx *tx = &q->q_tx;
7006 	const struct mcx_calibration *c;
7007 	unsigned int gen;
7008 	struct mcx_cq_entry *cqe;
7009 	uint8_t *cqp;
7010 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
7011 	int rxfree, txfree;
7012 
7013 	gen = sc->sc_calibration_gen;
7014 	membar_consumer();
7015 	c = &sc->sc_calibration[gen % nitems(sc->sc_calibration)];
7016 
7017 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
7018 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_POSTREAD);
7019 
7020 	rxfree = 0;
7021 	txfree = 0;
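	/*
	 * REQ completions report transmitted packets, SEND completions
	 * received ones; error completions are skipped.
	 */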
7022 	while ((cqe = mcx_next_cq_entry(sc, cq))) {
7023 		uint8_t opcode;
7024 		opcode = (cqe->cq_opcode_owner >> MCX_CQ_ENTRY_OPCODE_SHIFT);
7025 		switch (opcode) {
7026 		case MCX_CQ_ENTRY_OPCODE_REQ:
7027 			txfree += mcx_process_txeof(sc, tx, cqe);
7028 			break;
7029 		case MCX_CQ_ENTRY_OPCODE_SEND:
7030 			rxfree += mcx_process_rx(sc, rx, cqe, &ml, c);
7031 			break;
7032 		case MCX_CQ_ENTRY_OPCODE_REQ_ERR:
7033 		case MCX_CQ_ENTRY_OPCODE_SEND_ERR:
7034 			cqp = (uint8_t *)cqe;
7035 			/* printf("%s: cq completion error: %x\n",
7036 			    DEVNAME(sc), cqp[0x37]); */
7037 			break;
7038 
7039 		default:
7040 			/* printf("%s: cq completion opcode %x??\n",
7041 			    DEVNAME(sc), opcode); */
7042 			break;
7043 		}
7044 
7045 		cq->cq_cons++;
7046 	}
7047 
7048 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&cq->cq_mem),
7049 	    0, MCX_DMA_LEN(&cq->cq_mem), BUS_DMASYNC_PREREAD);
7050 
7051 	if (rxfree > 0) {
7052 		if_rxr_put(&rx->rx_rxr, rxfree);
7053 		if (ifiq_input(rx->rx_ifiq, &ml))
7054 			if_rxr_livelocked(&rx->rx_rxr);
7055 
7056 		mcx_rx_fill(sc, rx);
7057 		if (if_rxr_inuse(&rx->rx_rxr) == 0)
7058 			timeout_add(&rx->rx_refill, 1);
7059 	}
7060 
7061 	cq->cq_count++;
7062 	mcx_arm_cq(sc, cq, q->q_uar);
7063 
7064 	if (txfree > 0) {
7065 		tx->tx_cons += txfree;
7066 		if (ifq_is_oactive(tx->tx_ifq))
7067 			ifq_restart(tx->tx_ifq);
7068 	}
7069 }
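
/*
 * note on mcx_process_cq above: rx and tx completions are batched per
 * pass; received packets are collected on a local mbuf_list and handed
 * to the stack in one ifiq_input() call, with if_rxr_livelocked()
 * backing off the ring allocation if the stack reports pressure.  the
 * refill timeout is only armed when the rx ring has drained
 * completely, and the cq is re-armed before the tx side is restarted.
 */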
7070 
7071 
7072 static void
7073 mcx_arm_eq(struct mcx_softc *sc, struct mcx_eq *eq, int uar)
7074 {
7075 	bus_size_t offset;
7076 	uint32_t val;
7077 
7078 	offset = (MCX_PAGE_SIZE * uar) + MCX_UAR_EQ_DOORBELL_ARM;
7079 	val = (eq->eq_n << 24) | (eq->eq_cons & 0xffffff);
7080 
7081 	mcx_wr(sc, offset, val);
7082 	mcx_bar(sc, offset, sizeof(val), BUS_SPACE_BARRIER_WRITE);
7083 }
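
/*
 * note on mcx_arm_eq above: the eq doorbell packs the event queue
 * number into the top byte and the 24-bit consumer counter below it,
 * so a single 32-bit UAR write both reports consumed events and
 * re-arms the queue.
 */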
7084 
7085 static struct mcx_eq_entry *
7086 mcx_next_eq_entry(struct mcx_softc *sc, struct mcx_eq *eq)
7087 {
7088 	struct mcx_eq_entry *eqe;
7089 	int next;
7090 
7091 	eqe = (struct mcx_eq_entry *)MCX_DMA_KVA(&eq->eq_mem);
7092 	next = eq->eq_cons % (1 << MCX_LOG_EQ_SIZE);
7093 	if ((eqe[next].eq_owner & 1) ==
7094 	    ((eq->eq_cons >> MCX_LOG_EQ_SIZE) & 1)) {
7095 		eq->eq_cons++;
7096 		return (&eqe[next]);
7097 	}
7098 	return (NULL);
7099 }
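
/*
 * note on mcx_next_eq_entry above: this uses the same phase-bit
 * scheme as mcx_next_cq_entry, but advances eq_cons itself, so the
 * interrupt handlers can simply loop until it returns NULL.
 */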
7100 
7101 int
7102 mcx_admin_intr(void *xsc)
7103 {
7104 	struct mcx_softc *sc = (struct mcx_softc *)xsc;
7105 	struct mcx_eq *eq = &sc->sc_admin_eq;
7106 	struct mcx_eq_entry *eqe;
7107 
7108 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7109 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
7110 
7111 	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
7112 		switch (eqe->eq_event_type) {
7113 		case MCX_EVENT_TYPE_LAST_WQE:
7114 			/* printf("%s: last wqe reached?\n", DEVNAME(sc)); */
7115 			break;
7116 
7117 		case MCX_EVENT_TYPE_CQ_ERROR:
7118 			/* printf("%s: cq error\n", DEVNAME(sc)); */
7119 			break;
7120 
7121 		case MCX_EVENT_TYPE_CMD_COMPLETION:
7122 			mtx_enter(&sc->sc_cmdq_mtx);
7123 			wakeup(&sc->sc_cmdq_token);
7124 			mtx_leave(&sc->sc_cmdq_mtx);
7125 			break;
7126 
7127 		case MCX_EVENT_TYPE_PORT_CHANGE:
7128 			task_add(systq, &sc->sc_port_change);
7129 			break;
7130 
7131 		default:
7132 			/* printf("%s: something happened\n", DEVNAME(sc)); */
7133 			break;
7134 		}
7135 	}
7136 
7137 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7138 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
7139 
7140 	mcx_arm_eq(sc, eq, sc->sc_uar);
7141 
7142 	return (1);
7143 }
7144 
7145 int
7146 mcx_cq_intr(void *xq)
7147 {
7148 	struct mcx_queues *q = (struct mcx_queues *)xq;
7149 	struct mcx_softc *sc = q->q_sc;
7150 	struct mcx_eq *eq = &q->q_eq;
7151 	struct mcx_eq_entry *eqe;
7152 	int cqn;
7153 
7154 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7155 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_POSTREAD);
7156 
7157 	while ((eqe = mcx_next_eq_entry(sc, eq)) != NULL) {
7158 		switch (eqe->eq_event_type) {
7159 		case MCX_EVENT_TYPE_COMPLETION:
7160 			cqn = betoh32(eqe->eq_event_data[6]);
7161 			if (cqn == q->q_cq.cq_n)
7162 				mcx_process_cq(sc, q, &q->q_cq);
7163 			break;
7164 		}
7165 	}
7166 
7167 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&eq->eq_mem),
7168 	    0, MCX_DMA_LEN(&eq->eq_mem), BUS_DMASYNC_PREREAD);
7169 
7170 	mcx_arm_eq(sc, eq, q->q_uar);
7171 
7172 	return (1);
7173 }
7174 
7175 static void
7176 mcx_free_slots(struct mcx_softc *sc, struct mcx_slot *slots, int allocated,
7177     int total)
7178 {
7179 	struct mcx_slot *ms;
7180 
7181 	int i = allocated;
7182 	while (i-- > 0) {
7183 		ms = &slots[i];
7184 		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
7185 		if (ms->ms_m != NULL)
7186 			m_freem(ms->ms_m);
7187 	}
7188 	free(slots, M_DEVBUF, total * sizeof(*ms));
7189 }
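
/*
 * note on mcx_free_slots above: "allocated" may be less than "total"
 * when this is unwinding a partially set up ring from mcx_queue_up();
 * only the dma maps that were actually created are destroyed, along
 * with any mbuf still attached, but the slot array is always freed at
 * its full size.
 */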
7190 
7191 static int
7192 mcx_queue_up(struct mcx_softc *sc, struct mcx_queues *q)
7193 {
7194 	struct mcx_rx *rx;
7195 	struct mcx_tx *tx;
7196 	struct mcx_slot *ms;
7197 	int i;
7198 
7199 	rx = &q->q_rx;
7200 	rx->rx_slots = mallocarray(sizeof(*ms), (1 << MCX_LOG_RQ_SIZE),
7201 	    M_DEVBUF, M_WAITOK | M_ZERO);
7202 	if (rx->rx_slots == NULL) {
7203 		printf("%s: failed to allocate rx slots\n", DEVNAME(sc));
7204 		return ENOMEM;
7205 	}
7206 
7207 	for (i = 0; i < (1 << MCX_LOG_RQ_SIZE); i++) {
7208 		ms = &rx->rx_slots[i];
7209 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu, 1,
7210 		    sc->sc_hardmtu, 0,
7211 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
7212 		    &ms->ms_map) != 0) {
7213 			printf("%s: failed to allocate rx dma maps\n",
7214 			    DEVNAME(sc));
7215 			goto destroy_rx_slots;
7216 		}
7217 	}
7218 
7219 	tx = &q->q_tx;
7220 	tx->tx_slots = mallocarray(sizeof(*ms), (1 << MCX_LOG_SQ_SIZE),
7221 	    M_DEVBUF, M_WAITOK | M_ZERO);
7222 	if (tx->tx_slots == NULL) {
7223 		printf("%s: failed to allocate tx slots\n", DEVNAME(sc));
7224 		goto destroy_rx_slots;
7225 	}
7226 
7227 	for (i = 0; i < (1 << MCX_LOG_SQ_SIZE); i++) {
7228 		ms = &tx->tx_slots[i];
7229 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_hardmtu,
7230 		    MCX_SQ_MAX_SEGMENTS, sc->sc_hardmtu, 0,
7231 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
7232 		    &ms->ms_map) != 0) {
7233 			printf("%s: failed to allocate tx dma maps\n",
7234 			    DEVNAME(sc));
7235 			goto destroy_tx_slots;
7236 		}
7237 	}
7238 
7239 	if (mcx_create_cq(sc, &q->q_cq, q->q_uar, q->q_index,
7240 	    q->q_eq.eq_n) != 0)
7241 		goto destroy_tx_slots;
7242 
7243 	if (mcx_create_sq(sc, tx, q->q_uar, q->q_index, q->q_cq.cq_n)
7244 	    != 0)
7245 		goto destroy_cq;
7246 
7247 	if (mcx_create_rq(sc, rx, q->q_index, q->q_cq.cq_n) != 0)
7248 		goto destroy_sq;
7249 
7250 	return 0;
7251 
7252 destroy_sq:
7253 	mcx_destroy_sq(sc, tx);
7254 destroy_cq:
7255 	mcx_destroy_cq(sc, &q->q_cq);
7256 destroy_tx_slots:
7257 	mcx_free_slots(sc, tx->tx_slots, i, (1 << MCX_LOG_SQ_SIZE));
7258 	tx->tx_slots = NULL;
7259 
7260 	i = (1 << MCX_LOG_RQ_SIZE);
7261 destroy_rx_slots:
7262 	mcx_free_slots(sc, rx->rx_slots, i, (1 << MCX_LOG_RQ_SIZE));
7263 	rx->rx_slots = NULL;
7264 	return ENOMEM;
7265 }
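
/*
 * note on the unwind in mcx_queue_up above: when the tx loop fails, i
 * holds the number of tx dma maps created so far and is passed to
 * mcx_free_slots() as the allocated count; before falling through to
 * the rx cleanup, i is reset to the full rx ring size because every
 * rx map had already been created by that point.
 */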
7266 
7267 static int
7268 mcx_rss_group_entry_count(struct mcx_softc *sc, int group)
7269 {
7270 	int i;
7271 	int count;
7272 
7273 	count = 0;
7274 	for (i = 0; i < nitems(mcx_rss_config); i++) {
7275 		if (mcx_rss_config[i].flow_group == group)
7276 			count++;
7277 	}
7278 
7279 	return count;
7280 }
7281 
7282 static int
7283 mcx_up(struct mcx_softc *sc)
7284 {
7285 	struct ifnet *ifp = &sc->sc_ac.ac_if;
7286 	struct mcx_rx *rx;
7287 	struct mcx_tx *tx;
7288 	int i, start, count, flow_group, flow_index;
7289 	struct mcx_flow_match match_crit;
7290 	struct mcx_rss_rule *rss;
7291 	uint32_t dest;
7292 	int rqns[MCX_MAX_QUEUES];
7293 
7294 	if (mcx_create_tis(sc, &sc->sc_tis) != 0)
7295 		goto down;
7296 
7297 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7298 		if (mcx_queue_up(sc, &sc->sc_queues[i]) != 0) {
7299 			goto down;
7300 		}
7301 	}
7302 
7303 	/* RSS flow table and flow groups */
7304 	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 1,
7305 	    &sc->sc_rss_flow_table_id) != 0)
7306 		goto down;
7307 
7308 	dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
7309 	    sc->sc_rss_flow_table_id;
7310 
7311 	/* L4 RSS flow group (v4/v6 tcp/udp, no fragments) */
7312 	memset(&match_crit, 0, sizeof(match_crit));
7313 	match_crit.mc_ethertype = 0xffff;
7314 	match_crit.mc_ip_proto = 0xff;
7315 	match_crit.mc_vlan_flags = MCX_FLOW_MATCH_IP_FRAG;
7316 	start = 0;
7317 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L4);
7318 	if (count != 0) {
7319 		if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7320 		    MCX_FLOW_GROUP_RSS_L4, start, count,
7321 		    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7322 			goto down;
7323 		start += count;
7324 	}
7325 
7326 	/* L3 RSS flow group (v4/v6, including fragments) */
7327 	memset(&match_crit, 0, sizeof(match_crit));
7328 	match_crit.mc_ethertype = 0xffff;
7329 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_L3);
7330 	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7331 	    MCX_FLOW_GROUP_RSS_L3, start, count,
7332 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7333 		goto down;
7334 	start += count;
7335 
7336 	/* non-RSS flow group */
7337 	count = mcx_rss_group_entry_count(sc, MCX_FLOW_GROUP_RSS_NONE);
7338 	memset(&match_crit, 0, sizeof(match_crit));
7339 	if (mcx_create_flow_group(sc, sc->sc_rss_flow_table_id,
7340 	    MCX_FLOW_GROUP_RSS_NONE, start, count, 0, &match_crit) != 0)
7341 		goto down;
7342 
7343 	/* Root flow table, matching packets based on mac address */
7344 	if (mcx_create_flow_table(sc, MCX_LOG_FLOW_TABLE_SIZE, 0,
7345 	    &sc->sc_mac_flow_table_id) != 0)
7346 		goto down;
7347 
7348 	/* promisc flow group */
7349 	start = 0;
7350 	memset(&match_crit, 0, sizeof(match_crit));
7351 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7352 	    MCX_FLOW_GROUP_PROMISC, start, 1, 0, &match_crit) != 0)
7353 		goto down;
7354 	sc->sc_promisc_flow_enabled = 0;
7355 	start++;
7356 
7357 	/* all multicast flow group */
7358 	match_crit.mc_dest_mac[0] = 0x01;
7359 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7360 	    MCX_FLOW_GROUP_ALLMULTI, start, 1,
7361 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7362 		goto down;
7363 	sc->sc_allmulti_flow_enabled = 0;
7364 	start++;
7365 
7366 	/* mac address matching flow group */
7367 	memset(&match_crit.mc_dest_mac, 0xff, sizeof(match_crit.mc_dest_mac));
7368 	if (mcx_create_flow_group(sc, sc->sc_mac_flow_table_id,
7369 	    MCX_FLOW_GROUP_MAC, start, (1 << MCX_LOG_FLOW_TABLE_SIZE) - start,
7370 	    MCX_CREATE_FLOW_GROUP_CRIT_OUTER, &match_crit) != 0)
7371 		goto down;
7372 
7373 	/* flow table entries for unicast and broadcast */
7374 	start = 0;
7375 	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
7376 	    sc->sc_ac.ac_enaddr, dest) != 0)
7377 		goto down;
7378 	start++;
7379 
7380 	if (mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC, start,
7381 	    etherbroadcastaddr, dest) != 0)
7382 		goto down;
7383 	start++;
7384 
7385 	/* multicast entries go after that */
7386 	sc->sc_mcast_flow_base = start;
7387 
7388 	/* re-add any existing multicast flows */
7389 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7390 		if (sc->sc_mcast_flows[i][0] != 0) {
7391 			mcx_set_flow_table_entry_mac(sc, MCX_FLOW_GROUP_MAC,
7392 			    sc->sc_mcast_flow_base + i,
7393 			    sc->sc_mcast_flows[i], dest);
7394 		}
7395 	}
7396 
7397 	if (mcx_set_flow_table_root(sc, sc->sc_mac_flow_table_id) != 0)
7398 		goto down;
7399 
7400 	/*
7401 	 * the RQT can be any size as long as it's a power of two.
7402 	 * since we also restrict the number of queues to a power of two,
7403 	 * we can just put each rx queue in once.
7404 	 */
7405 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++)
7406 		rqns[i] = sc->sc_queues[i].q_rx.rx_rqn;
7407 
7408 	if (mcx_create_rqt(sc, intrmap_count(sc->sc_intrmap), rqns,
7409 	    &sc->sc_rqt) != 0)
7410 		goto down;
7411 
7412 	start = 0;
7413 	flow_index = 0;
7414 	flow_group = -1;
7415 	for (i = 0; i < nitems(mcx_rss_config); i++) {
7416 		rss = &mcx_rss_config[i];
7417 		if (rss->flow_group != flow_group) {
7418 			flow_group = rss->flow_group;
7419 			flow_index = 0;
7420 		}
7421 
7422 		if (rss->hash_sel == 0) {
7423 			if (mcx_create_tir_direct(sc, &sc->sc_queues[0].q_rx,
7424 			    &sc->sc_tir[i]) != 0)
7425 				goto down;
7426 		} else {
7427 			if (mcx_create_tir_indirect(sc, sc->sc_rqt,
7428 			    rss->hash_sel, &sc->sc_tir[i]) != 0)
7429 				goto down;
7430 		}
7431 
7432 		if (mcx_set_flow_table_entry_proto(sc, flow_group,
7433 		    flow_index, rss->ethertype, rss->ip_proto,
7434 		    MCX_FLOW_CONTEXT_DEST_TYPE_TIR | sc->sc_tir[i]) != 0)
7435 			goto down;
7436 		flow_index++;
7437 	}
7438 
7439 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7440 		struct mcx_queues *q = &sc->sc_queues[i];
7441 		rx = &q->q_rx;
7442 		tx = &q->q_tx;
7443 
7444 		/* start the queues */
7445 		if (mcx_ready_sq(sc, tx) != 0)
7446 			goto down;
7447 
7448 		if (mcx_ready_rq(sc, rx) != 0)
7449 			goto down;
7450 
7451 		if_rxr_init(&rx->rx_rxr, 1, (1 << MCX_LOG_RQ_SIZE));
7452 		rx->rx_prod = 0;
7453 		mcx_rx_fill(sc, rx);
7454 
7455 		tx->tx_cons = 0;
7456 		tx->tx_prod = 0;
7457 		ifq_clr_oactive(tx->tx_ifq);
7458 	}
7459 
7460 	mcx_calibrate_first(sc);
7461 
7462 	SET(ifp->if_flags, IFF_RUNNING);
7463 
7464 	return ENETRESET;
7465 down:
7466 	mcx_down(sc);
7467 	return ENOMEM;
7468 }
7469 
7470 static void
7471 mcx_down(struct mcx_softc *sc)
7472 {
7473 	struct ifnet *ifp = &sc->sc_ac.ac_if;
7474 	struct mcx_rss_rule *rss;
7475 	int group, i, flow_group, flow_index;
7476 
7477 	CLR(ifp->if_flags, IFF_RUNNING);
7478 
7479 	/*
7480 	 * delete flow table entries first, so no packets can arrive
7481 	 * after the barriers
7482 	 */
7483 	if (sc->sc_promisc_flow_enabled)
7484 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_PROMISC, 0);
7485 	if (sc->sc_allmulti_flow_enabled)
7486 		mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_ALLMULTI, 0);
7487 	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 0);
7488 	mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC, 1);
7489 	for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7490 		if (sc->sc_mcast_flows[i][0] != 0) {
7491 			mcx_delete_flow_table_entry(sc, MCX_FLOW_GROUP_MAC,
7492 			    sc->sc_mcast_flow_base + i);
7493 		}
7494 	}
7495 
7496 	flow_group = -1;
7497 	flow_index = 0;
7498 	for (i = 0; i < nitems(mcx_rss_config); i++) {
7499 		rss = &mcx_rss_config[i];
7500 		if (rss->flow_group != flow_group) {
7501 			flow_group = rss->flow_group;
7502 			flow_index = 0;
7503 		}
7504 
7505 		mcx_delete_flow_table_entry(sc, flow_group, flow_index);
7506 
7507 		mcx_destroy_tir(sc, sc->sc_tir[i]);
7508 		sc->sc_tir[i] = 0;
7509 
7510 		flow_index++;
7511 	}
7512 	intr_barrier(sc->sc_ihc);
7513 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7514 		struct ifqueue *ifq = sc->sc_queues[i].q_tx.tx_ifq;
7515 		ifq_barrier(ifq);
7516 
7517 		timeout_del_barrier(&sc->sc_queues[i].q_rx.rx_refill);
7518 
7519 		intr_barrier(sc->sc_queues[i].q_ihc);
7520 	}
7521 
7522 	timeout_del_barrier(&sc->sc_calibrate);
7523 
7524 	for (group = 0; group < MCX_NUM_FLOW_GROUPS; group++) {
7525 		if (sc->sc_flow_group[group].g_id != -1)
7526 			mcx_destroy_flow_group(sc, group);
7527 	}
7528 
7529 	if (sc->sc_mac_flow_table_id != -1) {
7530 		mcx_destroy_flow_table(sc, sc->sc_mac_flow_table_id);
7531 		sc->sc_mac_flow_table_id = -1;
7532 	}
7533 	if (sc->sc_rss_flow_table_id != -1) {
7534 		mcx_destroy_flow_table(sc, sc->sc_rss_flow_table_id);
7535 		sc->sc_rss_flow_table_id = -1;
7536 	}
7537 	if (sc->sc_rqt != -1) {
7538 		mcx_destroy_rqt(sc, sc->sc_rqt);
7539 		sc->sc_rqt = -1;
7540 	}
7541 
7542 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7543 		struct mcx_queues *q = &sc->sc_queues[i];
7544 		struct mcx_rx *rx = &q->q_rx;
7545 		struct mcx_tx *tx = &q->q_tx;
7546 		struct mcx_cq *cq = &q->q_cq;
7547 
7548 		if (rx->rx_rqn != 0)
7549 			mcx_destroy_rq(sc, rx);
7550 
7551 		if (tx->tx_sqn != 0)
7552 			mcx_destroy_sq(sc, tx);
7553 
7554 		if (tx->tx_slots != NULL) {
7555 			mcx_free_slots(sc, tx->tx_slots,
7556 			    (1 << MCX_LOG_SQ_SIZE), (1 << MCX_LOG_SQ_SIZE));
7557 			tx->tx_slots = NULL;
7558 		}
7559 		if (rx->rx_slots != NULL) {
7560 			mcx_free_slots(sc, rx->rx_slots,
7561 			    (1 << MCX_LOG_RQ_SIZE), (1 << MCX_LOG_RQ_SIZE));
7562 			rx->rx_slots = NULL;
7563 		}
7564 
7565 		if (cq->cq_n != 0)
7566 			mcx_destroy_cq(sc, cq);
7567 	}
7568 	if (sc->sc_tis != 0) {
7569 		mcx_destroy_tis(sc, sc->sc_tis);
7570 		sc->sc_tis = 0;
7571 	}
7572 }
7573 
7574 static int
7575 mcx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
7576 {
7577 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7578 	struct ifreq *ifr = (struct ifreq *)data;
7579 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
7580 	int s, i, error = 0;
7581 	uint32_t dest;
7582 
7583 	s = splnet();
7584 	switch (cmd) {
7585 	case SIOCSIFADDR:
7586 		ifp->if_flags |= IFF_UP;
7587 		/* FALLTHROUGH */
7588 
7589 	case SIOCSIFFLAGS:
7590 		if (ISSET(ifp->if_flags, IFF_UP)) {
7591 			if (ISSET(ifp->if_flags, IFF_RUNNING))
7592 				error = ENETRESET;
7593 			else
7594 				error = mcx_up(sc);
7595 		} else {
7596 			if (ISSET(ifp->if_flags, IFF_RUNNING))
7597 				mcx_down(sc);
7598 		}
7599 		break;
7600 
7601 	case SIOCGIFMEDIA:
7602 	case SIOCSIFMEDIA:
7603 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
7604 		break;
7605 
7606 	case SIOCGIFSFFPAGE:
7607 		error = mcx_get_sffpage(ifp, (struct if_sffpage *)data);
7608 		break;
7609 
7610 	case SIOCGIFRXR:
7611 		error = mcx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
7612 		break;
7613 
7614 	case SIOCADDMULTI:
7615 		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
7616 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
7617 			if (error != 0)
7618 				break;
7619 
7620 			dest = MCX_FLOW_CONTEXT_DEST_TYPE_TABLE |
7621 			    sc->sc_rss_flow_table_id;
7622 
7623 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7624 				if (sc->sc_mcast_flows[i][0] == 0) {
7625 					memcpy(sc->sc_mcast_flows[i], addrlo,
7626 					    ETHER_ADDR_LEN);
7627 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
7628 						mcx_set_flow_table_entry_mac(sc,
7629 						    MCX_FLOW_GROUP_MAC,
7630 						    sc->sc_mcast_flow_base + i,
7631 						    sc->sc_mcast_flows[i], dest);
7632 					}
7633 					break;
7634 				}
7635 			}
7636 
7637 			if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
7638 				if (i == MCX_NUM_MCAST_FLOWS) {
7639 					SET(ifp->if_flags, IFF_ALLMULTI);
7640 					sc->sc_extra_mcast++;
7641 					error = ENETRESET;
7642 				}
7643 
7644 				if (sc->sc_ac.ac_multirangecnt > 0) {
7645 					SET(ifp->if_flags, IFF_ALLMULTI);
7646 					error = ENETRESET;
7647 				}
7648 			}
7649 		}
7650 		break;
7651 
7652 	case SIOCDELMULTI:
7653 		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
7654 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
7655 			if (error != 0)
7656 				break;
7657 
7658 			for (i = 0; i < MCX_NUM_MCAST_FLOWS; i++) {
7659 				if (memcmp(sc->sc_mcast_flows[i], addrlo,
7660 				    ETHER_ADDR_LEN) == 0) {
7661 					if (ISSET(ifp->if_flags, IFF_RUNNING)) {
7662 						mcx_delete_flow_table_entry(sc,
7663 						    MCX_FLOW_GROUP_MAC,
7664 						    sc->sc_mcast_flow_base + i);
7665 					}
7666 					sc->sc_mcast_flows[i][0] = 0;
7667 					break;
7668 				}
7669 			}
7670 
7671 			if (i == MCX_NUM_MCAST_FLOWS)
7672 				sc->sc_extra_mcast--;
7673 
7674 			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
7675 			    (sc->sc_extra_mcast == 0) &&
7676 			    (sc->sc_ac.ac_multirangecnt == 0)) {
7677 				CLR(ifp->if_flags, IFF_ALLMULTI);
7678 				error = ENETRESET;
7679 			}
7680 		}
7681 		break;
7682 
7683 	default:
7684 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
7685 	}
7686 
7687 	if (error == ENETRESET) {
7688 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7689 		    (IFF_UP | IFF_RUNNING))
7690 			mcx_iff(sc);
7691 		error = 0;
7692 	}
7693 	splx(s);
7694 
7695 	return (error);
7696 }
7697 
7698 static int
7699 mcx_get_sffpage(struct ifnet *ifp, struct if_sffpage *sff)
7700 {
7701 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
7702 	struct mcx_reg_mcia mcia;
7703 	struct mcx_reg_pmlp pmlp;
7704 	int offset, error;
7705 
7706 	rw_enter_write(&sc->sc_cmdq_ioctl_lk);
7707 
7708 	/* get module number */
7709 	memset(&pmlp, 0, sizeof(pmlp));
7710 	pmlp.rp_local_port = 1;
7711 	error = mcx_access_hca_reg(sc, MCX_REG_PMLP, MCX_REG_OP_READ, &pmlp,
7712 	    sizeof(pmlp), MCX_CMDQ_SLOT_IOCTL);
7713 	if (error != 0) {
7714 		printf("%s: unable to get eeprom module number\n",
7715 		    DEVNAME(sc));
7716 		goto out;
7717 	}
7718 
7719 	for (offset = 0; offset < 256; offset += MCX_MCIA_EEPROM_BYTES) {
7720 		memset(&mcia, 0, sizeof(mcia));
7721 		mcia.rm_l = 0;
7722 		mcia.rm_module = betoh32(pmlp.rp_lane0_mapping) &
7723 		    MCX_PMLP_MODULE_NUM_MASK;
7724 		mcia.rm_i2c_addr = sff->sff_addr / 2;	/* apparently */
7725 		mcia.rm_page_num = sff->sff_page;
7726 		mcia.rm_dev_addr = htobe16(offset);
7727 		mcia.rm_size = htobe16(MCX_MCIA_EEPROM_BYTES);
7728 
7729 		error = mcx_access_hca_reg(sc, MCX_REG_MCIA, MCX_REG_OP_READ,
7730 		    &mcia, sizeof(mcia), MCX_CMDQ_SLOT_IOCTL);
7731 		if (error != 0) {
7732 			printf("%s: unable to read eeprom at %x\n",
7733 			    DEVNAME(sc), offset);
7734 			goto out;
7735 		}
7736 
7737 		memcpy(sff->sff_data + offset, mcia.rm_data,
7738 		    MCX_MCIA_EEPROM_BYTES);
7739 	}
7740 
7741  out:
7742 	rw_exit_write(&sc->sc_cmdq_ioctl_lk);
7743 	return (error);
7744 }
7745 
7746 static int
7747 mcx_rxrinfo(struct mcx_softc *sc, struct if_rxrinfo *ifri)
7748 {
7749 	struct if_rxring_info *ifrs;
7750 	unsigned int i;
7751 	int error;
7752 
7753 	ifrs = mallocarray(intrmap_count(sc->sc_intrmap), sizeof(*ifrs),
7754 	    M_TEMP, M_WAITOK|M_ZERO|M_CANFAIL);
7755 	if (ifrs == NULL)
7756 		return (ENOMEM);
7757 
7758 	for (i = 0; i < intrmap_count(sc->sc_intrmap); i++) {
7759 		struct mcx_rx *rx = &sc->sc_queues[i].q_rx;
7760 		struct if_rxring_info *ifr = &ifrs[i];
7761 
7762 		snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i);
7763 		ifr->ifr_size = sc->sc_hardmtu;
7764 		ifr->ifr_info = rx->rx_rxr;
7765 	}
7766 
7767 	error = if_rxr_info_ioctl(ifri, i, ifrs);
7768 	free(ifrs, M_TEMP, i * sizeof(*ifrs));
7769 
7770 	return (error);
7771 }
7772 
7773 int
7774 mcx_load_mbuf(struct mcx_softc *sc, struct mcx_slot *ms, struct mbuf *m)
7775 {
7776 	switch (bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
7777 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
7778 	case 0:
7779 		break;
7780 
7781 	case EFBIG:
7782 		if (m_defrag(m, M_DONTWAIT) == 0 &&
7783 		    bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m,
7784 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
7785 			break;
7786 
7787 	default:
7788 		return (1);
7789 	}
7790 
7791 	ms->ms_m = m;
7792 	return (0);
7793 }
7794 
7795 static void
7796 mcx_start(struct ifqueue *ifq)
7797 {
7798 	struct mcx_tx *tx = ifq->ifq_softc;
7799 	struct ifnet *ifp = ifq->ifq_if;
7800 	struct mcx_softc *sc = ifp->if_softc;
7801 	struct mcx_sq_entry *sq, *sqe;
7802 	struct mcx_sq_entry_seg *sqs;
7803 	struct mcx_slot *ms;
7804 	bus_dmamap_t map;
7805 	struct mbuf *m;
7806 	u_int idx, free, used;
7807 	uint64_t *bf;
7808 	uint32_t csum;
7809 	size_t bf_base;
7810 	int i, seg, nseg;
7811 
7812 	bf_base = (tx->tx_uar * MCX_PAGE_SIZE) + MCX_UAR_BF;
7813 
7814 	idx = tx->tx_prod % (1 << MCX_LOG_SQ_SIZE);
7815 	free = (tx->tx_cons + (1 << MCX_LOG_SQ_SIZE)) - tx->tx_prod;
7816 
7817 	used = 0;
7818 	bf = NULL;
7819 
7820 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
7821 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_POSTWRITE);
7822 
7823 	sq = (struct mcx_sq_entry *)MCX_DMA_KVA(&tx->tx_sq_mem);
7824 
7825 	for (;;) {
7826 		if (used + MCX_SQ_ENTRY_MAX_SLOTS >= free) {
7827 			ifq_set_oactive(ifq);
7828 			break;
7829 		}
7830 
7831 		m = ifq_dequeue(ifq);
7832 		if (m == NULL) {
7833 			break;
7834 		}
7835 
7836 		sqe = sq + idx;
7837 		ms = &tx->tx_slots[idx];
7838 		memset(sqe, 0, sizeof(*sqe));
7839 
7840 		/* ctrl segment */
7841 		sqe->sqe_opcode_index = htobe32(MCX_SQE_WQE_OPCODE_SEND |
7842 		    ((tx->tx_prod & 0xffff) << MCX_SQE_WQE_INDEX_SHIFT));
7843 		/* always generate a completion event */
7844 		sqe->sqe_signature = htobe32(MCX_SQE_CE_CQE_ALWAYS);
7845 
7846 		/* eth segment */
7847 		csum = 0;
7848 		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
7849 			csum |= MCX_SQE_L3_CSUM;
7850 		if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
7851 			csum |= MCX_SQE_L4_CSUM;
7852 		sqe->sqe_mss_csum = htobe32(csum);
7853 		sqe->sqe_inline_header_size = htobe16(MCX_SQ_INLINE_SIZE);
7854 #if NVLAN > 0
7855 		if (m->m_flags & M_VLANTAG) {
7856 			struct ether_vlan_header *evh;
7857 			evh = (struct ether_vlan_header *)
7858 			    &sqe->sqe_inline_headers;
7859 
7860 			/* slightly cheaper vlan_inject() */
7861 			m_copydata(m, 0, ETHER_HDR_LEN, evh);
7862 			evh->evl_proto = evh->evl_encap_proto;
7863 			evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
7864 			evh->evl_tag = htons(m->m_pkthdr.ether_vtag);
7865 
7866 			m_adj(m, ETHER_HDR_LEN);
7867 		} else
7868 #endif
7869 		{
7870 			m_copydata(m, 0, MCX_SQ_INLINE_SIZE,
7871 			    sqe->sqe_inline_headers);
7872 			m_adj(m, MCX_SQ_INLINE_SIZE);
7873 		}
7874 
7875 		if (mcx_load_mbuf(sc, ms, m) != 0) {
7876 			m_freem(m);
7877 			ifp->if_oerrors++;
7878 			continue;
7879 		}
7880 		bf = (uint64_t *)sqe;
7881 
7882 #if NBPFILTER > 0
7883 		if (ifp->if_bpf)
7884 			bpf_mtap_hdr(ifp->if_bpf,
7885 			    (caddr_t)sqe->sqe_inline_headers,
7886 			    MCX_SQ_INLINE_SIZE, m, BPF_DIRECTION_OUT);
7887 #endif
7888 		map = ms->ms_map;
7889 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
7890 		    BUS_DMASYNC_PREWRITE);
7891 
7892 		sqe->sqe_ds_sq_num =
7893 		    htobe32((tx->tx_sqn << MCX_SQE_SQ_NUM_SHIFT) |
7894 		    (map->dm_nsegs + 3));
7895 
7896 		/* data segment - first wqe has one segment */
7897 		sqs = sqe->sqe_segs;
7898 		seg = 0;
7899 		nseg = 1;
7900 		for (i = 0; i < map->dm_nsegs; i++) {
7901 			if (seg == nseg) {
7902 				/* next slot */
7903 				idx++;
7904 				if (idx == (1 << MCX_LOG_SQ_SIZE))
7905 					idx = 0;
7906 				tx->tx_prod++;
7907 				used++;
7908 
7909 				sqs = (struct mcx_sq_entry_seg *)(sq + idx);
7910 				seg = 0;
7911 				nseg = MCX_SQ_SEGS_PER_SLOT;
7912 			}
7913 			sqs[seg].sqs_byte_count =
7914 			    htobe32(map->dm_segs[i].ds_len);
7915 			sqs[seg].sqs_lkey = htobe32(sc->sc_lkey);
7916 			sqs[seg].sqs_addr = htobe64(map->dm_segs[i].ds_addr);
7917 			seg++;
7918 		}
7919 
7920 		idx++;
7921 		if (idx == (1 << MCX_LOG_SQ_SIZE))
7922 			idx = 0;
7923 		tx->tx_prod++;
7924 		used++;
7925 	}
7926 
7927 	bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&tx->tx_sq_mem),
7928 	    0, MCX_DMA_LEN(&tx->tx_sq_mem), BUS_DMASYNC_PREWRITE);
7929 
7930 	if (used) {
7931 		bus_size_t blueflame;
7932 
7933 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7934 		    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_POSTWRITE);
7935 		htobem32(MCX_DMA_OFF(&sc->sc_doorbell_mem, tx->tx_doorbell),
7936 		    tx->tx_prod & MCX_WQ_DOORBELL_MASK);
7937 		bus_dmamap_sync(sc->sc_dmat, MCX_DMA_MAP(&sc->sc_doorbell_mem),
7938 		    tx->tx_doorbell, sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
7939 
7940 		/*
7941 		 * write the first 64 bits of the last sqe we produced
7942 		 * to the blue flame buffer
7943 		 */
7944 
7945 		blueflame = bf_base + tx->tx_bf_offset;
7946 		bus_space_write_raw_8(sc->sc_memt, sc->sc_memh,
7947 		    blueflame, *bf);
7948 		mcx_bar(sc, blueflame, sizeof(*bf), BUS_SPACE_BARRIER_WRITE);
7949 
7950 		/* next write goes to the other buffer */
7951 		tx->tx_bf_offset ^= sc->sc_bf_size;
7952 	}
7953 }
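
/*
 * note on mcx_start above: each packet occupies at least one sq entry
 * (ctrl and eth segments plus the first data segment), with further
 * data segments spilling into following slots, which is why the
 * oactive check reserves MCX_SQ_ENTRY_MAX_SLOTS per packet.  the
 * descriptor count written into sqe_ds_sq_num is dm_nsegs + 3,
 * presumably counting the ctrl and inline-header segments in the same
 * units as the data segments.  after the work queue doorbell record
 * is updated, the first 64 bits of the last entry produced are also
 * written through the blue flame window, alternating between its two
 * buffers.
 */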
7954 
7955 static void
7956 mcx_watchdog(struct ifnet *ifp)
7957 {
7958 }
7959 
7960 static void
7961 mcx_media_add_types(struct mcx_softc *sc)
7962 {
7963 	struct mcx_reg_ptys ptys;
7964 	int i;
7965 	uint32_t proto_cap;
7966 
7967 	memset(&ptys, 0, sizeof(ptys));
7968 	ptys.rp_local_port = 1;
7969 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
7970 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
7971 	    sizeof(ptys), MCX_CMDQ_SLOT_POLL) != 0) {
7972 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
7973 		return;
7974 	}
7975 
7976 	proto_cap = betoh32(ptys.rp_eth_proto_cap);
7977 	for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
7978 		const struct mcx_eth_proto_capability *cap;
7979 		if (!ISSET(proto_cap, 1 << i))
7980 			continue;
7981 
7982 		cap = &mcx_eth_cap_map[i];
7983 		if (cap->cap_media == 0)
7984 			continue;
7985 
7986 		ifmedia_add(&sc->sc_media, IFM_ETHER | cap->cap_media, 0, NULL);
7987 	}
7988 
7989 	proto_cap = betoh32(ptys.rp_ext_eth_proto_cap);
7990 	for (i = 0; i < nitems(mcx_ext_eth_cap_map); i++) {
7991 		const struct mcx_eth_proto_capability *cap;
7992 		if (!ISSET(proto_cap, 1 << i))
7993 			continue;
7994 
7995 		cap = &mcx_ext_eth_cap_map[i];
7996 		if (cap->cap_media == 0)
7997 			continue;
7998 
7999 		ifmedia_add(&sc->sc_media, IFM_ETHER | cap->cap_media, 0, NULL);
8000 	}
8001 }
8002 
8003 static void
8004 mcx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
8005 {
8006 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
8007 	struct mcx_reg_ptys ptys;
8008 	int i;
8009 	uint32_t proto_oper;
8010 	uint32_t ext_proto_oper;
8011 	uint64_t media_oper;
8012 
8013 	memset(&ptys, 0, sizeof(ptys));
8014 	ptys.rp_local_port = 1;
8015 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8016 
8017 	rw_enter_write(&sc->sc_cmdq_ioctl_lk);
8018 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
8019 	    sizeof(ptys), MCX_CMDQ_SLOT_IOCTL) != 0) {
8020 		printf("%s: unable to read port type/speed\n", DEVNAME(sc));
8021 		goto out;
8022 	}
8023 
8024 	proto_oper = betoh32(ptys.rp_eth_proto_oper);
8025 	ext_proto_oper = betoh32(ptys.rp_ext_eth_proto_oper);
8026 
8027 	media_oper = 0;
8028 
8029 	for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
8030 		const struct mcx_eth_proto_capability *cap;
8031 		if (!ISSET(proto_oper, 1 << i))
8032 			continue;
8033 
8034 		cap = &mcx_eth_cap_map[i];
8035 
8036 		if (cap->cap_media != 0)
8037 			media_oper = cap->cap_media;
8038 	}
8039 
8040 	if (media_oper == 0) {
8041 		for (i = 0; i < nitems(mcx_ext_eth_cap_map); i++) {
8042 			const struct mcx_eth_proto_capability *cap;
8043 			if (!ISSET(ext_proto_oper, 1 << i))
8044 				continue;
8045 
8046 			cap = &mcx_ext_eth_cap_map[i];
8047 
8048 			if (cap->cap_media != 0)
8049 				media_oper = cap->cap_media;
8050 		}
8051 	}
8052 
8053 	ifmr->ifm_status = IFM_AVALID;
8054 	if ((proto_oper | ext_proto_oper) != 0) {
8055 		ifmr->ifm_status |= IFM_ACTIVE;
8056 		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media_oper;
8057 		/* txpause, rxpause, duplex? */
8058 	}
8059  out:
8060 	rw_exit_write(&sc->sc_cmdq_ioctl_lk);
8061 }
8062 
8063 static int
8064 mcx_media_change(struct ifnet *ifp)
8065 {
8066 	struct mcx_softc *sc = (struct mcx_softc *)ifp->if_softc;
8067 	struct mcx_reg_ptys ptys;
8068 	struct mcx_reg_paos paos;
8069 	uint32_t media;
8070 	uint32_t ext_media;
8071 	int i, error;
8072 
8073 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
8074 		return EINVAL;
8075 
8076 	error = 0;
8077 	rw_enter_write(&sc->sc_cmdq_ioctl_lk);
8078 
8079 	if (IFM_SUBTYPE(sc->sc_media.ifm_media) == IFM_AUTO) {
8080 		/* read ptys to get supported media */
8081 		memset(&ptys, 0, sizeof(ptys));
8082 		ptys.rp_local_port = 1;
8083 		ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8084 		if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ,
8085 		    &ptys, sizeof(ptys), MCX_CMDQ_SLOT_IOCTL) != 0) {
8086 			printf("%s: unable to read port type/speed\n",
8087 			    DEVNAME(sc));
8088 			error = EIO;
8089 			goto out;
8090 		}
8091 
8092 		media = betoh32(ptys.rp_eth_proto_cap);
8093 		ext_media = betoh32(ptys.rp_ext_eth_proto_cap);
8094 	} else {
8095 		/* map media type */
8096 		media = 0;
8097 		for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
8098 			const struct mcx_eth_proto_capability *cap;
8099 
8100 			cap = &mcx_eth_cap_map[i];
8101 			if (cap->cap_media ==
8102 			    IFM_SUBTYPE(sc->sc_media.ifm_media)) {
8103 				media = (1 << i);
8104 				break;
8105 			}
8106 		}
8107 
8108 		ext_media = 0;
8109 		for (i = 0; i < nitems(mcx_ext_eth_cap_map); i++) {
8110 			const struct mcx_eth_proto_capability *cap;
8111 
8112 			cap = &mcx_ext_eth_cap_map[i];
8113 			if (cap->cap_media ==
8114 			    IFM_SUBTYPE(sc->sc_media.ifm_media)) {
8115 				ext_media = (1 << i);
8116 				break;
8117 			}
8118 		}
8119 	}
8120 
8121 	/* disable the port */
8122 	memset(&paos, 0, sizeof(paos));
8123 	paos.rp_local_port = 1;
8124 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_DOWN;
8125 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
8126 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
8127 	    sizeof(paos), MCX_CMDQ_SLOT_IOCTL) != 0) {
8128 		printf("%s: unable to set port state to down\n", DEVNAME(sc));
8129 		error = EIO;
8130 		goto out;
8131 	}
8132 
8133 	memset(&ptys, 0, sizeof(ptys));
8134 	ptys.rp_local_port = 1;
8135 	ptys.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH;
8136 	ptys.rp_eth_proto_admin = htobe32(media);
8137 	ptys.rp_ext_eth_proto_admin = htobe32(ext_media);
8138 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_WRITE, &ptys,
8139 	    sizeof(ptys), MCX_CMDQ_SLOT_IOCTL) != 0) {
8140 		printf("%s: unable to set port media type/speed\n",
8141 		    DEVNAME(sc));
8142 		error = EIO;
8143 		/* continue on */
8144 	}
8145 
8146 	/* re-enable the port to start negotiation */
8147 	memset(&paos, 0, sizeof(paos));
8148 	paos.rp_local_port = 1;
8149 	paos.rp_admin_status = MCX_REG_PAOS_ADMIN_STATUS_UP;
8150 	paos.rp_admin_state_update = MCX_REG_PAOS_ADMIN_STATE_UPDATE_EN;
8151 	if (mcx_access_hca_reg(sc, MCX_REG_PAOS, MCX_REG_OP_WRITE, &paos,
8152 	    sizeof(paos), MCX_CMDQ_SLOT_IOCTL) != 0) {
8153 		printf("%s: unable to set port state to up\n", DEVNAME(sc));
8154 		error = EIO;
8155 	}
8156 
8157  out:
8158 	rw_exit_write(&sc->sc_cmdq_ioctl_lk);
8159 	return error;
8160 }
8161 
8162 static void
8163 mcx_port_change(void *xsc)
8164 {
8165 	struct mcx_softc *sc = xsc;
8166 	struct ifnet *ifp = &sc->sc_ac.ac_if;
8167 	struct mcx_reg_ptys ptys = {
8168 		.rp_local_port = 1,
8169 		.rp_proto_mask = MCX_REG_PTYS_PROTO_MASK_ETH,
8170 	};
8171 	int link_state = LINK_STATE_DOWN;
8172 	int slot;
8173 
8174 	if (cold)
8175 		slot = MCX_CMDQ_SLOT_POLL;
8176 	else
8177 		slot = MCX_CMDQ_SLOT_LINK;
8178 
8179 	if (mcx_access_hca_reg(sc, MCX_REG_PTYS, MCX_REG_OP_READ, &ptys,
8180 	    sizeof(ptys), slot) == 0) {
8181 		uint32_t proto_oper = betoh32(ptys.rp_eth_proto_oper);
8182 		uint32_t ext_proto_oper = betoh32(ptys.rp_ext_eth_proto_oper);
8183 		uint64_t baudrate = 0;
8184 		unsigned int i;
8185 
8186 		if ((proto_oper | ext_proto_oper) != 0)
8187 			link_state = LINK_STATE_FULL_DUPLEX;
8188 
8189 		for (i = 0; i < nitems(mcx_eth_cap_map); i++) {
8190 			const struct mcx_eth_proto_capability *cap;
8191 			if (!ISSET(proto_oper, 1 << i))
8192 				continue;
8193 
8194 			cap = &mcx_eth_cap_map[i];
8195 			if (cap->cap_baudrate == 0)
8196 				continue;
8197 
8198 			baudrate = cap->cap_baudrate;
8199 			break;
8200 		}
8201 
8202 		if (baudrate == 0) {
8203 			for (i = 0; i < nitems(mcx_ext_eth_cap_map); i++) {
8204 				const struct mcx_eth_proto_capability *cap;
8205 				if (!ISSET(ext_proto_oper, 1 << i))
8206 					continue;
8207 
8208 				cap = &mcx_ext_eth_cap_map[i];
8209 				if (cap->cap_baudrate == 0)
8210 					continue;
8211 
8212 				baudrate = cap->cap_baudrate;
8213 				break;
8214 			}
8215 		}
8216 
8217 		ifp->if_baudrate = baudrate;
8218 	}
8219 
8220 	if (link_state != ifp->if_link_state) {
8221 		ifp->if_link_state = link_state;
8222 		if_link_state_change(ifp);
8223 	}
8224 }
8225 
8226 static inline uint32_t
8227 mcx_rd(struct mcx_softc *sc, bus_size_t r)
8228 {
8229 	uint32_t word;
8230 
8231 	word = bus_space_read_raw_4(sc->sc_memt, sc->sc_memh, r);
8232 
8233 	return (betoh32(word));
8234 }
8235 
8236 static inline void
8237 mcx_wr(struct mcx_softc *sc, bus_size_t r, uint32_t v)
8238 {
8239 	bus_space_write_raw_4(sc->sc_memt, sc->sc_memh, r, htobe32(v));
8240 }
8241 
8242 static inline void
8243 mcx_bar(struct mcx_softc *sc, bus_size_t r, bus_size_t l, int f)
8244 {
8245 	bus_space_barrier(sc->sc_memt, sc->sc_memh, r, l, f);
8246 }
8247 
8248 static uint64_t
8249 mcx_timer(struct mcx_softc *sc)
8250 {
8251 	uint32_t hi, lo, ni;
8252 
8253 	hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
8254 	for (;;) {
8255 		lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
8256 		mcx_bar(sc, MCX_INTERNAL_TIMER_L, 8, BUS_SPACE_BARRIER_READ);
8257 		ni = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
8258 
8259 		if (ni == hi)
8260 			break;
8261 
8262 		hi = ni;
8263 	}
8264 
8265 	return (((uint64_t)hi << 32) | (uint64_t)lo);
8266 }
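
/*
 * note on mcx_timer above: the free-running counter is 64 bits wide
 * but only readable as two 32-bit halves, so the high half is reread
 * until it is stable; if the low half wrapped between the reads, the
 * high halves differ and the sample is retried, giving a consistent
 * 64-bit value without locking.
 */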
8267 
8268 static int
8269 mcx_dmamem_alloc(struct mcx_softc *sc, struct mcx_dmamem *mxm,
8270     bus_size_t size, u_int align)
8271 {
8272 	mxm->mxm_size = size;
8273 
8274 	if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1,
8275 	    mxm->mxm_size, 0,
8276 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
8277 	    &mxm->mxm_map) != 0)
8278 		return (1);
8279 	if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size,
8280 	    align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs,
8281 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_64BIT) != 0)
8282 		goto destroy;
8283 	if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs,
8284 	    mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0)
8285 		goto free;
8286 	if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva,
8287 	    mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0)
8288 		goto unmap;
8289 
8290 	return (0);
8291 unmap:
8292 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
8293 free:
8294 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
8295 destroy:
8296 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
8297 	return (1);
8298 }
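
/*
 * note on mcx_dmamem_alloc above: this is the usual four-step bus_dma
 * ladder (create the map, allocate a single contiguous segment, map
 * it into kva, then load the map), with the goto labels unwinding the
 * completed steps in reverse order on failure.
 */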
8299 
8300 static void
8301 mcx_dmamem_zero(struct mcx_dmamem *mxm)
8302 {
8303 	memset(MCX_DMA_KVA(mxm), 0, MCX_DMA_LEN(mxm));
8304 }
8305 
8306 static void
8307 mcx_dmamem_free(struct mcx_softc *sc, struct mcx_dmamem *mxm)
8308 {
8309 	bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map);
8310 	bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size);
8311 	bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1);
8312 	bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map);
8313 }
8314 
8315 static int
8316 mcx_hwmem_alloc(struct mcx_softc *sc, struct mcx_hwmem *mhm, unsigned int pages)
8317 {
8318 	bus_dma_segment_t *segs;
8319 	bus_size_t len = pages * MCX_PAGE_SIZE;
8320 	size_t seglen;
8321 
8322 	segs = mallocarray(sizeof(*segs), pages, M_DEVBUF, M_WAITOK|M_CANFAIL);
8323 	if (segs == NULL)
8324 		return (-1);
8325 
8326 	seglen = sizeof(*segs) * pages;
8327 
8328 	if (bus_dmamem_alloc(sc->sc_dmat, len, MCX_PAGE_SIZE, 0,
8329 	    segs, pages, &mhm->mhm_seg_count,
8330 	    BUS_DMA_NOWAIT|BUS_DMA_64BIT) != 0)
8331 		goto free_segs;
8332 
8333 	if (mhm->mhm_seg_count < pages) {
8334 		size_t nseglen;
8335 
8336 		mhm->mhm_segs = mallocarray(sizeof(*mhm->mhm_segs),
8337 		    mhm->mhm_seg_count, M_DEVBUF, M_WAITOK|M_CANFAIL);
8338 		if (mhm->mhm_segs == NULL)
8339 			goto free_dmamem;
8340 
8341 		nseglen = sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count;
8342 
8343 		memcpy(mhm->mhm_segs, segs, nseglen);
8344 
8345 		free(segs, M_DEVBUF, seglen);
8346 
8347 		segs = mhm->mhm_segs;
8348 		seglen = nseglen;
8349 	} else
8350 		mhm->mhm_segs = segs;
8351 
8352 	if (bus_dmamap_create(sc->sc_dmat, len, pages, MCX_PAGE_SIZE,
8353 	    MCX_PAGE_SIZE, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW|BUS_DMA_64BIT,
8354 	    &mhm->mhm_map) != 0)
8355 		goto free_dmamem;
8356 
8357 	if (bus_dmamap_load_raw(sc->sc_dmat, mhm->mhm_map,
8358 	    mhm->mhm_segs, mhm->mhm_seg_count, len, BUS_DMA_NOWAIT) != 0)
8359 		goto destroy;
8360 
8361 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
8362 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_PRERW);
8363 
8364 	mhm->mhm_npages = pages;
8365 
8366 	return (0);
8367 
8368 destroy:
8369 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
8370 free_dmamem:
8371 	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
8372 free_segs:
8373 	free(segs, M_DEVBUF, seglen);
8374 	mhm->mhm_segs = NULL;
8375 
8376 	return (-1);
8377 }
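
/*
 * note on mcx_hwmem_alloc above: bus_dmamem_alloc() may satisfy the
 * request with fewer segments than pages when it finds contiguous
 * memory, so the segment array is reallocated down to the actual
 * count.  the map is created with a MCX_PAGE_SIZE boundary so no
 * segment crosses a page boundary, which keeps the memory describable
 * to the device as a list of page addresses.
 */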
8378 
8379 static void
8380 mcx_hwmem_free(struct mcx_softc *sc, struct mcx_hwmem *mhm)
8381 {
8382 	if (mhm->mhm_npages == 0)
8383 		return;
8384 
8385 	bus_dmamap_sync(sc->sc_dmat, mhm->mhm_map,
8386 	    0, mhm->mhm_map->dm_mapsize, BUS_DMASYNC_POSTRW);
8387 
8388 	bus_dmamap_unload(sc->sc_dmat, mhm->mhm_map);
8389 	bus_dmamap_destroy(sc->sc_dmat, mhm->mhm_map);
8390 	bus_dmamem_free(sc->sc_dmat, mhm->mhm_segs, mhm->mhm_seg_count);
8391 	free(mhm->mhm_segs, M_DEVBUF,
8392 	    sizeof(*mhm->mhm_segs) * mhm->mhm_seg_count);
8393 
8394 	mhm->mhm_npages = 0;
8395 }
8396 
8397 #if NKSTAT > 0
8398 struct mcx_ppcnt {
8399 	char			 name[KSTAT_KV_NAMELEN];
8400 	enum kstat_kv_unit	 unit;
8401 };
8402 
8403 static const struct mcx_ppcnt mcx_ppcnt_ieee8023_tpl[] = {
8404 	{ "Good Tx",		KSTAT_KV_U_PACKETS, },
8405 	{ "Good Rx",		KSTAT_KV_U_PACKETS, },
8406 	{ "FCS errs",		KSTAT_KV_U_PACKETS, },
8407 	{ "Alignment Errs",	KSTAT_KV_U_PACKETS, },
8408 	{ "Good Tx",		KSTAT_KV_U_BYTES, },
8409 	{ "Good Rx",		KSTAT_KV_U_BYTES, },
8410 	{ "Multicast Tx",	KSTAT_KV_U_PACKETS, },
8411 	{ "Broadcast Tx",	KSTAT_KV_U_PACKETS, },
8412 	{ "Multicast Rx",	KSTAT_KV_U_PACKETS, },
8413 	{ "Broadcast Rx",	KSTAT_KV_U_PACKETS, },
8414 	{ "In Range Len",	KSTAT_KV_U_PACKETS, },
8415 	{ "Out Of Range Len",	KSTAT_KV_U_PACKETS, },
8416 	{ "Frame Too Long",	KSTAT_KV_U_PACKETS, },
8417 	{ "Symbol Errs",	KSTAT_KV_U_PACKETS, },
8418 	{ "MAC Ctrl Tx",	KSTAT_KV_U_PACKETS, },
8419 	{ "MAC Ctrl Rx",	KSTAT_KV_U_PACKETS, },
8420 	{ "MAC Ctrl Unsup",	KSTAT_KV_U_PACKETS, },
8421 	{ "Pause Rx",		KSTAT_KV_U_PACKETS, },
8422 	{ "Pause Tx",		KSTAT_KV_U_PACKETS, },
8423 };
8424 CTASSERT(nitems(mcx_ppcnt_ieee8023_tpl) == mcx_ppcnt_ieee8023_count);
8425 
8426 static const struct mcx_ppcnt mcx_ppcnt_rfc2863_tpl[] = {
8427 	{ "Rx Bytes",		KSTAT_KV_U_BYTES, },
8428 	{ "Rx Unicast",		KSTAT_KV_U_PACKETS, },
8429 	{ "Rx Discards",	KSTAT_KV_U_PACKETS, },
8430 	{ "Rx Errors",		KSTAT_KV_U_PACKETS, },
8431 	{ "Rx Unknown Proto",	KSTAT_KV_U_PACKETS, },
8432 	{ "Tx Bytes",		KSTAT_KV_U_BYTES, },
8433 	{ "Tx Unicast",		KSTAT_KV_U_PACKETS, },
8434 	{ "Tx Discards",	KSTAT_KV_U_PACKETS, },
8435 	{ "Tx Errors",		KSTAT_KV_U_PACKETS, },
8436 	{ "Rx Multicast",	KSTAT_KV_U_PACKETS, },
8437 	{ "Rx Broadcast",	KSTAT_KV_U_PACKETS, },
8438 	{ "Tx Multicast",	KSTAT_KV_U_PACKETS, },
8439 	{ "Tx Broadcast",	KSTAT_KV_U_PACKETS, },
8440 };
8441 CTASSERT(nitems(mcx_ppcnt_rfc2863_tpl) == mcx_ppcnt_rfc2863_count);
8442 
8443 static const struct mcx_ppcnt mcx_ppcnt_rfc2819_tpl[] = {
8444 	{ "Drop Events",	KSTAT_KV_U_PACKETS, },
8445 	{ "Octets",		KSTAT_KV_U_BYTES, },
8446 	{ "Packets",		KSTAT_KV_U_PACKETS, },
8447 	{ "Broadcasts",		KSTAT_KV_U_PACKETS, },
8448 	{ "Multicasts",		KSTAT_KV_U_PACKETS, },
8449 	{ "CRC Align Errs",	KSTAT_KV_U_PACKETS, },
8450 	{ "Undersize",		KSTAT_KV_U_PACKETS, },
8451 	{ "Oversize",		KSTAT_KV_U_PACKETS, },
8452 	{ "Fragments",		KSTAT_KV_U_PACKETS, },
8453 	{ "Jabbers",		KSTAT_KV_U_PACKETS, },
8454 	{ "Collisions",		KSTAT_KV_U_NONE, },
8455 	{ "64B",		KSTAT_KV_U_PACKETS, },
8456 	{ "65-127B",		KSTAT_KV_U_PACKETS, },
8457 	{ "128-255B",		KSTAT_KV_U_PACKETS, },
8458 	{ "256-511B",		KSTAT_KV_U_PACKETS, },
8459 	{ "512-1023B",		KSTAT_KV_U_PACKETS, },
8460 	{ "1024-1518B",		KSTAT_KV_U_PACKETS, },
8461 	{ "1519-2047B",		KSTAT_KV_U_PACKETS, },
8462 	{ "2048-4095B",		KSTAT_KV_U_PACKETS, },
8463 	{ "4096-8191B",		KSTAT_KV_U_PACKETS, },
8464 	{ "8192-10239B",	KSTAT_KV_U_PACKETS, },
8465 };
8466 CTASSERT(nitems(mcx_ppcnt_rfc2819_tpl) == mcx_ppcnt_rfc2819_count);
8467 
8468 static const struct mcx_ppcnt mcx_ppcnt_rfc3635_tpl[] = {
8469 	{ "Alignment Errs",	KSTAT_KV_U_PACKETS, },
8470 	{ "FCS Errs",		KSTAT_KV_U_PACKETS, },
8471 	{ "Single Colls",	KSTAT_KV_U_PACKETS, },
8472 	{ "Multiple Colls",	KSTAT_KV_U_PACKETS, },
8473 	{ "SQE Test Errs",	KSTAT_KV_U_NONE, },
8474 	{ "Deferred Tx",	KSTAT_KV_U_PACKETS, },
8475 	{ "Late Colls",		KSTAT_KV_U_NONE, },
8476 	{ "Exess Colls",	KSTAT_KV_U_NONE, },
8477 	{ "Int MAC Tx Errs",	KSTAT_KV_U_PACKETS, },
8478 	{ "CSM Sense Errs",	KSTAT_KV_U_NONE, },
8479 	{ "Too Long",		KSTAT_KV_U_PACKETS, },
8480 	{ "Int MAC Rx Errs",	KSTAT_KV_U_PACKETS, },
8481 	{ "Symbol Errs",	KSTAT_KV_U_NONE, },
8482 	{ "Unknown Control",	KSTAT_KV_U_PACKETS, },
8483 	{ "Pause Rx",		KSTAT_KV_U_PACKETS, },
8484 	{ "Pause Tx",		KSTAT_KV_U_PACKETS, },
8485 };
8486 CTASSERT(nitems(mcx_ppcnt_rfc3635_tpl) == mcx_ppcnt_rfc3635_count);
8487 
8488 struct mcx_kstat_ppcnt {
8489 	const char		*ksp_name;
8490 	const struct mcx_ppcnt	*ksp_tpl;
8491 	unsigned int		 ksp_n;
8492 	uint8_t			 ksp_grp;
8493 };
8494 
8495 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_ieee8023 = {
8496 	.ksp_name =		"ieee802.3",
8497 	.ksp_tpl =		mcx_ppcnt_ieee8023_tpl,
8498 	.ksp_n =		nitems(mcx_ppcnt_ieee8023_tpl),
8499 	.ksp_grp =		MCX_REG_PPCNT_GRP_IEEE8023,
8500 };
8501 
8502 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2863 = {
8503 	.ksp_name =		"rfc2863",
8504 	.ksp_tpl =		mcx_ppcnt_rfc2863_tpl,
8505 	.ksp_n =		nitems(mcx_ppcnt_rfc2863_tpl),
8506 	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC2863,
8507 };
8508 
8509 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2819 = {
8510 	.ksp_name =		"rfc2819",
8511 	.ksp_tpl =		mcx_ppcnt_rfc2819_tpl,
8512 	.ksp_n =		nitems(mcx_ppcnt_rfc2819_tpl),
8513 	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC2819,
8514 };
8515 
8516 static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc3635 = {
8517 	.ksp_name =		"rfc3635",
8518 	.ksp_tpl =		mcx_ppcnt_rfc3635_tpl,
8519 	.ksp_n =		nitems(mcx_ppcnt_rfc3635_tpl),
8520 	.ksp_grp =		MCX_REG_PPCNT_GRP_RFC3635,
8521 };
8522 
8523 static int	mcx_kstat_ppcnt_read(struct kstat *);
8524 
8525 static void	mcx_kstat_attach_tmps(struct mcx_softc *sc);
8526 static void	mcx_kstat_attach_queues(struct mcx_softc *sc);
8527 
8528 static struct kstat *
8529 mcx_kstat_attach_ppcnt(struct mcx_softc *sc,
8530     const struct mcx_kstat_ppcnt *ksp)
8531 {
8532 	struct kstat *ks;
8533 	struct kstat_kv *kvs;
8534 	unsigned int i;
8535 
8536 	ks = kstat_create(DEVNAME(sc), 0, ksp->ksp_name, 0, KSTAT_T_KV, 0);
8537 	if (ks == NULL)
8538 		return (NULL);
8539 
8540 	kvs = mallocarray(ksp->ksp_n, sizeof(*kvs),
8541 	    M_DEVBUF, M_WAITOK);
8542 
8543 	for (i = 0; i < ksp->ksp_n; i++) {
8544 		const struct mcx_ppcnt *tpl = &ksp->ksp_tpl[i];
8545 
8546 		kstat_kv_unit_init(&kvs[i], tpl->name,
8547 		    KSTAT_KV_T_COUNTER64, tpl->unit);
8548 	}
8549 
8550 	ks->ks_softc = sc;
8551 	ks->ks_ptr = (void *)ksp;
8552 	ks->ks_data = kvs;
8553 	ks->ks_datalen = ksp->ksp_n * sizeof(*kvs);
8554 	ks->ks_read = mcx_kstat_ppcnt_read;
8555 	kstat_set_wlock(ks, &sc->sc_cmdq_kstat_lk);
8556 
8557 	kstat_install(ks);
8558 
8559 	return (ks);
8560 }
8561 
8562 static void
8563 mcx_kstat_attach(struct mcx_softc *sc)
8564 {
8565 	sc->sc_kstat_ieee8023 = mcx_kstat_attach_ppcnt(sc,
8566 	    &mcx_kstat_ppcnt_ieee8023);
8567 	sc->sc_kstat_rfc2863 = mcx_kstat_attach_ppcnt(sc,
8568 	    &mcx_kstat_ppcnt_rfc2863);
8569 	sc->sc_kstat_rfc2819 = mcx_kstat_attach_ppcnt(sc,
8570 	    &mcx_kstat_ppcnt_rfc2819);
8571 	sc->sc_kstat_rfc3635 = mcx_kstat_attach_ppcnt(sc,
8572 	    &mcx_kstat_ppcnt_rfc3635);
8573 
8574 	mcx_kstat_attach_tmps(sc);
8575 	mcx_kstat_attach_queues(sc);
8576 }
8577 
8578 static int
8579 mcx_kstat_ppcnt_read(struct kstat *ks)
8580 {
8581 	struct mcx_softc *sc = ks->ks_softc;
8582 	struct mcx_kstat_ppcnt *ksp = ks->ks_ptr;
8583 	struct mcx_reg_ppcnt ppcnt = {
8584 		.ppcnt_grp = ksp->ksp_grp,
8585 		.ppcnt_local_port = 1,
8586 	};
8587 	struct kstat_kv *kvs = ks->ks_data;
8588 	uint64_t *vs = (uint64_t *)&ppcnt.ppcnt_counter_set;
8589 	unsigned int i;
8590 	int rv;
8591 
8592 	rv = mcx_access_hca_reg(sc, MCX_REG_PPCNT, MCX_REG_OP_READ,
8593 	    &ppcnt, sizeof(ppcnt), MCX_CMDQ_SLOT_KSTAT);
8594 	if (rv != 0)
8595 		return (EIO);
8596 
8597 	nanouptime(&ks->ks_updated);
8598 
8599 	for (i = 0; i < ksp->ksp_n; i++)
8600 		kstat_kv_u64(&kvs[i]) = bemtoh64(&vs[i]);
8601 
8602 	return (0);
8603 }
8604 
8605 struct mcx_kstat_mtmp {
8606 	struct kstat_kv		ktmp_name;
8607 	struct kstat_kv		ktmp_temperature;
8608 	struct kstat_kv		ktmp_threshold_lo;
8609 	struct kstat_kv		ktmp_threshold_hi;
8610 };
8611 
8612 static const struct mcx_kstat_mtmp mcx_kstat_mtmp_tpl = {
8613 	KSTAT_KV_INITIALIZER("name",		KSTAT_KV_T_ISTR),
8614 	KSTAT_KV_INITIALIZER("temperature",	KSTAT_KV_T_TEMP),
8615 	KSTAT_KV_INITIALIZER("lo threshold",	KSTAT_KV_T_TEMP),
8616 	KSTAT_KV_INITIALIZER("hi threshold",	KSTAT_KV_T_TEMP),
8617 };
8618 
8619 static const struct timeval mcx_kstat_mtmp_rate = { 1, 0 };
8620 
8621 static int mcx_kstat_mtmp_read(struct kstat *);
8622 
8623 static void
8624 mcx_kstat_attach_tmps(struct mcx_softc *sc)
8625 {
8626 	struct kstat *ks;
8627 	struct mcx_reg_mcam mcam;
8628 	struct mcx_reg_mtcap mtcap;
8629 	struct mcx_kstat_mtmp *ktmp;
8630 	uint64_t map;
8631 	unsigned int i, n;
8632 
8633 	memset(&mtcap, 0, sizeof(mtcap));
8634 	memset(&mcam, 0, sizeof(mcam));
8635 
8636 	if (sc->sc_mcam_reg == 0) {
8637 		/* no management capabilities */
8638 		return;
8639 	}
8640 
8641 	if (mcx_access_hca_reg(sc, MCX_REG_MCAM, MCX_REG_OP_READ,
8642 	    &mcam, sizeof(mcam), MCX_CMDQ_SLOT_POLL) != 0) {
8643 		/* unable to check management capabilities? */
8644 		return;
8645 	}
8646 
8647 	if (MCX_BITFIELD_BIT(mcam.mcam_feature_cap_mask,
8648 	    MCX_MCAM_FEATURE_CAP_SENSOR_MAP) == 0) {
8649 		/* no sensor map */
8650 		return;
8651 	}
8652 
8653 	if (mcx_access_hca_reg(sc, MCX_REG_MTCAP, MCX_REG_OP_READ,
8654 	    &mtcap, sizeof(mtcap), MCX_CMDQ_SLOT_POLL) != 0) {
8655 		/* unable to find temperature sensors */
8656 		return;
8657 	}
8658 
8659 	sc->sc_kstat_mtmp_count = mtcap.mtcap_sensor_count;
8660 	sc->sc_kstat_mtmp = mallocarray(sc->sc_kstat_mtmp_count,
8661 	    sizeof(*sc->sc_kstat_mtmp), M_DEVBUF, M_WAITOK);
8662 
8663 	n = 0;
8664 	map = bemtoh64(&mtcap.mtcap_sensor_map);
8665 	for (i = 0; i < sizeof(map) * NBBY; i++) {
8666 		if (!ISSET(map, (1ULL << i)))
8667 			continue;
8668 
8669 		ks = kstat_create(DEVNAME(sc), 0, "temperature", i,
8670 		    KSTAT_T_KV, 0);
8671 		if (ks == NULL) {
8672 			/* unable to attach temperature sensor %u, i */
8673 			continue;
8674 		}
8675 
8676 		ktmp = malloc(sizeof(*ktmp), M_DEVBUF, M_WAITOK|M_ZERO);
8677 		*ktmp = mcx_kstat_mtmp_tpl;
8678 
8679 		ks->ks_data = ktmp;
8680 		ks->ks_datalen = sizeof(*ktmp);
8681 		TIMEVAL_TO_TIMESPEC(&mcx_kstat_mtmp_rate, &ks->ks_interval);
8682 		ks->ks_read = mcx_kstat_mtmp_read;
8683 		kstat_set_wlock(ks, &sc->sc_cmdq_kstat_lk);
8684 
8685 		ks->ks_softc = sc;
8686 		kstat_install(ks);
8687 
8688 		sc->sc_kstat_mtmp[n++] = ks;
8689 		if (n >= sc->sc_kstat_mtmp_count)
8690 			break;
8691 	}
8692 }
8693 
8694 static uint64_t
8695 mcx_tmp_to_uK(uint16_t *t)
8696 {
8697 	int64_t mt = (int16_t)bemtoh16(t); /* 0.125 C units */
8698 	mt *= 1000000 / 8; /* convert to uC */
8699 	mt += 273150000; /* convert to uK */
8700 
8701 	return (mt);
8702 }
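
/*
 * worked example for mcx_tmp_to_uK above: the sensor reports signed
 * 0.125 degC units, so a raw reading of 400 (0x0190) is 50 degC,
 * which becomes 400 * 125000 + 273150000 = 323150000 uK.
 */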
8703 
8704 static int
8705 mcx_kstat_mtmp_read(struct kstat *ks)
8706 {
8707 	struct mcx_softc *sc = ks->ks_softc;
8708 	struct mcx_kstat_mtmp *ktmp = ks->ks_data;
8709 	struct mcx_reg_mtmp mtmp;
8710 	int rv;
8711 	struct timeval updated;
8712 
8713 	TIMESPEC_TO_TIMEVAL(&updated, &ks->ks_updated);
8714 
8715 	if (!ratecheck(&updated, &mcx_kstat_mtmp_rate))
8716 		return (0);
8717 
8718 	memset(&mtmp, 0, sizeof(mtmp));
8719 	htobem16(&mtmp.mtmp_sensor_index, ks->ks_unit);
8720 
8721 	rv = mcx_access_hca_reg(sc, MCX_REG_MTMP, MCX_REG_OP_READ,
8722 	    &mtmp, sizeof(mtmp), MCX_CMDQ_SLOT_KSTAT);
8723 	if (rv != 0)
8724 		return (EIO);
8725 
8726 	memset(kstat_kv_istr(&ktmp->ktmp_name), 0,
8727 	    sizeof(kstat_kv_istr(&ktmp->ktmp_name)));
8728 	memcpy(kstat_kv_istr(&ktmp->ktmp_name),
8729 	    mtmp.mtmp_sensor_name, sizeof(mtmp.mtmp_sensor_name));
8730 	kstat_kv_temp(&ktmp->ktmp_temperature) =
8731 	    mcx_tmp_to_uK(&mtmp.mtmp_temperature);
8732 	kstat_kv_temp(&ktmp->ktmp_threshold_lo) =
8733 	    mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_lo);
8734 	kstat_kv_temp(&ktmp->ktmp_threshold_hi) =
8735 	    mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_hi);
8736 
8737 	TIMEVAL_TO_TIMESPEC(&updated, &ks->ks_updated);
8738 
8739 	return (0);
8740 }
8741 
8742 struct mcx_queuestat {
8743 	char			 name[KSTAT_KV_NAMELEN];
8744 	enum kstat_kv_type	 type;
8745 };
8746 
8747 static const struct mcx_queuestat mcx_queue_kstat_tpl[] = {
8748 	{ "RQ SW prod",		KSTAT_KV_T_COUNTER64 },
8749 	{ "RQ HW prod",		KSTAT_KV_T_COUNTER64 },
8750 	{ "RQ HW cons",		KSTAT_KV_T_COUNTER64 },
8751 	{ "RQ HW state",	KSTAT_KV_T_ISTR },
8752 
8753 	{ "SQ SW prod",		KSTAT_KV_T_COUNTER64 },
8754 	{ "SQ SW cons",		KSTAT_KV_T_COUNTER64 },
8755 	{ "SQ HW prod",		KSTAT_KV_T_COUNTER64 },
8756 	{ "SQ HW cons",		KSTAT_KV_T_COUNTER64 },
8757 	{ "SQ HW state",	KSTAT_KV_T_ISTR },
8758 
8759 	{ "CQ SW cons",		KSTAT_KV_T_COUNTER64 },
8760 	{ "CQ HW prod",		KSTAT_KV_T_COUNTER64 },
8761 	{ "CQ HW cons",		KSTAT_KV_T_COUNTER64 },
8762 	{ "CQ HW notify",	KSTAT_KV_T_COUNTER64 },
8763 	{ "CQ HW solicit",	KSTAT_KV_T_COUNTER64 },
8764 	{ "CQ HW status",	KSTAT_KV_T_ISTR },
8765 	{ "CQ HW state",	KSTAT_KV_T_ISTR },
8766 
8767 	{ "EQ SW cons",		KSTAT_KV_T_COUNTER64 },
8768 	{ "EQ HW prod",		KSTAT_KV_T_COUNTER64 },
8769 	{ "EQ HW cons",		KSTAT_KV_T_COUNTER64 },
8770 	{ "EQ HW status",	KSTAT_KV_T_ISTR },
8771 	{ "EQ HW state",	KSTAT_KV_T_ISTR },
8772 };
8773 
8774 static int	mcx_kstat_queue_read(struct kstat *);
8775 
8776 static void
8777 mcx_kstat_attach_queues(struct mcx_softc *sc)
8778 {
8779 	struct kstat *ks;
8780 	struct kstat_kv *kvs;
8781 	int q, i;
8782 
8783 	for (q = 0; q < intrmap_count(sc->sc_intrmap); q++) {
8784 		ks = kstat_create(DEVNAME(sc), 0, "mcx-queues", q,
8785 		    KSTAT_T_KV, 0);
8786 		if (ks == NULL) {
8787 			/* unable to attach queue stats %u, q */
8788 			continue;
8789 		}
8790 
8791 		kvs = mallocarray(nitems(mcx_queue_kstat_tpl),
8792 		    sizeof(*kvs), M_DEVBUF, M_WAITOK);
8793 
8794 		for (i = 0; i < nitems(mcx_queue_kstat_tpl); i++) {
8795 			const struct mcx_queuestat *tpl =
8796 			    &mcx_queue_kstat_tpl[i];
8797 
8798 			kstat_kv_init(&kvs[i], tpl->name, tpl->type);
8799 		}
8800 
8801 		ks->ks_softc = &sc->sc_queues[q];
8802 		ks->ks_data = kvs;
8803 		ks->ks_datalen = nitems(mcx_queue_kstat_tpl) * sizeof(*kvs);
8804 		ks->ks_read = mcx_kstat_queue_read;
8805 
8806 		sc->sc_queues[q].q_kstat = ks;
8807 		kstat_install(ks);
8808 	}
8809 }
8810 
8811 static int
8812 mcx_kstat_queue_read(struct kstat *ks)
8813 {
8814 	struct mcx_queues *q = ks->ks_softc;
8815 	struct mcx_softc *sc = q->q_sc;
8816 	struct kstat_kv *kvs = ks->ks_data;
8817 	union {
8818 		struct mcx_rq_ctx rq;
8819 		struct mcx_sq_ctx sq;
8820 		struct mcx_cq_ctx cq;
8821 		struct mcx_eq_ctx eq;
8822 	} u;
8823 	const char *text;
8824 	int error = 0;
8825 
	/* receive queue */
	if (mcx_query_rq(sc, &q->q_rx, &u.rq) != 0) {
		error = EIO;
		goto out;
	}

	kstat_kv_u64(kvs++) = q->q_rx.rx_prod;
	kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_sw_counter);
	kstat_kv_u64(kvs++) = bemtoh32(&u.rq.rq_wq.wq_hw_counter);
	switch ((bemtoh32(&u.rq.rq_flags) & MCX_RQ_CTX_STATE_MASK) >>
	    MCX_RQ_CTX_STATE_SHIFT) {
	case MCX_RQ_CTX_STATE_RST:
		text = "RST";
		break;
	case MCX_RQ_CTX_STATE_RDY:
		text = "RDY";
		break;
	case MCX_RQ_CTX_STATE_ERR:
		text = "ERR";
		break;
	default:
		text = "unknown";
		break;
	}
	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
	kvs++;

	/* send queue */
	if (mcx_query_sq(sc, &q->q_tx, &u.sq) != 0) {
		error = EIO;
		goto out;
	}

	kstat_kv_u64(kvs++) = q->q_tx.tx_prod;
	kstat_kv_u64(kvs++) = q->q_tx.tx_cons;
	kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_sw_counter);
	kstat_kv_u64(kvs++) = bemtoh32(&u.sq.sq_wq.wq_hw_counter);
	switch ((bemtoh32(&u.sq.sq_flags) & MCX_SQ_CTX_STATE_MASK) >>
	    MCX_SQ_CTX_STATE_SHIFT) {
	case MCX_SQ_CTX_STATE_RST:
		text = "RST";
		break;
	case MCX_SQ_CTX_STATE_RDY:
		text = "RDY";
		break;
	case MCX_SQ_CTX_STATE_ERR:
		text = "ERR";
		break;
	default:
		text = "unknown";
		break;
	}
	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
	kvs++;

	/* completion queue */
	if (mcx_query_cq(sc, &q->q_cq, &u.cq) != 0) {
		error = EIO;
		goto out;
	}

	kstat_kv_u64(kvs++) = q->q_cq.cq_cons;
	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_producer_counter);
	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_consumer_counter);
	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_notified);
	kstat_kv_u64(kvs++) = bemtoh32(&u.cq.cq_last_solicit);

	switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATUS_MASK) >>
	    MCX_CQ_CTX_STATUS_SHIFT) {
	case MCX_CQ_CTX_STATUS_OK:
		text = "OK";
		break;
	case MCX_CQ_CTX_STATUS_OVERFLOW:
		text = "overflow";
		break;
	case MCX_CQ_CTX_STATUS_WRITE_FAIL:
		text = "write fail";
		break;
	default:
		text = "unknown";
		break;
	}
	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
	kvs++;

	switch ((bemtoh32(&u.cq.cq_status) & MCX_CQ_CTX_STATE_MASK) >>
	    MCX_CQ_CTX_STATE_SHIFT) {
	case MCX_CQ_CTX_STATE_SOLICITED:
		text = "solicited";
		break;
	case MCX_CQ_CTX_STATE_ARMED:
		text = "armed";
		break;
	case MCX_CQ_CTX_STATE_FIRED:
		text = "fired";
		break;
	default:
		text = "unknown";
		break;
	}
	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
	kvs++;

	/* event queue */
	if (mcx_query_eq(sc, &q->q_eq, &u.eq) != 0) {
		error = EIO;
		goto out;
	}

	kstat_kv_u64(kvs++) = q->q_eq.eq_cons;
	kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_producer_counter);
	kstat_kv_u64(kvs++) = bemtoh32(&u.eq.eq_consumer_counter);

	switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATUS_MASK) >>
	    MCX_EQ_CTX_STATUS_SHIFT) {
	case MCX_EQ_CTX_STATUS_EQ_WRITE_FAILURE:
		text = "write fail";
		break;
	case MCX_EQ_CTX_STATUS_OK:
		text = "OK";
		break;
	default:
		text = "unknown";
		break;
	}
	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
	kvs++;

	switch ((bemtoh32(&u.eq.eq_status) & MCX_EQ_CTX_STATE_MASK) >>
	    MCX_EQ_CTX_STATE_SHIFT) {
	case MCX_EQ_CTX_STATE_ARMED:
		text = "armed";
		break;
	case MCX_EQ_CTX_STATE_FIRED:
		text = "fired";
		break;
	default:
		text = "unknown";
		break;
	}
	strlcpy(kstat_kv_istr(kvs), text, sizeof(kstat_kv_istr(kvs)));
	kvs++;

	nanouptime(&ks->ks_updated);
out:
	return (error);
}

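/*
 * Each hardware state field above is decoded with an open-coded
 * switch.  A table lookup is an equivalent, more compact alternative;
 * a minimal sketch (not part of the driver, and assuming the
 * MCX_RQ_CTX_STATE_* values are small enough to index an array):
 */
#ifdef notyet
static const char *const mcx_rq_state_names[] = {
	[MCX_RQ_CTX_STATE_RST] = "RST",
	[MCX_RQ_CTX_STATE_RDY] = "RDY",
	[MCX_RQ_CTX_STATE_ERR] = "ERR",
};

static const char *
mcx_kstat_state_name(const char *const *names, unsigned int nnames,
    unsigned int state)
{
	/* out-of-range or unnamed values decode the same as "default" */
	if (state >= nnames || names[state] == NULL)
		return ("unknown");

	return (names[state]);
}

/*
 * which would replace the RQ switch with:
 *
 *	text = mcx_kstat_state_name(mcx_rq_state_names,
 *	    nitems(mcx_rq_state_names), state);
 */
#endif
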
#endif /* NKSTAT > 0 */

static unsigned int
mcx_timecounter_read(struct timecounter *tc)
{
	struct mcx_softc *sc = tc->tc_priv;

	return (mcx_rd(sc, MCX_INTERNAL_TIMER_L));
}

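/*
 * The timecounter layer only needs the low 32 bits of the device's
 * free-running internal timer; tc_counter_mask and frequent enough
 * reads take care of wraparound.  Code that needed the full 64-bit
 * value would have to read the two halves in a loop to guard against
 * the low word rolling over between reads.  A minimal sketch, assuming
 * a MCX_INTERNAL_TIMER_H register holding the high 32 bits (neither
 * the register define nor this helper appears in this excerpt):
 */
#ifdef notyet
static uint64_t
mcx_timer_read(struct mcx_softc *sc)
{
	uint32_t hi, lo, chk;

	do {
		hi = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
		lo = mcx_rd(sc, MCX_INTERNAL_TIMER_L);
		/* reread the high word to detect a carry into it */
		chk = mcx_rd(sc, MCX_INTERNAL_TIMER_H);
	} while (hi != chk);

	return (((uint64_t)hi << 32) | lo);
}
#endif
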
static void
mcx_timecounter_attach(struct mcx_softc *sc)
{
	struct timecounter *tc = &sc->sc_timecounter;

	tc->tc_get_timecount = mcx_timecounter_read;
	tc->tc_counter_mask = ~0U;
	tc->tc_frequency = sc->sc_khz * 1000;
	tc->tc_name = sc->sc_dev.dv_xname;
	/*
	 * A negative quality keeps the kernel from selecting this
	 * timecounter on its own; it can still be chosen explicitly,
	 * e.g. via sysctl kern.timecounter.hardware.
	 */
	tc->tc_quality = -100;
	tc->tc_priv = sc;

	tc_init(tc);
}