/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2

#if EFSYS_OPT_QSTATS
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif
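
/*
 * Note: the do { ... } while (B_FALSE) wrapper makes the macro expand to a
 * single statement so it composes safely with if/else; with EFSYS_OPT_QSTATS
 * disabled the macro expands to nothing and the per-queue statistics
 * bookkeeping compiles away entirely.
 */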

/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * It may even be a non-allocated event queue.
 */
#define	EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

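/*
 * Issue MC_CMD_SET_EVQ_TMR to reprogram an event queue's moderation timer;
 * the load and reload values are given in nanoseconds. The request and
 * response share the single payload buffer declared by EFX_MCDI_DECLARE_BUF.
 */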
static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
		MC_CMD_SET_EVQ_TMR_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

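/*
 * Build and send MC_CMD_INIT_EVQ. This variant sets the merge and
 * cut-through flags explicitly from the requested queue type; see
 * efx_mcdi_init_evq_v2() below for the variant that lets firmware choose
 * the datapath settings.
 */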
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload,
		MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN);
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	boolean_t interrupting;
	int ev_cut_through;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	/*
	 * On Huntington, RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support RX
	 * batching). If event cut-through is enabled, no RX batching will
	 * occur.
	 *
	 * So always enable RX and TX event batching, and enable event
	 * cut-through if low latency operation is wanted.
	 */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		ev_cut_through = low_latency ? 1 : 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		ev_cut_through = 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		ev_cut_through = 1;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

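	/*
	 * The event ring is passed to firmware as an array of 64-bit DMA
	 * addresses, one per EFX_BUF_SIZE-sized buffer of the ring (npages
	 * entries), each populated as low and high dwords of a qword.
	 */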
	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

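/*
 * Build and send the v2 INIT_EVQ request. Instead of setting the merge and
 * cut-through flags directly, v2 passes the requested queue type
 * (auto/throughput/low-latency) and lets firmware choose the settings; the
 * flags firmware actually chose are reported in the response and probed
 * below.
 */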
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq_v2(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload,
		MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_V2_OUT_LEN);
	boolean_t interrupting;
	unsigned int evq_type;
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
		    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_fini_evq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN,
		MC_CMD_FINI_EVQ_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_FINI_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted and
	 * that the EVQ has already been destroyed.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

	if (!ISP2(ndescs) ||
	    (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {
		rc = EINVAL;
		goto fail1;
	}

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail3;
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	if (encp->enc_init_evq_v2_supported) {
		/*
		 * On Medford the low latency license is required to enable RX
		 * and event cut through and to disable RX batching.  If event
		 * queue type in flags is auto, we let the firmware decide the
		 * settings to use. If the adapter has a low latency license,
		 * it will choose the best settings for low latency, otherwise
		 * it will choose the best settings for throughput.
		 */
		rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us,
		    flags);
		if (rc != 0)
			goto fail4;
	} else {
		/*
		 * On Huntington we need to specify the settings to use.
		 * If the event queue type in flags is auto, we favour
		 * throughput if the adapter is running virtualization-
		 * supporting firmware (i.e. the full-featured firmware
		 * variant) and low latency otherwise. The Ethernet Virtual
		 * Bridging capability is used to make this decision. (Note,
		 * though, that the low latency firmware variant is also best
		 * for throughput, and the corresponding type should be
		 * specified to choose it.)
		 */
		boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
		rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
		    low_latency);
		if (rc != 0)
			goto fail5;
	}

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
	    enp->en_family == EFX_FAMILY_MEDFORD ||
	    enp->en_family == EFX_FAMILY_MEDFORD2);

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

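/*
 * Re-arm the event queue by writing the masked read pointer to the EVQ RPTR
 * doorbell; for example, with a 1024-entry queue (mask 0x3ff) a count of
 * 1030 yields rptr 6. The bug35388 workaround writes the read pointer in
 * two halves (high then low) through the indirect doorbell register.
 */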
	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}

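/*
 * MC_CMD_DRIVER_EVENT asks the firmware to post an arbitrary 64-bit event
 * on a given event queue. ef10_ev_qpost() uses it below to deliver
 * software-generated (DRV_GEN) events.
 */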
static	__checkReturn	efx_rc_t
efx_mcdi_driver_event(
	__in		efx_nic_t *enp,
	__in		uint32_t evq,
	__in		efx_qword_t data)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
		MC_CMD_DRIVER_EVENT_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}

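/*
 * Set the interrupt moderation timer. There are three write paths: MCDI
 * (SET_EVQ_TMR) when the bug61265 workaround is in effect, the indirect
 * doorbell register for the bug35388 workaround, and the regular EVQ timer
 * register otherwise.
 */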
	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

	if (encp->enc_bug61265_workaround) {
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, 0);
		} else {
			/*
			 * NOTE: The TMR_REL field introduced in Medford2 is
			 * ignored on earlier EF10 controllers. See bug66418
			 * comment 9 for details.
			 */
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks,
			    ERF_FZ_TC_TMR_REL_VAL, ticks);
			EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, 0);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


#if EFSYS_OPT_QSTATS
			void
ef10_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER

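/*
 * In packed stream and equal-stride super-buffer modes, several packets are
 * delivered into each large RX buffer and a single event may complete more
 * than one packet. The event carries the low-order bits of a global packet
 * counter (not a per-descriptor index), plus a "rotate" flag marking a
 * switch to the next buffer.
 */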
static	__checkReturn	boolean_t
ef10_ev_rx_packed_stream(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t label;
	uint32_t pkt_count_lbits;
	uint16_t flags;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int pkt_count;
	unsigned int current_id;
	boolean_t new_buffer;

	pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);

	flags = 0;

	eersp = &eep->ee_rxq_state[label];

	/*
	 * RX_DSC_PTR_LBITS holds the least significant bits of the global
	 * (not per-buffer) packet counter. It is guaranteed that the
	 * maximum number of completed packets fits in the lbits mask, so
	 * modulo-mask arithmetic can be used to calculate the packet
	 * counter increment.
	 */
	pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_stream_npackets += pkt_count;

	if (new_buffer) {
		flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
#if EFSYS_OPT_RX_PACKED_STREAM
		/*
		 * If both packed stream and equal-stride super-buffer
		 * modes are compiled in, in theory credits should be
		 * maintained for packed stream only, but right now
		 * these modes are not distinguished in the event queue
		 * Rx queue state and it is OK to increment the counter
		 * regardless (it might even be cheaper than branching
		 * since neighbouring structure members are updated as well).
		 */
		eersp->eers_rx_packed_stream_credits++;
#endif
		eersp->eers_rx_read_ptr++;
	}
	current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
		/* RX frame truncated */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
	should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
	    flags);

	return (should_abort);
}

#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	/* Basic packet information */
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eersp = &eep->ee_rxq_state[label];

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	/*
	 * Packed stream events are very different,
	 * so handle them separately.
	 */
	if (eersp->eers_rx_packed_stream)
		return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
#endif

	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);

	/*
	 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
	 * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
	 * and values for all EF10 controllers.
	 */
	EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);

	l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington.  Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/* Increment the count of descriptors read */
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/*
	 * FIXME: add error checking to make sure this is a batched event.
	 * This could also be an aborted scatter, see Bug36629.
	 */
	if (desc_count > 1) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
		/* RX frame truncated */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		/*
		 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
		 * only 2 bits wide on Medford2. Check it is safe to use the
		 * Medford2 field and values for all EF10 controllers.
		 */
		EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
		    ESF_DE_RX_L4_CLASS_LBN);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
		    ESE_DE_L4_CLASS_UNKNOWN);

		if (l4_class == ESE_FZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		/*
		 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
		 * only 2 bits wide on Medford2. Check it is safe to use the
		 * Medford2 field and values for all EF10 controllers.
		 */
		EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
		    ESF_DE_RX_L4_CLASS_LBN);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
		    ESE_DE_L4_CLASS_UNKNOWN);

		if (l4_class == ESE_FZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t id;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}

	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

	EFSYS_ASSERT(eecp->eec_tx != NULL);
	should_abort = eecp->eec_tx(arg, label, id);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t data;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
	should_abort = B_FALSE;

	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
	if (data >= ((uint32_t)1 << 16)) {
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		return (B_TRUE);
	}

	EFSYS_ASSERT(eecp->eec_software != NULL);
	should_abort = eecp->eec_software(arg, (uint16_t)data);

	return (should_abort);
}

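/*
 * MCDI events multiplex several sources: MCDI command completions, link
 * state changes, sensor readings, MC reboot/assertion notifications, and
 * the RXQ/TXQ error and flush-done events used during queue teardown.
 */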
static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_SENSOREVT,
				MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_FWALERT_SRAM,
				MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_FWALERT,
				MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

		void
ef10_ev_rxlabel_init(
	__in		efx_evq_t *eep,
	__in		efx_rxq_t *erp,
	__in		unsigned int label,
	__in		efx_rxq_type_t type)
{
	efx_evq_rxq_state_t *eersp;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
	boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
#endif

	_NOTE(ARGUNUSED(type))
	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

#if EFSYS_OPT_RX_PACKED_STREAM
	/*
	 * For packed stream modes, the very first event will
	 * have a new buffer flag set, so it will be incremented,
	 * yielding the correct pointer. That results in simpler
	 * code than trying to detect the start-of-the-world condition
	 * in the event handler.
	 */
	eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
#else
	eersp->eers_rx_read_ptr = 0;
#endif
	eersp->eers_rx_mask = erp->er_mask;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
#endif
#if EFSYS_OPT_RX_PACKED_STREAM
	if (packed_stream) {
		eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
		    EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
		    EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
		/*
		 * A single credit is allocated to the queue when it is
		 * started. It is immediately spent by the first packet,
		 * which has the NEW BUFFER flag set; still, it must be
		 * taken into account here so as not to accidentally wrap
		 * around the maximum number of credits.
		 */
		eersp->eers_rx_packed_stream_credits--;
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
		    EFX_RX_PACKED_STREAM_MAX_CREDITS);
	}
#endif
}

		void
ef10_ev_rxlabel_fini(
	__in		efx_evq_t *eep,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = 0;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = B_FALSE;
#endif
#if EFSYS_OPT_RX_PACKED_STREAM
	eersp->eers_rx_packed_stream_credits = 0;
#endif
}

#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */