xref: /freebsd/sys/dev/sfxge/common/ef10_ev.c (revision b00ab754)
/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_QSTATS
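/*
 * Increment a per-queue event statistic. The do { } while (B_FALSE)
 * wrapper makes the macro expand to a single statement (safe in unbraced
 * if/else bodies), and the _NOTE(CONSTANTCONDITION) annotation silences
 * lint about the constant loop condition. With EFSYS_OPT_QSTATS disabled
 * the macro expands to nothing.
 */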
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif

/*
 * A non-interrupting event queue requires an interrupting event queue
 * to refer to for wake-up events, even if wake-ups are never used.
 * It may even be an event queue that has not been allocated.
 */
#define	EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);


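/*
 * Reprogram an event queue's moderation timer with the SET_EVQ_TMR MCDI
 * command, passing the load and reload values in nanoseconds. Used by
 * ef10_ev_qmoderate() when the bug61265 workaround is in effect.
 */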
static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
			    MC_CMD_SET_EVQ_TMR_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

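/*
 * Create an event queue with the INIT_EVQ MCDI command (v1 request
 * format), in which the event merging and cut-through settings are
 * specified explicitly by the driver rather than chosen by firmware.
 */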
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	boolean_t interrupting;
	int ev_cut_through;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	/*
	 * On Huntington, RX and TX event batching can only be requested
	 * together (even if the datapath firmware does not actually support
	 * RX batching). If event cut-through is enabled, no RX batching
	 * occurs.
	 *
	 * So always enable RX and TX event batching, and enable event
	 * cut-through if low latency operation is wanted.
	 */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		ev_cut_through = low_latency ? 1 : 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		ev_cut_through = 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		ev_cut_through = 1;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

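	/*
	 * Populate the DMA address list: one 64-bit bus address for each
	 * EFX_BUF_SIZE buffer backing the event queue, split into low and
	 * high dwords as the MCDI request layout expects.
	 */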
	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


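/*
 * Create an event queue with the v2 INIT_EVQ request format, which
 * passes the event queue type and lets the firmware choose the merging
 * and cut-through settings. The flags actually chosen are reported in
 * INIT_EVQ_V2_OUT_FLAGS (probed below as mcdi_evq_flags).
 */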
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq_v2(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags)
{
	efx_mcdi_req_t req;
	uint8_t payload[
		MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		    MC_CMD_INIT_EVQ_V2_OUT_LEN)];
	boolean_t interrupting;
	unsigned int evq_type;
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
		    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

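/*
 * Tear down an event queue with the FINI_EVQ MCDI command. The quiet
 * variant of MCDI execute is used, so a failure is returned to the
 * caller without being reported as an error.
 */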
static	__checkReturn	efx_rc_t
efx_mcdi_fini_evq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
			    MC_CMD_FINI_EVQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

	if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
		rc = EINVAL;
		goto fail1;
	}

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail3;
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	if (encp->enc_init_evq_v2_supported) {
		/*
		 * On Medford the low latency license is required to enable
		 * RX and event cut-through and to disable RX batching. If
		 * the event queue type in flags is auto, we let the firmware
		 * decide which settings to use. If the adapter has a low
		 * latency license, it will choose the best settings for low
		 * latency, otherwise it will choose the best settings for
		 * throughput.
		 */
		rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
		if (rc != 0)
			goto fail4;
	} else {
		/*
		 * On Huntington we need to specify the settings to use.
		 * If the event queue type in flags is auto, we favour
		 * throughput if the adapter is running virtualization
		 * supporting firmware (i.e. the full featured firmware
		 * variant) and latency otherwise. The Ethernet Virtual
		 * Bridging capability is used to make this decision. (Note
		 * though that the low latency firmware variant is also best
		 * for throughput, and the corresponding type should be
		 * specified to choose it.)
		 */
		boolean_t low_latency =
		    encp->enc_datapath_cap_evb ? B_FALSE : B_TRUE;
		rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
		    low_latency);
		if (rc != 0)
			goto fail5;
	}

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
	    enp->en_family == EFX_FAMILY_MEDFORD);

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

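	/*
	 * With the bug35388 workaround the read pointer is supplied through
	 * the ER_DD_EVQ_INDIRECT register as two writes carrying the high
	 * and then the low bits of the pointer; the static asserts below
	 * check that the two halves cover the supported queue sizes.
	 */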
	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}

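/*
 * Post a driver event on a given event queue with the DRIVER_EVENT MCDI
 * command; the 64-bit event payload is carried as two 32-bit words.
 */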
static	__checkReturn	efx_rc_t
efx_mcdi_driver_event(
	__in		efx_nic_t *enp,
	__in		uint32_t evq,
	__in		efx_qword_t data)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
			    MC_CMD_DRIVER_EVENT_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

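/*
 * Post a driver-generated software event (ESE_DZ_EV_CODE_DRV_GEN_EV)
 * carrying 16 bits of driver data. On EF10 this is routed through the
 * MC via efx_mcdi_driver_event(); the return code is ignored, so a
 * failed post is silently dropped.
 */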
			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}

	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

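	/*
	 * With the bug61265 workaround the timer must be programmed through
	 * the SET_EVQ_TMR MCDI command in nanoseconds. Otherwise it is
	 * written directly to the timer register, going through the
	 * bug35388 indirect register where that workaround applies.
	 */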
	if (encp->enc_bug61265_workaround) {
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, B_FALSE);
		} else {
			EFX_POPULATE_DWORD_2(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, B_FALSE);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


#if EFSYS_OPT_QSTATS
			void
ef10_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */


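/*
 * Decode an EF10 RX completion event and deliver it via the eec_rx
 * callback. RX events may be batched (a single event can complete
 * several descriptors), so the number of descriptors consumed is
 * computed from the low bits of the descriptor read pointer carried
 * in the event.
 */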
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	/* Basic packet information */
	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington.  Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/* Increment the count of descriptors read */
	eersp = &eep->ee_rxq_state[label];
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/*
	 * FIXME: add error checking to make sure this is a batched event.
	 * This could also be an aborted scatter, see Bug36629.
	 */
	if (desc_count > 1) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t id;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}

	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

	EFSYS_ASSERT(eecp->eec_tx != NULL);
	should_abort = eecp->eec_tx(arg, label, id);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t data;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
	should_abort = B_FALSE;

	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
	if (data >= ((uint32_t)1 << 16)) {
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		return (B_TRUE);
	}

	EFSYS_ASSERT(eecp->eec_software != NULL);
	should_abort = eecp->eec_software(arg, (uint16_t)data);

	return (should_abort);
}

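/*
 * Dispatch an MCDI event: command completions and MC reboot/assertion
 * notifications go to the MCDI state machine, while link changes,
 * sensor events, firmware alerts and RXQ/TXQ error/flush notifications
 * are delivered to the registered callbacks.
 */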
static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_SENSOREVT,
				MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_FWALERT_SRAM,
				MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_FWALERT,
				MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

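/*
 * ef10_ev_rxlabel_init() and ef10_ev_rxlabel_fini() bind and unbind the
 * per-label state that ef10_ev_rx() uses to track each RX queue's
 * descriptor read pointer.
 */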
		void
ef10_ev_rxlabel_init(
	__in		efx_evq_t *eep,
	__in		efx_rxq_t *erp,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = erp->er_mask;
}

		void
ef10_ev_rxlabel_fini(
	__in		efx_evq_t *eep,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = 0;
}

#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */