/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 *
 * $FreeBSD$
 */

#ifndef	_SYS_EFSYS_H
#define	_SYS_EFSYS_H

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/endian.h>

#define	EFSYS_HAS_UINT64 1
#if defined(__x86_64__)
#define	EFSYS_USE_UINT64 1
#else
#define	EFSYS_USE_UINT64 0
#endif
#define	EFSYS_HAS_SSE2_M128 0
#if _BYTE_ORDER == _BIG_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 1
#define	EFSYS_IS_LITTLE_ENDIAN 0
#elif _BYTE_ORDER == _LITTLE_ENDIAN
#define	EFSYS_IS_BIG_ENDIAN 0
#define	EFSYS_IS_LITTLE_ENDIAN 1
#endif
#include "efx_types.h"

/* Common code requires this */
#if __FreeBSD_version < 800068
#define	memmove(d, s, l) bcopy(s, d, l)
#endif

/* FreeBSD equivalents of Solaris things */
#ifndef _NOTE
#define	_NOTE(s)
#endif

#ifndef B_FALSE
#define	B_FALSE	FALSE
#endif
#ifndef B_TRUE
#define	B_TRUE	TRUE
#endif

#ifndef IS_P2ALIGNED
#define	IS_P2ALIGNED(v, a)	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#endif

#ifndef P2ROUNDUP
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))
#endif

#ifndef P2ALIGN
#define	P2ALIGN(_x, _a)		((_x) & -(_a))
#endif

#ifndef ISP2
#define	ISP2(x)			(((x) & ((x) - 1)) == 0)
#endif

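/*
 * Illustrative values (the alignment must be a power of 2):
 * P2ROUNDUP(10, 8) is 16, P2ALIGN(10, 8) is 8, IS_P2ALIGNED(16, 8) is
 * true and ISP2(24) is false.
 */
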
#if defined(__x86_64__) && __FreeBSD_version >= 1000000

#define	SFXGE_USE_BUS_SPACE_8		1

#if !defined(bus_space_read_stream_8)

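/*
 * Note: the 8-byte stream accessors are not provided everywhere; on
 * little-endian x86 the ordinary accessors do not byte-swap, so they
 * are equivalent and may stand in for the stream forms.
 */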
#define	bus_space_read_stream_8(t, h, o)				\
	bus_space_read_8((t), (h), (o))

#define	bus_space_write_stream_8(t, h, o, v)				\
	bus_space_write_8((t), (h), (o), (v))

#endif

#endif

#define	ENOTACTIVE EINVAL

/* Memory type to use on FreeBSD */
MALLOC_DECLARE(M_SFXGE);

/* Machine dependent prefetch wrappers */
#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetcht0 (%0)"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetchnta (%0)"
	    :
	    : "r" (addr));
}
#elif defined(__sparc64__)
static __inline void
prefetch_read_many(void *addr)
{

	__asm__(
	    "prefetch [%0], 0"
	    :
	    : "r" (addr));
}

static __inline void
prefetch_read_once(void *addr)
{

	__asm__(
	    "prefetch [%0], 1"
	    :
	    : "r" (addr));
}
#else
static __inline void
prefetch_read_many(void *addr)
{

}

static __inline void
prefetch_read_once(void *addr)
{

}
#endif

#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
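
/*
 * Fast path for mapping a single mbuf: on i386/amd64 the physical
 * address is taken directly with pmap_kextract() and the data is
 * assumed to fit in one DMA segment; other architectures fall back to
 * a regular bus_dmamap_load_mbuf_sg() call.
 */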
static __inline void
sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
		    struct mbuf *m, bus_dma_segment_t *seg)
{
#if defined(__i386__) || defined(__amd64__)
	seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
	seg->ds_len = m->m_len;
#else
	int nsegstmp;

	bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
#endif
}

/* Modifiers used for Windows builds */
#define	__in
#define	__in_opt
#define	__in_ecount(_n)
#define	__in_ecount_opt(_n)
#define	__in_bcount(_n)
#define	__in_bcount_opt(_n)

#define	__out
#define	__out_opt
#define	__out_ecount(_n)
#define	__out_ecount_opt(_n)
#define	__out_bcount(_n)
#define	__out_bcount_opt(_n)
#define	__out_bcount_part(_n, _l)
#define	__out_bcount_part_opt(_n, _l)

#define	__deref_out

#define	__inout
#define	__inout_opt
#define	__inout_ecount(_n)
#define	__inout_ecount_opt(_n)
#define	__inout_bcount(_n)
#define	__inout_bcount_opt(_n)
#define	__inout_bcount_full_opt(_n)

#define	__deref_out_bcount_opt(n)

#define	__checkReturn
#define	__success(_x)

#define	__drv_when(_p, _c)

/* Code inclusion options */


#define	EFSYS_OPT_NAMES 1

#define	EFSYS_OPT_SIENA 1
#define	EFSYS_OPT_HUNTINGTON 1
#define	EFSYS_OPT_MEDFORD 1
#ifdef DEBUG
#define	EFSYS_OPT_CHECK_REG 1
#else
#define	EFSYS_OPT_CHECK_REG 0
#endif

#define	EFSYS_OPT_MCDI 1
#define	EFSYS_OPT_MCDI_LOGGING 0
#define	EFSYS_OPT_MCDI_PROXY_AUTH 0

#define	EFSYS_OPT_MAC_STATS 1

#define	EFSYS_OPT_LOOPBACK 0

#define	EFSYS_OPT_MON_MCDI 0
#define	EFSYS_OPT_MON_STATS 0

#define	EFSYS_OPT_PHY_STATS 1
#define	EFSYS_OPT_BIST 1
#define	EFSYS_OPT_PHY_LED_CONTROL 1
#define	EFSYS_OPT_PHY_FLAGS 0

#define	EFSYS_OPT_VPD 1
#define	EFSYS_OPT_NVRAM 1
#define	EFSYS_OPT_BOOTCFG 0

#define	EFSYS_OPT_DIAG 0
#define	EFSYS_OPT_RX_SCALE 1
#define	EFSYS_OPT_QSTATS 1
#define	EFSYS_OPT_FILTER 1
#define	EFSYS_OPT_RX_SCATTER 0

#define	EFSYS_OPT_EV_PREFETCH 0

#define	EFSYS_OPT_DECODE_INTR_FATAL 1

#define	EFSYS_OPT_LICENSING 0

#define	EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

/* ID */

typedef struct __efsys_identifier_s	efsys_identifier_t;

/* PROBE */

#ifndef DTRACE_PROBE

#define	EFSYS_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)

#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)

#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)

#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)

#else /* DTRACE_PROBE */

#define	EFSYS_PROBE(_name)						\
	DTRACE_PROBE(_name)

#define	EFSYS_PROBE1(_name, _type1, _arg1)				\
	DTRACE_PROBE1(_name, _type1, _arg1)

#define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
	DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)

#define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)						\
	DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3)

#define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)				\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)

#ifdef DTRACE_PROBE5
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
	DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#else
#define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4)
#endif

#ifdef DTRACE_PROBE6
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)						\
	DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)
#else
#define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)						\
	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
#endif

#ifdef DTRACE_PROBE7
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)				\
	DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)
#else
#define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6, _type7, _arg7)				\
	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
	    _type6, _arg6)
#endif

#endif /* DTRACE_PROBE */

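/*
 * Usage sketch (probe name and arguments are hypothetical): common code
 * emits probes such as EFSYS_PROBE2(rx_error, unsigned int, index,
 * uint32_t, flags); these become SDT/DTrace probes when DTRACE_PROBE is
 * available and compile away otherwise.
 */
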
/* DMA */

typedef uint64_t		efsys_dma_addr_t;

typedef struct efsys_mem_s {
	bus_dma_tag_t		esm_tag;
	bus_dmamap_t		esm_map;
	caddr_t			esm_base;
	efsys_dma_addr_t	esm_addr;
} efsys_mem_t;


#define	EFSYS_MEM_ZERO(_esmp, _size)					\
	do {								\
		(void) memset((_esmp)->esm_base, 0, (_size));		\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_MEM_READD(_esmp, _offset, _edp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_edp)->ed_u32[0] = *addr;				\
									\
		EFSYS_PROBE2(mem_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eqp)->eq_u64[0] = *addr;				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eqp)->eq_u32[0] = *addr++;				\
		(_eqp)->eq_u32[1] = *addr;				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eop)->eo_u64[0] = *addr++;				\
		(_eop)->eo_u64[1] = *addr;				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		(_eop)->eo_u32[0] = *addr++;				\
		(_eop)->eo_u32[1] = *addr++;				\
		(_eop)->eo_u32[2] = *addr++;				\
		(_eop)->eo_u32[3] = *addr;				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr = (_edp)->ed_u32[0];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr   = (_eqp)->eq_u64[0];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eqp)->eq_u32[0];				\
		*addr   = (_eqp)->eq_u32[1];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#if defined(__x86_64__)
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		uint64_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eop)->eo_u64[0];				\
		*addr   = (_eop)->eo_u64[1];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		uint32_t *addr;						\
									\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		addr = (void *)((_esmp)->esm_base + (_offset));		\
									\
		*addr++ = (_eop)->eo_u32[0];				\
		*addr++ = (_eop)->eo_u32[1];				\
		*addr++ = (_eop)->eo_u32[2];				\
		*addr   = (_eop)->eo_u32[3];				\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_MEM_ADDR(_esmp)						\
	((_esmp)->esm_addr)

#define	EFSYS_MEM_IS_NULL(_esmp)					\
	((_esmp)->esm_base == NULL)

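/*
 * Usage sketch (names are hypothetical): with an efsys_mem_t "mem"
 * backing a descriptor ring, EFSYS_MEM_READD(&mem, off, &dword) fetches
 * one dword from host memory and EFSYS_MEM_ADDR(&mem) yields the DMA
 * address to program into the NIC.
 */
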
/* BAR */

#define	SFXGE_LOCK_NAME_MAX	16

typedef struct efsys_bar_s {
	struct mtx		esb_lock;
	char			esb_lock_name[SFXGE_LOCK_NAME_MAX];
	bus_space_tag_t		esb_tag;
	bus_space_handle_t	esb_handle;
	int			esb_rid;
	struct resource		*esb_res;
} efsys_bar_t;

#define	SFXGE_BAR_LOCK_INIT(_esbp, _ifname)				\
	do {								\
		snprintf((_esbp)->esb_lock_name,			\
			 sizeof((_esbp)->esb_lock_name),		\
			 "%s:bar", (_ifname));				\
		mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name,	\
			 NULL, MTX_DEF);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#define	SFXGE_BAR_LOCK_DESTROY(_esbp)					\
	mtx_destroy(&(_esbp)->esb_lock)
#define	SFXGE_BAR_LOCK(_esbp)						\
	mtx_lock(&(_esbp)->esb_lock)
#define	SFXGE_BAR_UNLOCK(_esbp)						\
	mtx_unlock(&(_esbp)->esb_lock)

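/*
 * The BAR lock serializes register accesses that span multiple bus
 * cycles so that dwords from different callers do not interleave within
 * a single qword or oword access.
 */
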
#define	EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_edp)->ed_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
									\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		(_eqp)->eq_u64[0] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_eop)->eo_u64[0] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eop)->eo_u64[1] = bus_space_read_stream_8(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 8);					\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		(_eqp)->eq_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eqp)->eq_u32[1] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 4);					\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		(_eop)->eo_u32[0] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset));						\
		(_eop)->eo_u32[1] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 4);					\
		(_eop)->eo_u32[2] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 8);					\
		(_eop)->eo_u32[3] = bus_space_read_stream_4(		\
		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
		    (_offset) + 12);					\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

#define	EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the dword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the write below.				\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_dword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_edp)->ed_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the qword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the write below.				\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eqp)->eq_u64[0]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		SFXGE_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the qword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eqp)->eq_u32[0]);			\
		/*							\
		 * The last dword must be written last, so barrier the	\
		 * entire qword to make sure that neither the		\
		 * preceding nor the following write is reordered.	\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_qword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 4, (_eqp)->eq_u32[1]);			\
									\
		SFXGE_BAR_UNLOCK(_esbp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/*
 * Guarantee 64-bit aligned 64-bit writes to a write-combined BAR
 * mapping (required by the PIO hardware).
 */
#define	EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		(void) (_esbp);						\
									\
		/* FIXME: Perform a 64-bit write */			\
		KASSERT(0, ("not implemented"));			\
									\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#if defined(SFXGE_USE_BUS_SPACE_8)
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the oword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eop)->eo_u64[0]);			\
		/*							\
		 * The last qword must be written last, so barrier the	\
		 * entire oword to make sure that neither the		\
		 * preceding nor the following write is reordered.	\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_8((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 8, (_eop)->eo_u64[1]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#else
#define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		_NOTE(CONSTANTCONDITION)				\
		KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)),	\
		    ("not power of 2 aligned"));			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		/*							\
		 * Make sure that previous writes to the oword have	\
		 * completed.  This should be cheaper than a barrier	\
		 * just after the last write below.			\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset), (_eop)->eo_u32[0]);			\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 4, (_eop)->eo_u32[1]);			\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 8, (_eop)->eo_u32[2]);			\
		/*							\
		 * The last dword must be written last, so barrier the	\
		 * entire oword to make sure that neither the		\
		 * preceding nor the following write is reordered.	\
		 */							\
		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
		    (_offset), sizeof (efx_oword_t),			\
		    BUS_SPACE_BARRIER_WRITE);				\
		bus_space_write_stream_4((_esbp)->esb_tag,		\
		    (_esbp)->esb_handle,				\
		    (_offset) + 12, (_eop)->eo_u32[3]);			\
									\
		_NOTE(CONSTANTCONDITION)				\
		if (_lock)						\
			SFXGE_BAR_UNLOCK(_esbp);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/* Use the standard octo-word write for doorbell writes */
#define	EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
	do {								\
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* SPIN */

#define	EFSYS_SPIN(_us)							\
	do {								\
		DELAY(_us);						\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_SLEEP	EFSYS_SPIN
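
/*
 * Note: EFSYS_SLEEP busy-waits via DELAY(); the assumption is that
 * callers may run in contexts where sleeping is not allowed (see the
 * EFSYS_KMEM_ALLOC() comment below).
 */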

/* BARRIERS */

#define	EFSYS_MEM_READ_BARRIER()	rmb()
#define	EFSYS_PIO_WRITE_BARRIER()

/* DMA SYNC */
#define	EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)		\
	do {								\
		bus_dmamap_sync((_esmp)->esm_tag,			\
		    (_esmp)->esm_map,					\
		    BUS_DMASYNC_POSTREAD);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)		\
	do {								\
		bus_dmamap_sync((_esmp)->esm_tag,			\
		    (_esmp)->esm_map,					\
		    BUS_DMASYNC_PREWRITE);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
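
/*
 * Note: the _offset and _size arguments are ignored above; the whole
 * DMA map is synchronized on each call.
 */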

/* TIMESTAMP */

typedef	clock_t	efsys_timestamp_t;

#define	EFSYS_TIMESTAMP(_usp)						\
	do {								\
		clock_t now;						\
									\
		now = ticks;						\
		*(_usp) = now * hz / 1000000;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* KMEM */

#define	EFSYS_KMEM_ALLOC(_esip, _size, _p)				\
	do {								\
		(void) (_esip);						\
		/*							\
		 * The macro is used in non-sleepable contexts, for	\
		 * example, holding a mutex.				\
		 */							\
		(_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO);	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_KMEM_FREE(_esip, _size, _p)				\
	do {								\
		(void) (_esip);						\
		(void) (_size);						\
		free((_p), M_SFXGE);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* LOCK */

typedef struct efsys_lock_s {
	struct mtx	lock;
	char		lock_name[SFXGE_LOCK_NAME_MAX];
} efsys_lock_t;

#define	SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label)			\
	do {								\
		efsys_lock_t *__eslp = (_eslp);				\
									\
		snprintf((__eslp)->lock_name,				\
			 sizeof((__eslp)->lock_name),			\
			 "%s:%s", (_ifname), (_label));			\
		mtx_init(&(__eslp)->lock, (__eslp)->lock_name,		\
			 NULL, MTX_DEF);				\
	} while (B_FALSE)
#define	SFXGE_EFSYS_LOCK_DESTROY(_eslp)					\
	mtx_destroy(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK(_eslp)						\
	mtx_lock(&(_eslp)->lock)
#define	SFXGE_EFSYS_UNLOCK(_eslp)					\
	mtx_unlock(&(_eslp)->lock)
#define	SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp)				\
	mtx_assert(&(_eslp)->lock, MA_OWNED)

typedef int efsys_lock_state_t;

#define	EFSYS_LOCK_MAGIC	0x000010c4

#define	EFSYS_LOCK(_lockp, _state)					\
	do {								\
		SFXGE_EFSYS_LOCK(_lockp);				\
		(_state) = EFSYS_LOCK_MAGIC;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_UNLOCK(_lockp, _state)					\
	do {								\
		if ((_state) != EFSYS_LOCK_MAGIC)			\
			KASSERT(B_FALSE, ("not locked"));		\
		SFXGE_EFSYS_UNLOCK(_lockp);				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

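/*
 * Usage sketch (lock pointer is hypothetical): callers typically do
 *
 *	efsys_lock_state_t state;
 *
 *	EFSYS_LOCK(lockp, state);
 *	...critical section...
 *	EFSYS_UNLOCK(lockp, state);
 *
 * On FreeBSD the state token only carries the EFSYS_LOCK_MAGIC value
 * checked in EFSYS_UNLOCK().
 */
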
/* STAT */

typedef uint64_t		efsys_stat_t;

#define	EFSYS_STAT_INCR(_knp, _delta)					\
	do {								\
		*(_knp) += (_delta);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_DECR(_knp, _delta)					\
	do {								\
		*(_knp) -= (_delta);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET(_knp, _val)					\
	do {								\
		*(_knp) = (_val);					\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) = le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SET_DWORD(_knp, _valp)				\
	do {								\
		*(_knp) = le32toh((_valp)->ed_u32[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) += le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

#define	EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) -= le64toh((_valp)->eq_u64[0]);			\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)

/* ERR */

extern void	sfxge_err(efsys_identifier_t *, unsigned int,
		    uint32_t, uint32_t);

#if EFSYS_OPT_DECODE_INTR_FATAL
#define	EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
	do {								\
		sfxge_err((_esip), (_code), (_dword0), (_dword1));	\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#endif

/* ASSERT */

#define	EFSYS_ASSERT(_exp) do {						\
	if (!(_exp))							\
		panic("%s", #_exp);					\
	} while (0)

#define	EFSYS_ASSERT3(_x, _op, _y, _t) do {				\
	const _t __x = (_t)(_x);					\
	const _t __y = (_t)(_y);					\
	if (!(__x _op __y))						\
		panic("assertion failed at %s:%u", __FILE__, __LINE__);	\
	} while (0)

#define	EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define	EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define	EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
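
/*
 * Example (names are hypothetical): EFSYS_ASSERT3U(id, <, limit) panics
 * with file and line information when the comparison fails; the type
 * argument of EFSYS_ASSERT3() selects the comparison type.
 */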

/* ROTATE */

#define	EFSYS_HAS_ROTL_DWORD 0

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_EFSYS_H */