/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2024, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file ice_osdep.h
 * @brief OS compatibility layer
 *
 * Contains various definitions and functions which are part of an OS
 * compatibility layer for sharing code with other operating systems.
 */
#ifndef _ICE_OSDEP_H_
#define _ICE_OSDEP_H_

#include <sys/endian.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/bus_dma.h>
#include <netinet/in.h>
#include <sys/counter.h>
#include <sys/sbuf.h>

#include "ice_alloc.h"

#define ICE_INTEL_VENDOR_ID 0x8086

#define ICE_STR_BUF_LEN 32

struct ice_hw;

device_t ice_hw_to_dev(struct ice_hw *hw);

/* configure hw->debug_mask to enable debug prints */
void ice_debug(struct ice_hw *hw, uint64_t mask, char *fmt, ...) __printflike(3, 4);
void ice_debug_array(struct ice_hw *hw, uint64_t mask, uint32_t rowsize,
		     uint32_t groupsize, uint8_t *buf, size_t len);
void ice_info_fwlog(struct ice_hw *hw, uint32_t rowsize, uint32_t groupsize,
		    uint8_t *buf, size_t len);
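
/*
 * Example (illustrative sketch, not part of the driver API): with a debug
 * mask bit set in hw->debug_mask, e.g. ICE_DBG_INIT from the shared code,
 * a conditional debug print might look like:
 *
 *	ice_debug(hw, ICE_DBG_INIT, "firmware API version %u.%u\n", maj, min);
 *
 * The message is only emitted when the corresponding bit is set in
 * hw->debug_mask.
 */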

#define ice_fls(_n) flsl(_n)

#define ice_info(_hw, _fmt, args...) \
	device_printf(ice_hw_to_dev(_hw), (_fmt), ##args)

#define ice_warn(_hw, _fmt, args...) \
	device_printf(ice_hw_to_dev(_hw), (_fmt), ##args)

#define DIVIDE_AND_ROUND_UP howmany
#define ROUND_UP roundup

uint32_t rd32(struct ice_hw *hw, uint32_t reg);
uint64_t rd64(struct ice_hw *hw, uint32_t reg);
void wr32(struct ice_hw *hw, uint32_t reg, uint32_t val);
void wr64(struct ice_hw *hw, uint32_t reg, uint64_t val);

#define ice_flush(_hw) rd32((_hw), GLGEN_STAT)
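
/*
 * Example register access (illustrative sketch; ICE_SOME_REG is a
 * hypothetical offset, real offsets come from the hardware register
 * definitions):
 *
 *	uint32_t val;
 *
 *	val = rd32(hw, ICE_SOME_REG);
 *	wr32(hw, ICE_SOME_REG, val | 0x1);
 *	ice_flush(hw);	// reads GLGEN_STAT to flush posted writes
 */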

MALLOC_DECLARE(M_ICE_OSDEP);

/**
 * ice_calloc - Allocate an array of elements
 * @hw: the hardware private structure
 * @count: number of elements to allocate
 * @size: the size of each element
 *
 * Allocate memory for an array of count elements, each of the given size.
 * Note that the OS compatibility layer assumes all allocation functions will
 * provide zeroed memory.
 */
static inline void *
ice_calloc(struct ice_hw __unused *hw, size_t count, size_t size)
{
	return malloc(count * size, M_ICE_OSDEP, M_ZERO | M_NOWAIT);
}

/**
 * ice_malloc - Allocate memory of a specified size
 * @hw: the hardware private structure
 * @size: the size to allocate
 *
 * Allocates memory of the specified size. Note that the OS compatibility
 * layer assumes that all allocations will provide zeroed memory.
 */
static inline void *
ice_malloc(struct ice_hw __unused *hw, size_t size)
{
	return malloc(size, M_ICE_OSDEP, M_ZERO | M_NOWAIT);
}

/**
 * ice_memdup - Allocate a copy of some other memory
 * @hw: private hardware structure
 * @src: the source to copy from
 * @size: allocation size
 * @dir: the direction of copying
 *
 * Allocate memory of the specified size, and copy bytes from the src to fill
 * it. We don't need to zero this memory as we immediately initialize it by
 * copying from the src pointer.
 */
static inline void *
ice_memdup(struct ice_hw __unused *hw, const void *src, size_t size,
	   enum ice_memcpy_type __unused dir)
{
	void *dst = malloc(size, M_ICE_OSDEP, M_NOWAIT);

	if (dst != NULL)
		memcpy(dst, src, size);

	return dst;
}

/**
 * ice_free - Free previously allocated memory
 * @hw: the hardware private structure
 * @mem: pointer to the memory to free
 *
 * Free memory that was previously allocated by ice_calloc, ice_malloc, or
 * ice_memdup.
 */
static inline void
ice_free(struct ice_hw __unused *hw, void *mem)
{
	free(mem, M_ICE_OSDEP);
}

/* These are macros in order to drop the unused direction enumeration constant */
#define ice_memset(addr, c, len, unused) memset((addr), (c), (len))
#define ice_memcpy(dst, src, len, unused) memcpy((dst), (src), (len))
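
/*
 * Example usage of the allocation helpers (illustrative sketch; struct
 * ice_foo and the error value are placeholders). Because the allocations
 * use M_NOWAIT they can fail, so callers must check for NULL:
 *
 *	struct ice_foo *arr;
 *
 *	arr = ice_calloc(hw, 8, sizeof(*arr));
 *	if (arr == NULL)
 *		return (ICE_ERR_NO_MEMORY);
 *	...
 *	ice_free(hw, arr);
 */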

void ice_usec_delay(uint32_t time, bool sleep);
void ice_msec_delay(uint32_t time, bool sleep);
void ice_msec_pause(uint32_t time);
void ice_msec_spin(uint32_t time);

#define UNREFERENCED_PARAMETER(_p) _p = _p
#define UNREFERENCED_1PARAMETER(_p) do {			\
	UNREFERENCED_PARAMETER(_p);				\
} while (0)
#define UNREFERENCED_2PARAMETER(_p, _q) do {			\
	UNREFERENCED_PARAMETER(_p);				\
	UNREFERENCED_PARAMETER(_q);				\
} while (0)
#define UNREFERENCED_3PARAMETER(_p, _q, _r) do {		\
	UNREFERENCED_PARAMETER(_p);				\
	UNREFERENCED_PARAMETER(_q);				\
	UNREFERENCED_PARAMETER(_r);				\
} while (0)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do {		\
	UNREFERENCED_PARAMETER(_p);				\
	UNREFERENCED_PARAMETER(_q);				\
	UNREFERENCED_PARAMETER(_r);				\
	UNREFERENCED_PARAMETER(_s);				\
} while (0)
#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) do {	\
	UNREFERENCED_PARAMETER(_p);				\
	UNREFERENCED_PARAMETER(_q);				\
	UNREFERENCED_PARAMETER(_r);				\
	UNREFERENCED_PARAMETER(_s);				\
	UNREFERENCED_PARAMETER(_t);				\
} while (0)
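
/*
 * Example (illustrative sketch; ice_example is hypothetical): quiet an
 * otherwise-unused parameter that only some builds or operating systems
 * consume:
 *
 *	static void
 *	ice_example(struct ice_hw *hw, u16 flags)
 *	{
 *		UNREFERENCED_1PARAMETER(flags);
 *		...
 *	}
 */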

#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define MAKEMASK(_m, _s) ((_m) << (_s))
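
/*
 * Example: MAKEMASK(0x3, 4) == 0x30, i.e. a two-bit field mask shifted to
 * bit position 4; ARRAY_SIZE() evaluates to the element count of a fixed
 * array, e.g. ARRAY_SIZE of a u32[8] is 8.
 */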

#define LIST_HEAD_TYPE ice_list_head
#define LIST_ENTRY_TYPE ice_list_node

/**
 * @struct ice_list_node
 * @brief simplified linked list node API
 *
 * Represents a node in a linked list, which can be embedded into a structure
 * to allow that structure to be inserted into a linked list. Access to the
 * containing structure is done via __containerof.
 */
struct ice_list_node {
	LIST_ENTRY(ice_list_node) entries;
};

/**
 * @struct ice_list_head
 * @brief simplified linked list head API
 *
 * Represents the head of a linked list. The linked list should consist of
 * a series of ice_list_node structures embedded into another structure
 * accessed using __containerof. This way, the ice_list_head doesn't need to
 * know the type of the structure it contains.
 */
LIST_HEAD(ice_list_head, ice_list_node);

#define INIT_LIST_HEAD LIST_INIT
/* LIST_EMPTY doesn't need to be changed */
#define LIST_ADD(entry, head) LIST_INSERT_HEAD(head, entry, entries)
#define LIST_ADD_AFTER(entry, elem) LIST_INSERT_AFTER(elem, entry, entries)
#define LIST_DEL(entry) LIST_REMOVE(entry, entries)
#define _osdep_LIST_ENTRY(ptr, type, member) \
	__containerof(ptr, type, member)
#define LIST_FIRST_ENTRY(head, type, member) \
	_osdep_LIST_ENTRY(LIST_FIRST(head), type, member)
#define LIST_NEXT_ENTRY(ptr, unused, member) \
	_osdep_LIST_ENTRY(LIST_NEXT(&(ptr->member), entries), __typeof(*ptr), member)
#define LIST_REPLACE_INIT(old_head, new_head) do {			\
	__typeof(new_head) _new_head = (new_head);			\
	LIST_INIT(_new_head);						\
	LIST_SWAP(old_head, _new_head, ice_list_node, entries);		\
} while (0)

#define LIST_ENTRY_SAFE(_ptr, _type, _member) \
({ __typeof(_ptr) ____ptr = (_ptr); \
   ____ptr ? _osdep_LIST_ENTRY(____ptr, _type, _member) : NULL; \
})

/**
 * ice_get_list_tail - Return the pointer to the last node in the list
 * @head: the pointer to the head of the list
 *
 * A helper function for implementing LIST_ADD_TAIL and LIST_LAST_ENTRY.
 * Returns the pointer to the last node in the list, or NULL if the list is
 * empty.
 *
 * Note: due to the list implementation this is O(N), where N is the size of
 * the list. An O(1) implementation requires replacing the underlying list
 * data structure with one that has a tail pointer. This is problematic,
 * because using a simple TAILQ would require that the addition and deletion
 * be given the head of the list.
 */
static inline struct ice_list_node *
ice_get_list_tail(struct ice_list_head *head)
{
	struct ice_list_node *node = LIST_FIRST(head);

	if (node == NULL)
		return NULL;
	while (LIST_NEXT(node, entries) != NULL)
		node = LIST_NEXT(node, entries);

	return node;
}

/* TODO: This is O(N). An O(1) implementation would require a different
 * underlying list structure, such as a circularly linked list. */
#define LIST_ADD_TAIL(entry, head) do {					\
	struct ice_list_node *node = ice_get_list_tail(head);		\
									\
	if (node == NULL) {						\
		LIST_ADD(entry, head);					\
	} else {							\
		LIST_INSERT_AFTER(node, entry, entries);		\
	}								\
} while (0)

#define LIST_LAST_ENTRY(head, type, member) \
	LIST_ENTRY_SAFE(ice_get_list_tail(head), type, member)

#define LIST_FIRST_ENTRY_SAFE(head, type, member) \
	LIST_ENTRY_SAFE(LIST_FIRST(head), type, member)

#define LIST_NEXT_ENTRY_SAFE(ptr, member) \
	LIST_ENTRY_SAFE(LIST_NEXT(&(ptr->member), entries), __typeof(*ptr), member)

#define LIST_FOR_EACH_ENTRY(pos, head, unused, member) \
	for (pos = LIST_FIRST_ENTRY_SAFE(head, __typeof(*pos), member);		\
	    pos;								\
	    pos = LIST_NEXT_ENTRY_SAFE(pos, member))

#define LIST_FOR_EACH_ENTRY_SAFE(pos, n, head, unused, member) \
	for (pos = LIST_FIRST_ENTRY_SAFE(head, __typeof(*pos), member);		\
	     pos && ({ n = LIST_NEXT_ENTRY_SAFE(pos, member); 1; });		\
	     pos = n)
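
/*
 * Example usage of the list wrappers (illustrative sketch; struct ice_foo is
 * a placeholder for any structure embedding an ice_list_node, and item/other
 * are previously allocated ice_foo pointers):
 *
 *	struct ice_foo {
 *		struct ice_list_node entry;
 *		u32 value;
 *	};
 *
 *	struct ice_list_head head;
 *	struct ice_foo *pos, *tmp;
 *
 *	INIT_LIST_HEAD(&head);
 *	LIST_ADD(&item->entry, &head);		// insert at the head
 *	LIST_ADD_TAIL(&other->entry, &head);	// O(N) insert at the tail
 *	LIST_FOR_EACH_ENTRY(pos, &head, ice_foo, entry)
 *		pos->value++;
 *	LIST_FOR_EACH_ENTRY_SAFE(pos, tmp, &head, ice_foo, entry)
 *		LIST_DEL(&pos->entry);
 */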

#define STATIC static

#define NTOHS ntohs
#define NTOHL ntohl
#define HTONS htons
#define HTONL htonl
#define LE16_TO_CPU le16toh
#define LE32_TO_CPU le32toh
#define LE64_TO_CPU le64toh
#define CPU_TO_LE16 htole16
#define CPU_TO_LE32 htole32
#define CPU_TO_LE64 htole64
#define CPU_TO_BE16 htobe16
#define CPU_TO_BE32 htobe32
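
/*
 * Example: hardware descriptor fields are little-endian, so host-order
 * values are converted on the way in and out (desc is hypothetical):
 *
 *	desc->datalen = CPU_TO_LE16(len);
 *	len = LE16_TO_CPU(desc->datalen);
 */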

#define SNPRINTF snprintf

/**
 * @typedef u8
 * @brief compatibility typedef for uint8_t
 */
typedef uint8_t  u8;

/**
 * @typedef u16
 * @brief compatibility typedef for uint16_t
 */
typedef uint16_t u16;

/**
 * @typedef u32
 * @brief compatibility typedef for uint32_t
 */
typedef uint32_t u32;

/**
 * @typedef u64
 * @brief compatibility typedef for uint64_t
 */
typedef uint64_t u64;

/**
 * @typedef s8
 * @brief compatibility typedef for int8_t
 */
typedef int8_t  s8;

/**
 * @typedef s16
 * @brief compatibility typedef for int16_t
 */
typedef int16_t s16;

/**
 * @typedef s32
 * @brief compatibility typedef for int32_t
 */
typedef int32_t s32;

/**
 * @typedef s64
 * @brief compatibility typedef for int64_t
 */
typedef int64_t s64;

#define __le16 u16
#define __le32 u32
#define __le64 u64
#define __be16 u16
#define __be32 u32
#define __be64 u64

#define ice_hweight8(x) bitcount16((u8)x)
#define ice_hweight16(x) bitcount16(x)
#define ice_hweight32(x) bitcount32(x)
#define ice_hweight64(x) bitcount64(x)
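
/* Example: ice_hweight32(0xF0) == 4, the number of set bits in the value. */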

/**
 * @struct ice_dma_mem
 * @brief DMA memory allocation
 *
 * Contains DMA allocation bits, used to simplify DMA allocations.
 */
struct ice_dma_mem {
	void *va;
	uint64_t pa;
	size_t size;

	bus_dma_tag_t		tag;
	bus_dmamap_t		map;
	bus_dma_segment_t	seg;
};


void * ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *mem, u64 size);
void ice_free_dma_mem(struct ice_hw __unused *hw, struct ice_dma_mem *mem);
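
/*
 * Example usage (illustrative sketch; ring_size is a placeholder). The
 * allocator is expected to return the virtual address on success and NULL
 * on failure:
 *
 *	struct ice_dma_mem ring_mem;
 *
 *	if (ice_alloc_dma_mem(hw, &ring_mem, ring_size) == NULL)
 *		return (ICE_ERR_NO_MEMORY);
 *	// ring_mem.va is the kernel virtual address, ring_mem.pa the bus address
 *	ice_free_dma_mem(hw, &ring_mem);
 */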

/**
 * @struct ice_lock
 * @brief simplified lock API
 *
 * Contains a simple lock implementation used to lock various resources.
 */
struct ice_lock {
	struct mtx mutex;
	char name[ICE_STR_BUF_LEN];
};

extern u16 ice_lock_count;

/**
 * ice_init_lock - Initialize a lock for use
 * @lock: the lock memory to initialize
 *
 * OS compatibility layer to provide a simple locking mechanism. We use
 * a mutex for this purpose.
 */
static inline void
ice_init_lock(struct ice_lock *lock)
{
	/*
	 * Make each lock unique by incrementing a counter each time this
	 * function is called. Use of a u16 allows 65536 possible lock names
	 * before we'd hit a duplicate.
	 */
	memset(lock->name, 0, sizeof(lock->name));
	snprintf(lock->name, ICE_STR_BUF_LEN, "ice_lock_%u", ice_lock_count++);
	mtx_init(&lock->mutex, lock->name, NULL, MTX_DEF);
}

/**
 * ice_acquire_lock - Acquire the lock
 * @lock: the lock to acquire
 *
 * Acquires the mutex specified by the lock pointer.
 */
static inline void
ice_acquire_lock(struct ice_lock *lock)
{
	mtx_lock(&lock->mutex);
}

/**
 * ice_release_lock - Release the lock
 * @lock: the lock to release
 *
 * Releases the mutex specified by the lock pointer.
 */
static inline void
ice_release_lock(struct ice_lock *lock)
{
	mtx_unlock(&lock->mutex);
}

/**
 * ice_destroy_lock - Destroy the lock to de-allocate it
 * @lock: the lock to destroy
 *
 * Destroys a previously initialized lock. We only do this if the mutex was
 * previously initialized.
 */
static inline void
ice_destroy_lock(struct ice_lock *lock)
{
	if (mtx_initialized(&lock->mutex))
		mtx_destroy(&lock->mutex);
	memset(lock->name, 0, sizeof(lock->name));
}
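
/*
 * Example lock lifecycle (illustrative sketch):
 *
 *	struct ice_lock lock;
 *
 *	ice_init_lock(&lock);
 *	ice_acquire_lock(&lock);
 *	// ... touch the shared resource ...
 *	ice_release_lock(&lock);
 *	ice_destroy_lock(&lock);
 */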

/* Some function parameters are unused outside of MPASS/KASSERT macros. Rather
 * than marking these as __unused all the time, mark them as __invariant_only,
 * and define this to __unused when INVARIANTS is disabled. Otherwise, define
 * it empty so that __invariant_only parameters are caught as unused by the
 * INVARIANTS build.
 */
#ifndef INVARIANTS
#define __invariant_only __unused
#else
#define __invariant_only
#endif

#define __ALWAYS_UNUSED __unused

/**
 * ice_ilog2 - Calculate the integer log base 2 of a 64bit value
 * @n: 64bit number
 *
 * Calculates the integer log base 2 of a 64bit value, rounded down.
 *
 * @remark The integer log base 2 of zero is technically undefined, but this
 * function will return 0 in that case.
 */
static inline int
ice_ilog2(u64 n) {
	if (n == 0)
		return 0;
	return flsll(n) - 1;
}
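
/* Example: ice_ilog2(1) == 0, ice_ilog2(64) == 6, ice_ilog2(65) == 6. */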

/**
 * ice_is_pow2 - Check if the value is a power of 2
 * @n: 64bit number
 *
 * Check if the given value is a power of 2.
 *
 * @remark FreeBSD's powerof2 function treats zero as a power of 2, while this
 * function does not.
 *
 * @returns true or false
 */
static inline bool
ice_is_pow2(u64 n) {
	if (n == 0)
		return false;
	return powerof2(n);
}
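
/* Example: ice_is_pow2(64) is true; ice_is_pow2(0) and ice_is_pow2(96) are false. */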
#endif /* _ICE_OSDEP_H_ */