1 /* $OpenBSD: if_icevar.h,v 1.5 2024/11/26 17:34:00 stsp Exp $ */
2
3 /* Copyright (c) 2024, Intel Corporation
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * 3. Neither the name of the Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Ported from FreeBSD ice(4) by Stefan Sperling in 2024.
35 *
36 * Copyright (c) 2024 Stefan Sperling <stsp@openbsd.org>
37 *
38 * Permission to use, copy, modify, and distribute this software for any
39 * purpose with or without fee is hereby granted, provided that the above
40 * copyright notice and this permission notice appear in all copies.
41 *
42 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
43 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
44 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
45 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
46 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
47 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
48 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 */
50
51 /* Code derived from FreeBSD sys/bitstring.h:
52 *
53 * Copyright (c) 1989, 1993
54 * The Regents of the University of California. All rights reserved.
55 *
56 * This code is derived from software contributed to Berkeley by
57 * Paul Vixie.
58 *
59 * Redistribution and use in source and binary forms, with or without
60 * modification, are permitted provided that the following conditions
61 * are met:
62 * 1. Redistributions of source code must retain the above copyright
63 * notice, this list of conditions and the following disclaimer.
64 * 2. Redistributions in binary form must reproduce the above copyright
65 * notice, this list of conditions and the following disclaimer in the
66 * documentation and/or other materials provided with the distribution.
67 * 3. Neither the name of the University nor the names of its contributors
68 * may be used to endorse or promote products derived from this software
69 * without specific prior written permission.
70 *
71 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
72 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
73 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
74 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
75 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
76 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
77 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
78 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
79 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
80 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
81 * SUCH DAMAGE.
82 *
83 * Copyright (c) 2014 Spectra Logic Corporation
84 * All rights reserved.
85 *
86 * Redistribution and use in source and binary forms, with or without
87 * modification, are permitted provided that the following conditions
88 * are met:
89 * 1. Redistributions of source code must retain the above copyright
90 * notice, this list of conditions, and the following disclaimer,
91 * without modification.
92 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
93 * substantially similar to the "NO WARRANTY" disclaimer below
94 * ("Disclaimer") and any redistribution must be conditioned upon
95 * including a substantially similar Disclaimer requirement for further
96 * binary redistribution.
97 *
98 * NO WARRANTY
99 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
100 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
101 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
102 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
103 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
104 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
105 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
106 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
107 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
108 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
109 * POSSIBILITY OF SUCH DAMAGES.
110 */
111
112 #ifndef _ICE_BITOPS_H_
113 #define _ICE_BITOPS_H_
114
115 /* Define the size of the bitmap chunk */
116 typedef uint32_t ice_bitmap_t;
117
118 /* NOTE!
119 * Do not use any of the functions declared in this file
120 * on memory that was not declared with ice_declare_bitmap.
121 * Not following this rule might cause issues like split
122 * locks.
123 */
124
125 /* Number of bits per bitmap chunk */
126 #define BITS_PER_CHUNK (8 * sizeof(ice_bitmap_t))
127 /* Determine which chunk a bit belongs in */
128 #define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK)
129 /* How many chunks are required to store this many bits */
130 #define BITS_TO_CHUNKS(sz) (((sz) + BITS_PER_CHUNK - 1) / BITS_PER_CHUNK)
131 /* Which bit inside a chunk this bit corresponds to */
132 #define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK)
133 /* How many bits are valid in the last chunk, assumes nr > 0 */
134 #define LAST_CHUNK_BITS(nr) ((((nr) - 1) % BITS_PER_CHUNK) + 1)
135 /* Generate a bitmask of valid bits in the last chunk, assumes nr > 0 */
136 #define LAST_CHUNK_MASK(nr) (((ice_bitmap_t)~0) >> \
137 (BITS_PER_CHUNK - LAST_CHUNK_BITS(nr)))
138
139 #define ice_declare_bitmap(A, sz) \
140 ice_bitmap_t A[BITS_TO_CHUNKS(sz)]
141
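/*
 * Example (illustrative, not part of the driver API): with 32-bit chunks,
 * a 100-bit bitmap declared via ice_declare_bitmap() occupies
 * BITS_TO_CHUNKS(100) == 4 ice_bitmap_t words, and LAST_CHUNK_MASK(100)
 * == 0xFFFFFFFF >> (32 - 4) == 0xF covers the 4 valid bits of the final
 * chunk. A hypothetical caller might use it as follows:
 *
 *	ice_declare_bitmap(my_bits, 100);
 *
 *	ice_zero_bitmap(my_bits, 100);
 *	ice_set_bit(42, my_bits);
 *	if (ice_is_bit_set(my_bits, 42))
 *		...
 */
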
static inline bool ice_is_bit_set_internal(uint16_t nr, const ice_bitmap_t *bitmap)
143 {
144 return !!(*bitmap & BIT(nr));
145 }
146
147 /*
148 * If atomic version of the bitops are required, each specific OS
149 * implementation will need to implement OS/platform specific atomic
150 * version of the functions below:
151 *
152 * ice_clear_bit_internal
153 * ice_set_bit_internal
154 * ice_test_and_clear_bit_internal
155 * ice_test_and_set_bit_internal
156 *
157 * and define macro ICE_ATOMIC_BITOPS to overwrite the default non-atomic
158 * implementation.
159 */
static inline void ice_clear_bit_internal(uint16_t nr, ice_bitmap_t *bitmap)
161 {
162 *bitmap &= ~BIT(nr);
163 }
164
static inline void ice_set_bit_internal(uint16_t nr, ice_bitmap_t *bitmap)
166 {
167 *bitmap |= BIT(nr);
168 }
169
static inline bool ice_test_and_clear_bit_internal(uint16_t nr,
    ice_bitmap_t *bitmap)
172 {
173 if (ice_is_bit_set_internal(nr, bitmap)) {
174 ice_clear_bit_internal(nr, bitmap);
175 return true;
176 }
177 return false;
178 }
179
static inline bool ice_test_and_set_bit_internal(uint16_t nr, ice_bitmap_t *bitmap)
181 {
182 if (ice_is_bit_set_internal(nr, bitmap))
183 return true;
184
185 ice_set_bit_internal(nr, bitmap);
186 return false;
187 }
188
189 /**
190 * ice_is_bit_set - Check state of a bit in a bitmap
191 * @bitmap: the bitmap to check
192 * @nr: the bit to check
193 *
194 * Returns true if bit nr of bitmap is set. False otherwise. Assumes that nr
195 * is less than the size of the bitmap.
196 */
static inline bool ice_is_bit_set(const ice_bitmap_t *bitmap, uint16_t nr)
198 {
199 return ice_is_bit_set_internal(BIT_IN_CHUNK(nr),
200 &bitmap[BIT_CHUNK(nr)]);
201 }
202
203 /**
204 * ice_clear_bit - Clear a bit in a bitmap
205 * @bitmap: the bitmap to change
206 * @nr: the bit to change
207 *
208 * Clears the bit nr in bitmap. Assumes that nr is less than the size of the
209 * bitmap.
210 */
static inline void ice_clear_bit(uint16_t nr, ice_bitmap_t *bitmap)
212 {
213 ice_clear_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]);
214 }
215
216 /**
217 * ice_set_bit - Set a bit in a bitmap
218 * @bitmap: the bitmap to change
219 * @nr: the bit to change
220 *
221 * Sets the bit nr in bitmap. Assumes that nr is less than the size of the
222 * bitmap.
223 */
static inline void ice_set_bit(uint16_t nr, ice_bitmap_t *bitmap)
225 {
226 ice_set_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]);
227 }
228
229 /**
230 * ice_test_and_clear_bit - Atomically clear a bit and return the old bit value
231 * @nr: the bit to change
232 * @bitmap: the bitmap to change
233 *
234 * Check and clear the bit nr in bitmap. Assumes that nr is less than the size
235 * of the bitmap.
236 */
static inline bool
ice_test_and_clear_bit(uint16_t nr, ice_bitmap_t *bitmap)
239 {
240 return ice_test_and_clear_bit_internal(BIT_IN_CHUNK(nr),
241 &bitmap[BIT_CHUNK(nr)]);
242 }
243
244 /**
245 * ice_test_and_set_bit - Atomically set a bit and return the old bit value
246 * @nr: the bit to change
247 * @bitmap: the bitmap to change
248 *
249 * Check and set the bit nr in bitmap. Assumes that nr is less than the size of
250 * the bitmap.
251 */
static inline bool
ice_test_and_set_bit(uint16_t nr, ice_bitmap_t *bitmap)
254 {
255 return ice_test_and_set_bit_internal(BIT_IN_CHUNK(nr),
256 &bitmap[BIT_CHUNK(nr)]);
257 }
258
259 /* ice_zero_bitmap - set bits of bitmap to zero.
260 * @bmp: bitmap to set zeros
261 * @size: Size of the bitmaps in bits
262 *
263 * Set all of the bits in a bitmap to zero. Note that this function assumes it
264 * operates on an ice_bitmap_t which was declared using ice_declare_bitmap. It
265 * will zero every bit in the last chunk, even if those bits are beyond the
266 * size.
267 */
static inline void ice_zero_bitmap(ice_bitmap_t *bmp, uint16_t size)
269 {
270 memset(bmp, 0, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t));
271 }
272
273 /**
274 * ice_and_bitmap - bitwise AND 2 bitmaps and store result in dst bitmap
 * @dst: Destination bitmap that receives the result of the operation
 * @bmp1: The first bitmap to intersect
 * @bmp2: The second bitmap to intersect with the first
278 * @size: Size of the bitmaps in bits
279 *
280 * This function performs a bitwise AND on two "source" bitmaps of the same size
281 * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
282 * size as the "source" bitmaps to avoid buffer overflows. This function returns
283 * a non-zero value if at least one bit location from both "source" bitmaps is
284 * non-zero.
285 */
static inline int
ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
    const ice_bitmap_t *bmp2, uint16_t size)
289 {
290 ice_bitmap_t res = 0, mask;
291 uint16_t i;
292
293 /* Handle all but the last chunk */
294 for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) {
295 dst[i] = bmp1[i] & bmp2[i];
296 res |= dst[i];
297 }
298
299 /* We want to take care not to modify any bits outside of the bitmap
300 * size, even in the destination bitmap. Thus, we won't directly
301 * assign the last bitmap, but instead use a bitmask to ensure we only
302 * modify bits which are within the size, and leave any bits above the
303 * size value alone.
304 */
305 mask = LAST_CHUNK_MASK(size);
306 dst[i] = (dst[i] & ~mask) | ((bmp1[i] & bmp2[i]) & mask);
307 res |= dst[i] & mask;
308
309 return res != 0;
310 }
311
312 /**
313 * ice_or_bitmap - bitwise OR 2 bitmaps and store result in dst bitmap
 * @dst: Destination bitmap that receives the result of the operation
 * @bmp1: The first bitmap to OR
 * @bmp2: The second bitmap to OR with the first
317 * @size: Size of the bitmaps in bits
318 *
319 * This function performs a bitwise OR on two "source" bitmaps of the same size
320 * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
321 * size as the "source" bitmaps to avoid buffer overflows.
322 */
static inline void
ice_or_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
    const ice_bitmap_t *bmp2, uint16_t size)
326 {
327 ice_bitmap_t mask;
328 uint16_t i;
329
330 /* Handle all but last chunk */
331 for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
332 dst[i] = bmp1[i] | bmp2[i];
333
334 /* We want to only OR bits within the size. Furthermore, we also do
335 * not want to modify destination bits which are beyond the specified
336 * size. Use a bitmask to ensure that we only modify the bits that are
337 * within the specified size.
338 */
339 mask = LAST_CHUNK_MASK(size);
340 dst[i] = (dst[i] & ~mask) | ((bmp1[i] | bmp2[i]) & mask);
341 }
342
343 /**
344 * ice_xor_bitmap - bitwise XOR 2 bitmaps and store result in dst bitmap
 * @dst: Destination bitmap that receives the result of the operation
346 * @bmp1: The first bitmap of XOR operation
347 * @bmp2: The second bitmap to XOR with the first
348 * @size: Size of the bitmaps in bits
349 *
350 * This function performs a bitwise XOR on two "source" bitmaps of the same size
351 * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
352 * size as the "source" bitmaps to avoid buffer overflows.
353 */
static inline void
ice_xor_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
    const ice_bitmap_t *bmp2, uint16_t size)
357 {
358 ice_bitmap_t mask;
359 uint16_t i;
360
361 /* Handle all but last chunk */
362 for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
363 dst[i] = bmp1[i] ^ bmp2[i];
364
365 /* We want to only XOR bits within the size. Furthermore, we also do
366 * not want to modify destination bits which are beyond the specified
367 * size. Use a bitmask to ensure that we only modify the bits that are
368 * within the specified size.
369 */
370 mask = LAST_CHUNK_MASK(size);
371 dst[i] = (dst[i] & ~mask) | ((bmp1[i] ^ bmp2[i]) & mask);
372 }
373
374 /**
 * ice_andnot_bitmap - bitwise ANDNOT 2 bitmaps and store result in dst bitmap
 * @dst: Destination bitmap that receives the result of the operation
 * @bmp1: The first bitmap of the ANDNOT operation
 * @bmp2: The second bitmap of the ANDNOT operation
379 * @size: Size of the bitmaps in bits
380 *
381 * This function performs a bitwise ANDNOT on two "source" bitmaps of the same
382 * size, and stores the result to "dst" bitmap. The "dst" bitmap must be of the
383 * same size as the "source" bitmaps to avoid buffer overflows.
384 */
static inline void
ice_andnot_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
    const ice_bitmap_t *bmp2, uint16_t size)
388 {
389 ice_bitmap_t mask;
390 uint16_t i;
391
392 /* Handle all but last chunk */
393 for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
394 dst[i] = bmp1[i] & ~bmp2[i];
395
396 /* We want to only clear bits within the size. Furthermore, we also do
397 * not want to modify destination bits which are beyond the specified
398 * size. Use a bitmask to ensure that we only modify the bits that are
399 * within the specified size.
400 */
401 mask = LAST_CHUNK_MASK(size);
402 dst[i] = (dst[i] & ~mask) | ((bmp1[i] & ~bmp2[i]) & mask);
403 }
404
405 /**
406 * ice_find_next_bit - Find the index of the next set bit of a bitmap
407 * @bitmap: the bitmap to scan
408 * @size: the size in bits of the bitmap
409 * @offset: the offset to start at
410 *
411 * Scans the bitmap and returns the index of the first set bit which is equal
412 * to or after the specified offset. Will return size if no bits are set.
413 */
static inline uint16_t
ice_find_next_bit(const ice_bitmap_t *bitmap, uint16_t size, uint16_t offset)
416 {
417 uint16_t i, j;
418
419 if (offset >= size)
420 return size;
421
422 /* Since the starting position may not be directly on a chunk
423 * boundary, we need to be careful to handle the first chunk specially
424 */
425 i = BIT_CHUNK(offset);
426 if (bitmap[i] != 0) {
427 uint16_t off = i * BITS_PER_CHUNK;
428
429 for (j = offset % BITS_PER_CHUNK; j < BITS_PER_CHUNK; j++) {
430 if (ice_is_bit_set(bitmap, off + j))
431 return min(size, (uint16_t)(off + j));
432 }
433 }
434
435 /* Now we handle the remaining chunks, if any */
436 for (i++; i < BITS_TO_CHUNKS(size); i++) {
437 if (bitmap[i] != 0) {
438 uint16_t off = i * BITS_PER_CHUNK;
439
440 for (j = 0; j < BITS_PER_CHUNK; j++) {
441 if (ice_is_bit_set(bitmap, off + j))
442 return min(size, (uint16_t)(off + j));
443 }
444 }
445 }
446 return size;
447 }
448
449 /**
450 * ice_find_first_bit - Find the index of the first set bit of a bitmap
451 * @bitmap: the bitmap to scan
452 * @size: the size in bits of the bitmap
453 *
454 * Scans the bitmap and returns the index of the first set bit. Will return
455 * size if no bits are set.
456 */
static inline uint16_t ice_find_first_bit(const ice_bitmap_t *bitmap, uint16_t size)
458 {
459 return ice_find_next_bit(bitmap, size, 0);
460 }
461
462 #define ice_for_each_set_bit(_bitpos, _addr, _maxlen) \
463 for ((_bitpos) = ice_find_first_bit((_addr), (_maxlen)); \
464 (_bitpos) < (_maxlen); \
465 (_bitpos) = ice_find_next_bit((_addr), (_maxlen), (_bitpos) + 1))
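
/*
 * Example (illustrative): iterating over every set bit of a bitmap that was
 * declared as ice_declare_bitmap(bits, 100):
 *
 *	uint16_t i;
 *
 *	ice_for_each_set_bit(i, bits, 100)
 *		printf("bit %u is set\n", i);
 */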
466
467 /**
 * ice_is_any_bit_set - Return true if any bit in the bitmap is set
469 * @bitmap: the bitmap to check
470 * @size: the size of the bitmap
471 *
472 * Equivalent to checking if ice_find_first_bit returns a value less than the
473 * bitmap size.
474 */
static inline bool ice_is_any_bit_set(ice_bitmap_t *bitmap, uint16_t size)
476 {
477 return ice_find_first_bit(bitmap, size) < size;
478 }
479
480 /**
481 * ice_cp_bitmap - copy bitmaps
482 * @dst: bitmap destination
483 * @src: bitmap to copy from
484 * @size: Size of the bitmaps in bits
485 *
 * This function copies the bitmap from src to dst. Note that this function
 * assumes it is operating on a bitmap declared using ice_declare_bitmap. It
 * will copy the entire last chunk even if it contains bits beyond the size.
489 */
static inline void ice_cp_bitmap(ice_bitmap_t *dst, ice_bitmap_t *src, uint16_t size)
491 {
492 memcpy(dst, src, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t));
493 }
494
495 /**
496 * ice_bitmap_set - set a number of bits in bitmap from a starting position
497 * @dst: bitmap destination
498 * @pos: first bit position to set
499 * @num_bits: number of bits to set
500 *
501 * This function sets bits in a bitmap from pos to (pos + num_bits) - 1.
502 * Note that this function assumes it is operating on a bitmap declared using
503 * ice_declare_bitmap.
504 */
static inline void
ice_bitmap_set(ice_bitmap_t *dst, uint16_t pos, uint16_t num_bits)
507 {
508 uint16_t i;
509
510 for (i = pos; i < pos + num_bits; i++)
511 ice_set_bit(i, dst);
512 }
513
514 /**
515 * ice_bitmap_hweight - hamming weight of bitmap
516 * @bm: bitmap pointer
517 * @size: size of bitmap (in bits)
518 *
519 * This function determines the number of set bits in a bitmap.
520 * Note that this function assumes it is operating on a bitmap declared using
521 * ice_declare_bitmap.
522 */
static inline int
ice_bitmap_hweight(ice_bitmap_t *bm, uint16_t size)
525 {
526 int count = 0;
527 uint16_t bit = 0;
528
529 while (size > (bit = ice_find_next_bit(bm, size, bit))) {
530 count++;
531 bit++;
532 }
533
534 return count;
535 }
536
537 /**
538 * ice_cmp_bitmap - compares two bitmaps
539 * @bmp1: the bitmap to compare
540 * @bmp2: the bitmap to compare with bmp1
541 * @size: Size of the bitmaps in bits
542 *
543 * This function compares two bitmaps, and returns result as true or false.
544 */
static inline bool
ice_cmp_bitmap(ice_bitmap_t *bmp1, ice_bitmap_t *bmp2, uint16_t size)
547 {
548 ice_bitmap_t mask;
549 uint16_t i;
550
551 /* Handle all but last chunk */
552 for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
553 if (bmp1[i] != bmp2[i])
554 return false;
555
556 /* We want to only compare bits within the size */
557 mask = LAST_CHUNK_MASK(size);
558 if ((bmp1[i] & mask) != (bmp2[i] & mask))
559 return false;
560
561 return true;
562 }
563
564 /**
565 * ice_bitmap_from_array32 - copies u32 array source into bitmap destination
566 * @dst: the destination bitmap
567 * @src: the source u32 array
568 * @size: size of the bitmap (in bits)
569 *
570 * This function copies the src bitmap stored in an u32 array into the dst
571 * bitmap stored as an ice_bitmap_t.
572 */
static inline void
ice_bitmap_from_array32(ice_bitmap_t *dst, uint32_t *src, uint16_t size)
575 {
576 uint32_t remaining_bits, i;
577
578 #define BITS_PER_U32 (sizeof(uint32_t) * 8)
579 /* clear bitmap so we only have to set when iterating */
580 ice_zero_bitmap(dst, size);
581
582 for (i = 0; i < (uint32_t)(size / BITS_PER_U32); i++) {
583 uint32_t bit_offset = i * BITS_PER_U32;
584 uint32_t entry = src[i];
585 uint32_t j;
586
587 for (j = 0; j < BITS_PER_U32; j++) {
588 if (entry & BIT(j))
589 ice_set_bit((uint16_t)(j + bit_offset), dst);
590 }
591 }
592
	/* still need to check the leftover bits (i.e. if size isn't evenly
	 * divisible by BITS_PER_U32)
	 */
596 remaining_bits = size % BITS_PER_U32;
597 if (remaining_bits) {
598 uint32_t bit_offset = i * BITS_PER_U32;
599 uint32_t entry = src[i];
600 uint32_t j;
601
602 for (j = 0; j < remaining_bits; j++) {
603 if (entry & BIT(j))
604 ice_set_bit((uint16_t)(j + bit_offset), dst);
605 }
606 }
607 }
608
609 #undef BIT_CHUNK
610 #undef BIT_IN_CHUNK
611 #undef LAST_CHUNK_BITS
612 #undef LAST_CHUNK_MASK
613
614 #endif /* _ICE_BITOPS_H_ */
615
616 /*
617 * @struct ice_dma_mem
618 * @brief DMA memory allocation
619 *
620 * Contains DMA allocation bits, used to simplify DMA allocations.
621 */
622 struct ice_dma_mem {
623 void *va;
624 uint64_t pa;
625 bus_size_t size;
626
627 bus_dma_tag_t tag;
628 bus_dmamap_t map;
629 bus_dma_segment_t seg;
630 };
631 #define ICE_DMA_MAP(_m) ((_m)->map)
632 #define ICE_DMA_DVA(_m) ((_m)->map->dm_segs[0].ds_addr)
633 #define ICE_DMA_KVA(_m) ((void *)(_m)->va)
634 #define ICE_DMA_LEN(_m) ((_m)->size)
635
636 #define ICE_STR_BUF_LEN 32
637
638 /**
639 * @struct ice_lock
640 * @brief simplified lock API
641 *
642 * Contains a simple lock implementation used to lock various resources.
643 */
644 struct ice_lock {
645 struct mutex mutex;
646 char name[ICE_STR_BUF_LEN];
647 };
648
649 extern uint16_t ice_lock_count;
650
651 /*
652 * ice_init_lock - Initialize a lock for use
653 * @lock: the lock memory to initialize
654 *
655 * OS compatibility layer to provide a simple locking mechanism. We use
656 * a mutex for this purpose.
657 */
static inline void
ice_init_lock(struct ice_lock *lock)
660 {
661 /*
662 * Make each lock unique by incrementing a counter each time this
663 * function is called. Use of a uint16_t allows 65535 possible locks before
664 * we'd hit a duplicate.
665 */
666 memset(lock->name, 0, sizeof(lock->name));
667 snprintf(lock->name, ICE_STR_BUF_LEN, "ice_lock_%u", ice_lock_count++);
668 mtx_init_flags(&lock->mutex, IPL_NET, lock->name, 0);
669 }
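
/*
 * Example (illustrative): a lock initialized with ice_init_lock() is taken
 * and released with the stock OpenBSD mutex API; any acquire/release
 * wrappers the driver provides are assumed to boil down to this:
 *
 *	struct ice_lock lock;
 *
 *	ice_init_lock(&lock);
 *	mtx_enter(&lock.mutex);
 *	... critical section ...
 *	mtx_leave(&lock.mutex);
 */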
670
671 /* FW update timeout definitions are in milliseconds */
672 #define ICE_NVM_TIMEOUT 180000
673 #define ICE_CHANGE_LOCK_TIMEOUT 1000
674 #define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
675
676 #define ICE_PF_RESET_WAIT_COUNT 500
677
678 /* Error Codes */
679 enum ice_status {
680 ICE_SUCCESS = 0,
681
682 /* Generic codes : Range -1..-49 */
683 ICE_ERR_PARAM = -1,
684 ICE_ERR_NOT_IMPL = -2,
685 ICE_ERR_NOT_READY = -3,
686 ICE_ERR_NOT_SUPPORTED = -4,
687 ICE_ERR_BAD_PTR = -5,
688 ICE_ERR_INVAL_SIZE = -6,
689 ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
690 ICE_ERR_RESET_FAILED = -9,
691 ICE_ERR_FW_API_VER = -10,
692 ICE_ERR_NO_MEMORY = -11,
693 ICE_ERR_CFG = -12,
694 ICE_ERR_OUT_OF_RANGE = -13,
695 ICE_ERR_ALREADY_EXISTS = -14,
696 ICE_ERR_DOES_NOT_EXIST = -15,
697 ICE_ERR_IN_USE = -16,
698 ICE_ERR_MAX_LIMIT = -17,
699 ICE_ERR_RESET_ONGOING = -18,
700 ICE_ERR_HW_TABLE = -19,
701 ICE_ERR_FW_DDP_MISMATCH = -20,
702
703 /* NVM specific error codes: Range -50..-59 */
704 ICE_ERR_NVM = -50,
705 ICE_ERR_NVM_CHECKSUM = -51,
706 ICE_ERR_BUF_TOO_SHORT = -52,
707 ICE_ERR_NVM_BLANK_MODE = -53,
708
709 /* ARQ/ASQ specific error codes. Range -100..-109 */
710 ICE_ERR_AQ_ERROR = -100,
711 ICE_ERR_AQ_TIMEOUT = -101,
712 ICE_ERR_AQ_FULL = -102,
713 ICE_ERR_AQ_NO_WORK = -103,
714 ICE_ERR_AQ_EMPTY = -104,
715 ICE_ERR_AQ_FW_CRITICAL = -105,
716 };
717
718 #define ICE_SQ_SEND_DELAY_TIME_MS 10
719 #define ICE_SQ_SEND_MAX_EXECUTE 3
720
721 enum ice_fw_modes {
722 ICE_FW_MODE_NORMAL,
723 ICE_FW_MODE_DBG,
724 ICE_FW_MODE_REC,
725 ICE_FW_MODE_ROLLBACK
726 };
727
728 #define ICE_AQ_LEN 1023
729 #define ICE_MBXQ_LEN 512
730 #define ICE_SBQ_LEN 512
731
732 #define ICE_CTRLQ_WORK_LIMIT 256
733
734 #define ICE_DFLT_TRAFFIC_CLASS BIT(0)
735
736 /* wait up to 50 microseconds for queue state change */
737 #define ICE_Q_WAIT_RETRY_LIMIT 5
738
739 /* Maximum buffer lengths for all control queue types */
740 #define ICE_AQ_MAX_BUF_LEN 4096
741 #define ICE_MBXQ_MAX_BUF_LEN 4096
742
743 #define ICE_CTL_Q_DESC(R, i) \
744 (&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
745
746 #define ICE_CTL_Q_DESC_UNUSED(R) \
747 ((uint16_t)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
748 (R)->next_to_clean - (R)->next_to_use - 1))
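
/*
 * Example (illustrative): on a 16-entry ring with next_to_clean == 2 and
 * next_to_use == 10, ICE_CTL_Q_DESC_UNUSED() evaluates to
 * 16 + 2 - 10 - 1 == 7; the trailing "- 1" keeps one descriptor slot
 * unused, presumably so a completely full ring can be distinguished from
 * an empty one.
 */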
749
750 /* Defines that help manage the driver vs FW API checks.
751 * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
752 */
753 #define EXP_FW_API_VER_BRANCH 0x00
754 #define EXP_FW_API_VER_MAJOR 0x01
755 #define EXP_FW_API_VER_MINOR 0x05
756
757 /* Alignment for queues */
758 #define DBA_ALIGN 128
759
760 /* Maximum TSO size is (256K)-1 */
761 #define ICE_TSO_SIZE ((256*1024) - 1)
762
763 /* Minimum size for TSO MSS */
764 #define ICE_MIN_TSO_MSS 64
765
766 #define ICE_MAX_TX_SEGS 8
767 #define ICE_MAX_TSO_SEGS 128
768
769 #define ICE_MAX_DMA_SEG_SIZE ((16*1024) - 1)
770
771 #define ICE_MAX_RX_SEGS 5
772
773 #define ICE_MAX_TSO_HDR_SEGS 3
774
775 #define ICE_MSIX_BAR 3
776
777 #define ICE_DEFAULT_DESC_COUNT 1024
778 #define ICE_MAX_DESC_COUNT 8160
779 #define ICE_MIN_DESC_COUNT 64
780 #define ICE_DESC_COUNT_INCR 32
781
782 /* Maximum size of a single frame (for Tx and Rx) */
783 #define ICE_MAX_FRAME_SIZE ICE_AQ_SET_MAC_FRAME_SIZE_MAX
784
785 /* Maximum MTU size */
786 #define ICE_MAX_MTU (ICE_MAX_FRAME_SIZE - \
787 ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
788
789 #define ICE_QIDX_INVALID 0xffff
790
791 /*
 * Hardware requires that TSO packets have a segment size of at least 64
793 * bytes. To avoid sending bad frames to the hardware, the driver forces the
794 * MSS for all TSO packets to have a segment size of at least 64 bytes.
795 *
796 * However, if the MTU is reduced below a certain size, then the resulting
797 * larger MSS can result in transmitting segmented frames with a packet size
798 * larger than the MTU.
799 *
800 * Avoid this by preventing the MTU from being lowered below this limit.
801 * Alternative solutions require changing the TCP stack to disable offloading
802 * the segmentation when the requested segment size goes below 64 bytes.
803 */
804 #define ICE_MIN_MTU 112
805
806 /*
807 * The default number of queues reserved for a VF is 4, according to the
808 * AVF Base Mode specification.
809 */
810 #define ICE_DEFAULT_VF_QUEUES 4
811
812 /*
813 * An invalid VSI number to indicate that mirroring should be disabled.
814 */
815 #define ICE_INVALID_MIRROR_VSI ((u16)-1)
816 /*
817 * The maximum number of RX queues allowed per TC in a VSI.
818 */
819 #define ICE_MAX_RXQS_PER_TC 256
820
821 /*
822 * There are three settings that can be updated independently or
823 * altogether: Link speed, FEC, and Flow Control. These macros allow
824 * the caller to specify which setting(s) to update.
825 */
826 #define ICE_APPLY_LS BIT(0)
827 #define ICE_APPLY_FEC BIT(1)
828 #define ICE_APPLY_FC BIT(2)
829 #define ICE_APPLY_LS_FEC (ICE_APPLY_LS | ICE_APPLY_FEC)
830 #define ICE_APPLY_LS_FC (ICE_APPLY_LS | ICE_APPLY_FC)
831 #define ICE_APPLY_FEC_FC (ICE_APPLY_FEC | ICE_APPLY_FC)
832 #define ICE_APPLY_LS_FEC_FC (ICE_APPLY_LS_FEC | ICE_APPLY_FC)
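
/*
 * Example (illustrative): a caller that wants to re-apply only the saved
 * link speed and FEC settings while leaving flow control untouched would
 * pass ICE_APPLY_LS_FEC (ICE_APPLY_LS | ICE_APPLY_FEC) as the settings mask.
 */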
833
834 /**
835 * @enum ice_dyn_idx_t
836 * @brief Dynamic Control ITR indexes
837 *
 * This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
842 */
843 enum ice_dyn_idx_t {
844 ICE_IDX_ITR0 = 0,
845 ICE_IDX_ITR1 = 1,
846 ICE_IDX_ITR2 = 2,
847 ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
848 };
849
/* By convention ITR0 is used for RX, and ITR1 is used for TX */
851 #define ICE_RX_ITR ICE_IDX_ITR0
852 #define ICE_TX_ITR ICE_IDX_ITR1
853
854 #define ICE_ITR_MAX 8160
855
856 /* Define the default Tx and Rx ITR as 50us (translates to ~20k int/sec max) */
857 #define ICE_DFLT_TX_ITR 50
858 #define ICE_DFLT_RX_ITR 50
859
860 /**
861 * @enum ice_rx_dtype
862 * @brief DTYPE header split options
863 *
864 * This enum matches the Rx context bits to define whether header split is
865 * enabled or not.
866 */
867 enum ice_rx_dtype {
868 ICE_RX_DTYPE_NO_SPLIT = 0,
869 ICE_RX_DTYPE_HEADER_SPLIT = 1,
870 ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
871 };
872
873 #if 0
874 /* List of hardware offloads we support */
875 #define ICE_CSUM_OFFLOAD (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | CSUM_IP_SCTP | \
876 CSUM_IP6_TCP| CSUM_IP6_UDP | CSUM_IP6_SCTP | \
877 CSUM_IP_TSO | CSUM_IP6_TSO)
878
879 /* Macros to decide what kind of hardware offload to enable */
880 #define ICE_CSUM_TCP (CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP)
881 #define ICE_CSUM_UDP (CSUM_IP_UDP|CSUM_IP6_UDP)
882 #define ICE_CSUM_SCTP (CSUM_IP_SCTP|CSUM_IP6_SCTP)
883 #define ICE_CSUM_IP (CSUM_IP|CSUM_IP_TSO)
884
885 /* List of known RX CSUM offload flags */
886 #define ICE_RX_CSUM_FLAGS (CSUM_L3_CALC | CSUM_L3_VALID | CSUM_L4_CALC | \
887 CSUM_L4_VALID | CSUM_L5_CALC | CSUM_L5_VALID | \
888 CSUM_COALESCED)
889 #endif
890
891 /* List of interface capabilities supported by ice hardware */
892 #define ICE_FULL_CAPS \
893 (IFCAP_TSOv4 | IFCAP_TSOv6 | \
894 IFCAP_CSUM_TCPv4 | IFCAP_CSUM_TCPv6| \
895 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWOFFLOAD | \
896 IFCAP_VLAN_MTU | IFCAP_LRO)
897
898 /* Safe mode disables support for hardware checksums and TSO */
899 #define ICE_SAFE_CAPS \
900 (ICE_FULL_CAPS & ~(IFCAP_CSUM_TCPv4 | IFCAP_CSUM_TCPv6 | \
901 IFCAP_TSOv4 | IFCAP_TSOv6 | IFCAP_VLAN_HWOFFLOAD))
902
903 #define ICE_CAPS(sc) \
904 (ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE) ? ICE_SAFE_CAPS : ICE_FULL_CAPS)
905
906
907 /* Different control queue types: These are mainly for SW consumption. */
908 enum ice_ctl_q {
909 ICE_CTL_Q_UNKNOWN = 0,
910 ICE_CTL_Q_ADMIN,
911 ICE_CTL_Q_MAILBOX,
912 };
913
914 /* Control Queue timeout settings - max delay 1s */
915 #define ICE_CTL_Q_SQ_CMD_TIMEOUT 100000 /* Count 100000 times */
916 #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
917 #define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
918
919 struct ice_ctl_q_ring {
920 void *dma_head; /* Virtual address to DMA head */
921 struct ice_dma_mem desc_buf; /* descriptor ring memory */
922
923 union {
924 struct ice_dma_mem *sq_bi;
925 struct ice_dma_mem *rq_bi;
926 } r;
927
928 uint16_t count; /* Number of descriptors */
929
930 /* used for interrupt processing */
931 uint16_t next_to_use;
932 uint16_t next_to_clean;
933
934 /* used for queue tracking */
935 uint32_t head;
936 uint32_t tail;
937 uint32_t len;
938 uint32_t bah;
939 uint32_t bal;
940 uint32_t len_mask;
941 uint32_t len_ena_mask;
942 uint32_t len_crit_mask;
943 uint32_t head_mask;
944 };
945
946 /* sq transaction details */
947 struct ice_sq_cd {
948 struct ice_aq_desc *wb_desc;
949 };
950
951 /* rq event information */
952 struct ice_rq_event_info {
953 struct ice_aq_desc desc;
954 uint16_t msg_len;
955 uint16_t buf_len;
956 uint8_t *msg_buf;
957 };
958
959 /* Control Queue information */
960 struct ice_ctl_q_info {
961 enum ice_ctl_q qtype;
962 struct ice_ctl_q_ring rq; /* receive queue */
963 struct ice_ctl_q_ring sq; /* send queue */
964 uint32_t sq_cmd_timeout; /* send queue cmd write back timeout */
965
966 uint16_t num_rq_entries; /* receive queue depth */
967 uint16_t num_sq_entries; /* send queue depth */
968 uint16_t rq_buf_size; /* receive queue buffer size */
969 uint16_t sq_buf_size; /* send queue buffer size */
970 enum ice_aq_err sq_last_status; /* last status on send queue */
971 struct ice_lock sq_lock; /* Send queue lock */
972 struct ice_lock rq_lock; /* Receive queue lock */
973 };
974
975 enum ice_mac_type {
976 ICE_MAC_UNKNOWN = 0,
977 ICE_MAC_VF,
978 ICE_MAC_E810,
979 ICE_MAC_GENERIC,
980 ICE_MAC_GENERIC_3K,
981 ICE_MAC_GENERIC_3K_E825,
982 };
983
984 /*
 * Reset types used to determine which kind of reset was requested. These
 * defines match the RESET_TYPE field of the GLGEN_RSTAT register.
 * ICE_RESET_PFR does not match any RESET_TYPE field in the GLGEN_RSTAT
 * register because its reset source is different from the other types listed.
989 */
990 enum ice_reset_req {
991 ICE_RESET_POR = 0,
992 ICE_RESET_INVAL = 0,
993 ICE_RESET_CORER = 1,
994 ICE_RESET_GLOBR = 2,
995 ICE_RESET_EMPR = 3,
996 ICE_RESET_PFR = 4,
997 };
998
999 /* Common HW capabilities for SW use */
1000 struct ice_hw_common_caps {
1001 /* Write CSR protection */
1002 uint64_t wr_csr_prot;
1003 uint32_t switching_mode;
1004 /* switching mode supported - EVB switching (including cloud) */
1005 #define ICE_NVM_IMAGE_TYPE_EVB 0x0
1006
	/* Manageability mode & supported protocols over MCTP */
1008 uint32_t mgmt_mode;
1009 #define ICE_MGMT_MODE_PASS_THRU_MODE_M 0xF
1010 #define ICE_MGMT_MODE_CTL_INTERFACE_M 0xF0
1011 #define ICE_MGMT_MODE_REDIR_SB_INTERFACE_M 0xF00
1012
1013 uint32_t mgmt_protocols_mctp;
1014 #define ICE_MGMT_MODE_PROTO_RSVD BIT(0)
1015 #define ICE_MGMT_MODE_PROTO_PLDM BIT(1)
1016 #define ICE_MGMT_MODE_PROTO_OEM BIT(2)
1017 #define ICE_MGMT_MODE_PROTO_NC_SI BIT(3)
1018
1019 uint32_t os2bmc;
1020 uint32_t valid_functions;
1021 /* DCB capabilities */
1022 uint32_t active_tc_bitmap;
1023 uint32_t maxtc;
1024
1025 /* RSS related capabilities */
1026 uint32_t rss_table_size; /* 512 for PFs and 64 for VFs */
1027 uint32_t rss_table_entry_width; /* RSS Entry width in bits */
1028
1029 /* Tx/Rx queues */
1030 uint32_t num_rxq; /* Number/Total Rx queues */
1031 uint32_t rxq_first_id; /* First queue ID for Rx queues */
1032 uint32_t num_txq; /* Number/Total Tx queues */
1033 uint32_t txq_first_id; /* First queue ID for Tx queues */
1034
1035 /* MSI-X vectors */
1036 uint32_t num_msix_vectors;
1037 uint32_t msix_vector_first_id;
1038
1039 /* Max MTU for function or device */
1040 uint32_t max_mtu;
1041
1042 /* WOL related */
1043 uint32_t num_wol_proxy_fltr;
1044 uint32_t wol_proxy_vsi_seid;
1045
1046 /* LED/SDP pin count */
1047 uint32_t led_pin_num;
1048 uint32_t sdp_pin_num;
1049
1050 /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
1051 #define ICE_MAX_SUPPORTED_GPIO_LED 12
1052 #define ICE_MAX_SUPPORTED_GPIO_SDP 8
1053 uint8_t led[ICE_MAX_SUPPORTED_GPIO_LED];
1054 uint8_t sdp[ICE_MAX_SUPPORTED_GPIO_SDP];
1055
1056 /* SR-IOV virtualization */
1057 uint8_t sr_iov_1_1; /* SR-IOV enabled */
1058
1059 /* VMDQ */
1060 uint8_t vmdq; /* VMDQ supported */
1061
1062 /* EVB capabilities */
1063 uint8_t evb_802_1_qbg; /* Edge Virtual Bridging */
1064 uint8_t evb_802_1_qbh; /* Bridge Port Extension */
1065
1066 uint8_t dcb;
1067 uint8_t iscsi;
1068 uint8_t mgmt_cem;
1069 uint8_t iwarp;
1070 uint8_t roce_lag;
1071
1072 /* WoL and APM support */
1073 #define ICE_WOL_SUPPORT_M BIT(0)
1074 #define ICE_ACPI_PROG_MTHD_M BIT(1)
1075 #define ICE_PROXY_SUPPORT_M BIT(2)
1076 uint8_t apm_wol_support;
1077 uint8_t acpi_prog_mthd;
1078 uint8_t proxy_support;
1079 bool sec_rev_disabled;
1080 bool update_disabled;
1081 bool nvm_unified_update;
1082 bool netlist_auth;
1083 #define ICE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
1084 #define ICE_NVM_MGMT_UPDATE_DISABLED BIT(1)
1085 #define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
1086 #define ICE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
1087 /* PCIe reset avoidance */
1088 bool pcie_reset_avoidance; /* false: not supported, true: supported */
1089 /* Post update reset restriction */
1090 bool reset_restrict_support; /* false: not supported, true: supported */
1091
1092 /* External topology device images within the NVM */
1093 #define ICE_EXT_TOPO_DEV_IMG_COUNT 4
1094 uint32_t ext_topo_dev_img_ver_high[ICE_EXT_TOPO_DEV_IMG_COUNT];
1095 uint32_t ext_topo_dev_img_ver_low[ICE_EXT_TOPO_DEV_IMG_COUNT];
1096 uint8_t ext_topo_dev_img_part_num[ICE_EXT_TOPO_DEV_IMG_COUNT];
1097 #define ICE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
1098 #define ICE_EXT_TOPO_DEV_IMG_PART_NUM_M \
1099 MAKEMASK(0xFF, ICE_EXT_TOPO_DEV_IMG_PART_NUM_S)
1100 bool ext_topo_dev_img_load_en[ICE_EXT_TOPO_DEV_IMG_COUNT];
1101 #define ICE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
1102 bool ext_topo_dev_img_prog_en[ICE_EXT_TOPO_DEV_IMG_COUNT];
1103 #define ICE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
1104 bool ext_topo_dev_img_ver_schema[ICE_EXT_TOPO_DEV_IMG_COUNT];
1105 #define ICE_EXT_TOPO_DEV_IMG_VER_SCHEMA BIT(2)
1106 bool tx_sched_topo_comp_mode_en;
1107 bool dyn_flattening_en;
1108 /* Support for OROM update in Recovery Mode */
1109 bool orom_recovery_update;
1110 };
1111
1112 #define ICE_NAC_TOPO_PRIMARY_M BIT(0)
1113 #define ICE_NAC_TOPO_DUAL_M BIT(1)
1114 #define ICE_NAC_TOPO_ID_M MAKEMASK(0xf, 0)
1115
1116 enum ice_aq_res_ids {
1117 ICE_NVM_RES_ID = 1,
1118 ICE_SPD_RES_ID,
1119 ICE_CHANGE_LOCK_RES_ID,
1120 ICE_GLOBAL_CFG_LOCK_RES_ID
1121 };
1122
1128 struct ice_link_default_override_tlv {
1129 uint8_t options;
1130 #define ICE_LINK_OVERRIDE_OPT_M 0x3F
1131 #define ICE_LINK_OVERRIDE_STRICT_MODE BIT(0)
1132 #define ICE_LINK_OVERRIDE_EPCT_DIS BIT(1)
1133 #define ICE_LINK_OVERRIDE_PORT_DIS BIT(2)
1134 #define ICE_LINK_OVERRIDE_EN BIT(3)
1135 #define ICE_LINK_OVERRIDE_AUTO_LINK_DIS BIT(4)
1136 #define ICE_LINK_OVERRIDE_EEE_EN BIT(5)
1137 uint8_t phy_config;
1138 #define ICE_LINK_OVERRIDE_PHY_CFG_S 8
1139 #define ICE_LINK_OVERRIDE_PHY_CFG_M (0xC3 << ICE_LINK_OVERRIDE_PHY_CFG_S)
1140 #define ICE_LINK_OVERRIDE_PAUSE_M 0x3
1141 #define ICE_LINK_OVERRIDE_LESM_EN BIT(6)
1142 #define ICE_LINK_OVERRIDE_AUTO_FEC_EN BIT(7)
1143 uint8_t fec_options;
1144 #define ICE_LINK_OVERRIDE_FEC_OPT_M 0xFF
1145 uint8_t rsvd1;
1146 uint64_t phy_type_low;
1147 uint64_t phy_type_high;
1148 };
1149
1150 #define ICE_NVM_VER_LEN 32
1153
1154 #define ICE_MAX_TRAFFIC_CLASS 8
1155
1156 /* Max number of port to queue branches w.r.t topology */
1157 #define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
1158
1159 #define ice_for_each_traffic_class(_i) \
1160 for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
1161
1162 #define ICE_INVAL_TEID 0xFFFFFFFF
1163 #define ICE_DFLT_AGG_ID 0
1164
1165 struct ice_sched_node {
1166 struct ice_sched_node *parent;
1167 struct ice_sched_node *sibling; /* next sibling in the same layer */
1168 struct ice_sched_node **children;
1169 struct ice_aqc_txsched_elem_data info;
1170 uint32_t agg_id; /* aggregator group ID */
1171 uint16_t vsi_handle;
1172 uint8_t in_use; /* suspended or in use */
1173 uint8_t tx_sched_layer; /* Logical Layer (1-9) */
1174 uint8_t num_children;
1175 uint8_t tc_num;
1176 uint8_t owner;
1177 #define ICE_SCHED_NODE_OWNER_LAN 0
1178 #define ICE_SCHED_NODE_OWNER_AE 1
1179 #define ICE_SCHED_NODE_OWNER_RDMA 2
1180 };
1181
1182 /* Access Macros for Tx Sched Elements data */
1183 #define ICE_TXSCHED_GET_NODE_TEID(x) le32toh((x)->info.node_teid)
1184 #define ICE_TXSCHED_GET_PARENT_TEID(x) le32toh((x)->info.parent_teid)
1185 #define ICE_TXSCHED_GET_CIR_RL_ID(x) \
1186 le16toh((x)->info.cir_bw.bw_profile_idx)
1187 #define ICE_TXSCHED_GET_EIR_RL_ID(x) \
1188 le16toh((x)->info.eir_bw.bw_profile_idx)
1189 #define ICE_TXSCHED_GET_SRL_ID(x) le16toh((x)->info.srl_id)
1190 #define ICE_TXSCHED_GET_CIR_BWALLOC(x) \
1191 le16toh((x)->info.cir_bw.bw_alloc)
1192 #define ICE_TXSCHED_GET_EIR_BWALLOC(x) \
1193 le16toh((x)->info.eir_bw.bw_alloc)
1194
1195 /* Rate limit types */
1196 enum ice_rl_type {
1197 ICE_UNKNOWN_BW = 0,
1198 ICE_MIN_BW, /* for CIR profile */
1199 ICE_MAX_BW, /* for EIR profile */
1200 ICE_SHARED_BW /* for shared profile */
1201 };
1202
1203 #define ICE_SCHED_MIN_BW 500 /* in Kbps */
1204 #define ICE_SCHED_MAX_BW 100000000 /* in Kbps */
1205 #define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */
1206 #define ICE_SCHED_NO_PRIORITY 0
1207 #define ICE_SCHED_NO_BW_WT 0
1208 #define ICE_SCHED_DFLT_RL_PROF_ID 0
1209 #define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
1210 #define ICE_SCHED_DFLT_BW_WT 4
1211 #define ICE_SCHED_INVAL_PROF_ID 0xFFFF
1212 #define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
1213
1214 struct ice_driver_ver {
1215 uint8_t major_ver;
1216 uint8_t minor_ver;
1217 uint8_t build_ver;
1218 uint8_t subbuild_ver;
1219 uint8_t driver_string[32];
1220 };
1221
1222 enum ice_fc_mode {
1223 ICE_FC_NONE = 0,
1224 ICE_FC_RX_PAUSE,
1225 ICE_FC_TX_PAUSE,
1226 ICE_FC_FULL,
1227 ICE_FC_AUTO,
1228 ICE_FC_PFC,
1229 ICE_FC_DFLT
1230 };
1231
1232 enum ice_fec_mode {
1233 ICE_FEC_NONE = 0,
1234 ICE_FEC_RS,
1235 ICE_FEC_BASER,
1236 ICE_FEC_AUTO,
1237 ICE_FEC_DIS_AUTO
1238 };
1239
1240 /* Flow control (FC) parameters */
1241 struct ice_fc_info {
1242 enum ice_fc_mode current_mode; /* FC mode in effect */
1243 enum ice_fc_mode req_mode; /* FC mode requested by caller */
1244 };
1245
1246 /* Option ROM version information */
1247 struct ice_orom_info {
1248 uint8_t major; /* Major version of OROM */
1249 uint8_t patch; /* Patch version of OROM */
1250 uint16_t build; /* Build version of OROM */
1251 uint32_t srev; /* Security revision */
1252 };
1253
1254 /* NVM version information */
1255 struct ice_nvm_info {
1256 uint32_t eetrack;
1257 uint32_t srev;
1258 uint8_t major;
1259 uint8_t minor;
1260 };
1261
1262 /* Minimum Security Revision information */
1263 struct ice_minsrev_info {
1264 uint32_t nvm;
1265 uint32_t orom;
1266 uint8_t nvm_valid : 1;
1267 uint8_t orom_valid : 1;
1268 };
1269
1270 /* netlist version information */
1271 struct ice_netlist_info {
1272 uint32_t major; /* major high/low */
1273 uint32_t minor; /* minor high/low */
1274 uint32_t type; /* type high/low */
1275 uint32_t rev; /* revision high/low */
1276 uint32_t hash; /* SHA-1 hash word */
1277 uint16_t cust_ver; /* customer version */
1278 };
1279
1280 /* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
1281 * of the flash image.
1282 */
1283 enum ice_flash_bank {
1284 ICE_INVALID_FLASH_BANK,
1285 ICE_1ST_FLASH_BANK,
1286 ICE_2ND_FLASH_BANK,
1287 };
1288
1289 /* Enumeration of which flash bank is desired to read from, either the active
1290 * bank or the inactive bank. Used to abstract 1st and 2nd bank notion from
1291 * code which just wants to read the active or inactive flash bank.
1292 */
1293 enum ice_bank_select {
1294 ICE_ACTIVE_FLASH_BANK,
1295 ICE_INACTIVE_FLASH_BANK,
1296 };
1297
1298 /* information for accessing NVM, OROM, and Netlist flash banks */
1299 struct ice_bank_info {
1300 uint32_t nvm_ptr; /* Pointer to 1st NVM bank */
1301 uint32_t nvm_size; /* Size of NVM bank */
1302 uint32_t orom_ptr; /* Pointer to 1st OROM bank */
1303 uint32_t orom_size; /* Size of OROM bank */
1304 uint32_t netlist_ptr; /* Pointer to 1st Netlist bank */
1305 uint32_t netlist_size; /* Size of Netlist bank */
1306 enum ice_flash_bank nvm_bank; /* Active NVM bank */
1307 enum ice_flash_bank orom_bank; /* Active OROM bank */
1308 enum ice_flash_bank netlist_bank; /* Active Netlist bank */
1309 };
1310
1311 /* Flash Chip Information */
1312 struct ice_flash_info {
1313 struct ice_orom_info orom; /* Option ROM version info */
1314 struct ice_nvm_info nvm; /* NVM version information */
1315 struct ice_netlist_info netlist;/* Netlist version info */
1316 struct ice_bank_info banks; /* Flash Bank information */
1317 uint16_t sr_words; /* Shadow RAM size in words */
1318 uint32_t flash_size; /* Size of available flash in bytes */
1319 uint8_t blank_nvm_mode; /* is NVM empty (no FW present) */
1320 };
1321
1322 /* Checksum and Shadow RAM pointers */
1323 #define ICE_SR_NVM_CTRL_WORD 0x00
1324 #define ICE_SR_PHY_ANALOG_PTR 0x04
1325 #define ICE_SR_OPTION_ROM_PTR 0x05
1326 #define ICE_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
1327 #define ICE_SR_AUTO_GENERATED_POINTERS_PTR 0x07
1328 #define ICE_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
1329 #define ICE_SR_EMP_GLOBAL_MODULE_PTR 0x09
1330 #define ICE_SR_EMP_IMAGE_PTR 0x0B
1331 #define ICE_SR_PE_IMAGE_PTR 0x0C
1332 #define ICE_SR_CSR_PROTECTED_LIST_PTR 0x0D
1333 #define ICE_SR_MNG_CFG_PTR 0x0E
1334 #define ICE_SR_EMP_MODULE_PTR 0x0F
1335 #define ICE_SR_PBA_BLOCK_PTR 0x16
1336 #define ICE_SR_BOOT_CFG_PTR 0x132
1337 #define ICE_SR_NVM_WOL_CFG 0x19
1338 #define ICE_NVM_OROM_VER_OFF 0x02
1339 #define ICE_SR_NVM_DEV_STARTER_VER 0x18
1340 #define ICE_SR_ALTERNATE_SAN_MAC_ADDR_PTR 0x27
1341 #define ICE_SR_PERMANENT_SAN_MAC_ADDR_PTR 0x28
1342 #define ICE_SR_NVM_MAP_VER 0x29
1343 #define ICE_SR_NVM_IMAGE_VER 0x2A
1344 #define ICE_SR_NVM_STRUCTURE_VER 0x2B
1345 #define ICE_SR_NVM_EETRACK_LO 0x2D
1346 #define ICE_SR_NVM_EETRACK_HI 0x2E
1347 #define ICE_NVM_VER_LO_SHIFT 0
1348 #define ICE_NVM_VER_LO_MASK (0xff << ICE_NVM_VER_LO_SHIFT)
1349 #define ICE_NVM_VER_HI_SHIFT 12
1350 #define ICE_NVM_VER_HI_MASK (0xf << ICE_NVM_VER_HI_SHIFT)
1351 #define ICE_OEM_EETRACK_ID 0xffffffff
1352 #define ICE_OROM_VER_PATCH_SHIFT 0
1353 #define ICE_OROM_VER_PATCH_MASK (0xff << ICE_OROM_VER_PATCH_SHIFT)
1354 #define ICE_OROM_VER_BUILD_SHIFT 8
1355 #define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT)
1356 #define ICE_OROM_VER_SHIFT 24
1357 #define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
1358 #define ICE_SR_VPD_PTR 0x2F
1359 #define ICE_SR_PXE_SETUP_PTR 0x30
1360 #define ICE_SR_PXE_CFG_CUST_OPTIONS_PTR 0x31
1361 #define ICE_SR_NVM_ORIGINAL_EETRACK_LO 0x34
1362 #define ICE_SR_NVM_ORIGINAL_EETRACK_HI 0x35
1363 #define ICE_SR_VLAN_CFG_PTR 0x37
1364 #define ICE_SR_POR_REGS_AUTO_LOAD_PTR 0x38
1365 #define ICE_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
1366 #define ICE_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
1367 #define ICE_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
1368 #define ICE_SR_PHY_CFG_SCRIPT_PTR 0x3D
1369 #define ICE_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
1370 #define ICE_SR_SW_CHECKSUM_WORD 0x3F
1371 #define ICE_SR_PFA_PTR 0x40
1372 #define ICE_SR_1ST_SCRATCH_PAD_PTR 0x41
1373 #define ICE_SR_1ST_NVM_BANK_PTR 0x42
1374 #define ICE_SR_NVM_BANK_SIZE 0x43
1375 #define ICE_SR_1ST_OROM_BANK_PTR 0x44
1376 #define ICE_SR_OROM_BANK_SIZE 0x45
1377 #define ICE_SR_NETLIST_BANK_PTR 0x46
1378 #define ICE_SR_NETLIST_BANK_SIZE 0x47
1379 #define ICE_SR_EMP_SR_SETTINGS_PTR 0x48
1380 #define ICE_SR_CONFIGURATION_METADATA_PTR 0x4D
1381 #define ICE_SR_IMMEDIATE_VALUES_PTR 0x4E
1382 #define ICE_SR_LINK_DEFAULT_OVERRIDE_PTR 0x134
1383 #define ICE_SR_POR_REGISTERS_AUTOLOAD_PTR 0x118
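
/*
 * Example (illustrative, field association assumed from the macro names):
 * given the 16-bit word read from ICE_SR_NVM_DEV_STARTER_VER, the version
 * components would be extracted as
 *
 *	hi = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
 *	lo = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
 *
 * See the driver's NVM version parsing for the authoritative usage.
 */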
1384
1385 /* CSS Header words */
1386 #define ICE_NVM_CSS_HDR_LEN_L 0x02
1387 #define ICE_NVM_CSS_HDR_LEN_H 0x03
1388 #define ICE_NVM_CSS_SREV_L 0x14
1389 #define ICE_NVM_CSS_SREV_H 0x15
1390
1391 /* Length of Authentication header section in words */
1392 #define ICE_NVM_AUTH_HEADER_LEN 0x08
1393
1394 /* The Link Topology Netlist section is stored as a series of words. It is
1395 * stored in the NVM as a TLV, with the first two words containing the type
1396 * and length.
1397 */
1398 #define ICE_NETLIST_LINK_TOPO_MOD_ID 0x011B
1399 #define ICE_NETLIST_TYPE_OFFSET 0x0000
1400 #define ICE_NETLIST_LEN_OFFSET 0x0001
1401
1402 /* The Link Topology section follows the TLV header. When reading the netlist
1403 * using ice_read_netlist_module, we need to account for the 2-word TLV
1404 * header.
1405 */
1406 #define ICE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
1407
1408 #define ICE_LINK_TOPO_MODULE_LEN ICE_NETLIST_LINK_TOPO_OFFSET(0x0000)
1409 #define ICE_LINK_TOPO_NODE_COUNT ICE_NETLIST_LINK_TOPO_OFFSET(0x0001)
1410
1411 #define ICE_LINK_TOPO_NODE_COUNT_M MAKEMASK(0x3FF, 0)
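
/*
 * Example (illustrative): ICE_LINK_TOPO_MODULE_LEN expands to
 * ICE_NETLIST_LINK_TOPO_OFFSET(0x0000) == 2, i.e. the module length word
 * is the first word after the two-word type/length TLV header.
 */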
1412
1413 /* The Netlist ID Block is located after all of the Link Topology nodes. */
1414 #define ICE_NETLIST_ID_BLK_SIZE 0x30
1415 #define ICE_NETLIST_ID_BLK_OFFSET(n) ICE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
1416
1417 /* netlist ID block field offsets (word offsets) */
1418 #define ICE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
1419 #define ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
1420 #define ICE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
1421 #define ICE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
1422 #define ICE_NETLIST_ID_BLK_TYPE_LOW 0x06
1423 #define ICE_NETLIST_ID_BLK_TYPE_HIGH 0x07
1424 #define ICE_NETLIST_ID_BLK_REV_LOW 0x08
1425 #define ICE_NETLIST_ID_BLK_REV_HIGH 0x09
1426 #define ICE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
1427 #define ICE_NETLIST_ID_BLK_CUST_VER 0x2F
1428
1429 /* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
1430 #define ICE_SR_VPD_SIZE_WORDS 512
1431 #define ICE_SR_PCIE_ALT_SIZE_WORDS 512
1432 #define ICE_SR_CTRL_WORD_1_S 0x06
1433 #define ICE_SR_CTRL_WORD_1_M (0x03 << ICE_SR_CTRL_WORD_1_S)
1434 #define ICE_SR_CTRL_WORD_VALID 0x1
1435 #define ICE_SR_CTRL_WORD_OROM_BANK BIT(3)
1436 #define ICE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
1437 #define ICE_SR_CTRL_WORD_NVM_BANK BIT(5)
1438
1439 #define ICE_SR_NVM_PTR_4KB_UNITS BIT(15)
1440
1441 /* Shadow RAM related */
1442 #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
1443 #define ICE_SR_BUF_ALIGNMENT 4096
1444 #define ICE_SR_WORDS_IN_1KB 512
1445 /* Checksum should be calculated such that after adding all the words,
1446 * including the checksum word itself, the sum should be 0xBABA.
1447 */
1448 #define ICE_SR_SW_CHECKSUM_BASE 0xBABA
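
/*
 * Example (illustrative): if the sum of all Shadow RAM words other than the
 * checksum word is S (modulo 2^16), the checksum word is chosen as
 * (uint16_t)(ICE_SR_SW_CHECKSUM_BASE - S), so that adding every word,
 * including the checksum itself, yields 0xBABA.
 */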
1449
1450 /* Link override related */
1451 #define ICE_SR_PFA_LINK_OVERRIDE_WORDS 10
1452 #define ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS 4
1453 #define ICE_SR_PFA_LINK_OVERRIDE_OFFSET 2
1454 #define ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET 1
1455 #define ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET 2
1456 #define ICE_FW_API_LINK_OVERRIDE_MAJ 1
1457 #define ICE_FW_API_LINK_OVERRIDE_MIN 5
1458 #define ICE_FW_API_LINK_OVERRIDE_PATCH 2
1459
1460 #define ICE_PBA_FLAG_DFLT 0xFAFA
1461 /* Hash redirection LUT for VSI - maximum array size */
1462 #define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
1463
1464 /*
1465 * Defines for values in the VF_PE_DB_SIZE bits in the GLPCI_LBARCTRL register.
1466 * This is needed to determine the BAR0 space for the VFs
1467 */
1468 #define GLPCI_LBARCTRL_VF_PE_DB_SIZE_0KB 0x0
1469 #define GLPCI_LBARCTRL_VF_PE_DB_SIZE_8KB 0x1
1470 #define GLPCI_LBARCTRL_VF_PE_DB_SIZE_64KB 0x2
1471
1472 /* AQ API version for LLDP_FILTER_CONTROL */
1473 #define ICE_FW_API_LLDP_FLTR_MAJ 1
1474 #define ICE_FW_API_LLDP_FLTR_MIN 7
1475 #define ICE_FW_API_LLDP_FLTR_PATCH 1
1476
1477 /* AQ API version for report default configuration */
1478 #define ICE_FW_API_REPORT_DFLT_CFG_MAJ 1
1479 #define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
1480 #define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
1481
1482 /* FW branch number for hardware families */
1483 #define ICE_FW_VER_BRANCH_E82X 0
1484 #define ICE_FW_VER_BRANCH_E810 1
1485
1486 /* FW version for FEC disable in Auto FEC mode */
1487 #define ICE_FW_FEC_DIS_AUTO_MAJ 7
1488 #define ICE_FW_FEC_DIS_AUTO_MIN 0
1489 #define ICE_FW_FEC_DIS_AUTO_PATCH 5
1490 #define ICE_FW_FEC_DIS_AUTO_MAJ_E82X 7
1491 #define ICE_FW_FEC_DIS_AUTO_MIN_E82X 1
1492 #define ICE_FW_FEC_DIS_AUTO_PATCH_E82X 2
1493
1494 /* AQ API version for FW health reports */
1495 #define ICE_FW_API_HEALTH_REPORT_MAJ 1
1496 #define ICE_FW_API_HEALTH_REPORT_MIN 7
1497 #define ICE_FW_API_HEALTH_REPORT_PATCH 6
1498
1499 /* AQ API version for FW auto drop reports */
1500 #define ICE_FW_API_AUTO_DROP_MAJ 1
1501 #define ICE_FW_API_AUTO_DROP_MIN 4
1502
1503 /* Function specific capabilities */
1504 struct ice_hw_func_caps {
1505 struct ice_hw_common_caps common_cap;
1506 uint32_t num_allocd_vfs; /* Number of allocated VFs */
1507 uint32_t vf_base_id; /* Logical ID of the first VF */
1508 uint32_t guar_num_vsi;
1509 };
1510
1511 struct ice_nac_topology {
1512 uint32_t mode;
1513 uint8_t id;
1514 };
1515
1516 /* Device wide capabilities */
1517 struct ice_hw_dev_caps {
1518 struct ice_hw_common_caps common_cap;
1519 uint32_t num_vfs_exposed; /* Total number of VFs exposed */
1520 uint32_t num_vsi_allocd_to_host; /* Excluding EMP VSI */
1521 uint32_t num_funcs;
1522 struct ice_nac_topology nac_topo;
1523 /* bitmap of supported sensors */
1524 uint32_t supported_sensors;
1525 #define ICE_SENSOR_SUPPORT_E810_INT_TEMP BIT(0)
1526 };
1527
1528 #define SCHED_NODE_NAME_MAX_LEN 32
1529
1530 #define ICE_SCHED_5_LAYERS 5
1531 #define ICE_SCHED_9_LAYERS 9
1532
1533 #define ICE_QGRP_LAYER_OFFSET 2
1534 #define ICE_VSI_LAYER_OFFSET 4
1535 #define ICE_AGG_LAYER_OFFSET 6
1536 #define ICE_SCHED_INVAL_LAYER_NUM 0xFF
/* Burst size is a 12-bit field that is configured while creating the RL
 * profile(s). The MSB is a granularity bit that selects the granularity type:
 * 0 - LSB bits are in 64 byte granularity
 * 1 - LSB bits are in 1K byte granularity
 */
1542 #define ICE_64_BYTE_GRANULARITY 0
1543 #define ICE_KBYTE_GRANULARITY BIT(11)
1544 #define ICE_MIN_BURST_SIZE_ALLOWED 64 /* In Bytes */
1545 #define ICE_MAX_BURST_SIZE_ALLOWED \
1546 ((BIT(11) - 1) * 1024) /* In Bytes */
1547 #define ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY \
1548 ((BIT(11) - 1) * 64) /* In Bytes */
1549 #define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY ICE_MAX_BURST_SIZE_ALLOWED
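
/*
 * Example (illustrative, encoding assumed from the granularity comment
 * above): a 15 KB burst (ICE_SCHED_DFLT_BURST_SIZE) would be programmed
 * with 1K granularity as (ICE_KBYTE_GRANULARITY | 15), while a 960 byte
 * burst would use 64 byte granularity and encode as
 * (ICE_64_BYTE_GRANULARITY | (960 / 64)).
 */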
1550
1551 #define ICE_RL_PROF_ACCURACY_BYTES 128
1552 #define ICE_RL_PROF_MULTIPLIER 10000
1553 #define ICE_RL_PROF_TS_MULTIPLIER 32
1554 #define ICE_RL_PROF_FRACTION 512
1555
1556 #define ICE_PSM_CLK_367MHZ_IN_HZ 367647059
1557 #define ICE_PSM_CLK_416MHZ_IN_HZ 416666667
1558 #define ICE_PSM_CLK_446MHZ_IN_HZ 446428571
1559 #define ICE_PSM_CLK_390MHZ_IN_HZ 390625000
1560
1561 #define PSM_CLK_SRC_367_MHZ 0x0
1562 #define PSM_CLK_SRC_416_MHZ 0x1
1563 #define PSM_CLK_SRC_446_MHZ 0x2
1564 #define PSM_CLK_SRC_390_MHZ 0x3
1565
1577 /* Access Macros for Tx Sched RL Profile data */
1578 #define ICE_TXSCHED_GET_RL_PROF_ID(p) le16toh((p)->info.profile_id)
1579 #define ICE_TXSCHED_GET_RL_MBS(p) le16toh((p)->info.max_burst_size)
1580 #define ICE_TXSCHED_GET_RL_MULTIPLIER(p) le16toh((p)->info.rl_multiply)
1581 #define ICE_TXSCHED_GET_RL_WAKEUP_MV(p) le16toh((p)->info.wake_up_calc)
1582 #define ICE_TXSCHED_GET_RL_ENCODE(p) le16toh((p)->info.rl_encode)
1583
1584 #define ICE_MAX_PORT_PER_PCI_DEV 8
1585
1586 /* The following tree example shows the naming conventions followed under
 * the ice_port_info struct for the default scheduler tree topology.
1588 *
1589 * A tree on a port
1590 * * ---> root node
1591 * (TC0)/ / / / \ \ \ \(TC7) ---> num_branches (range:1- 8)
1592 * * * * * * * * * |
1593 * / |
1594 * * |
1595 * / |-> num_elements (range:1 - 9)
1596 * * | implies num_of_layers
1597 * / |
1598 * (a)* |
1599 *
 * (a) is the last_node_teid (not of type Leaf). A leaf node is created under
 * (a) as a child node where queues get added; the add Tx/Rx queue admin
 * commands need the TEID of (a) to add queues.
1603 *
1604 * This tree
1605 * -> has 8 branches (one for each TC)
1606 * -> First branch (TC0) has 4 elements
1607 * -> has 4 layers
 * -> (a) is the bottom-most layer node created by firmware on branch 0
1609 *
 * Note: The asterisk tree above covers only the basic terminology and scenario.
 * Refer to the documentation for more info.
1612 */
1613
1614 /* Data structure for saving BW information */
1615 enum ice_bw_type {
1616 ICE_BW_TYPE_PRIO,
1617 ICE_BW_TYPE_CIR,
1618 ICE_BW_TYPE_CIR_WT,
1619 ICE_BW_TYPE_EIR,
1620 ICE_BW_TYPE_EIR_WT,
1621 ICE_BW_TYPE_SHARED,
1622 ICE_BW_TYPE_CNT /* This must be last */
1623 };
1624
1625 struct ice_bw {
1626 uint32_t bw;
1627 uint16_t bw_alloc;
1628 };
1629
1630 struct ice_bw_type_info {
1631 ice_declare_bitmap(bw_t_bitmap, ICE_BW_TYPE_CNT);
1632 uint8_t generic;
1633 struct ice_bw cir_bw;
1634 struct ice_bw eir_bw;
1635 uint32_t shared_bw;
1636 };
1637
1638 /* VSI queue context structure for given TC */
1639 struct ice_q_ctx {
1640 uint16_t q_handle;
1641 uint32_t q_teid;
1642 /* bw_t_info saves queue BW information */
1643 struct ice_bw_type_info bw_t_info;
1644 };
1645
1646 struct ice_sched_agg_vsi_info {
1647 TAILQ_ENTRY(ice_sched_agg_vsi_info) list_entry;
1648 ice_declare_bitmap(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
1649 uint16_t vsi_handle;
1650 /* save aggregator VSI TC bitmap */
1651 ice_declare_bitmap(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
1652 };
1653
1654 /* VSI type list entry to locate corresponding VSI/aggregator nodes */
1655 struct ice_sched_vsi_info {
1656 struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
1657 struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
1658 uint16_t max_lanq[ICE_MAX_TRAFFIC_CLASS];
1659 uint16_t max_rdmaq[ICE_MAX_TRAFFIC_CLASS];
1660 /* bw_t_info saves VSI BW information */
1661 struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
1662 };
1663
/* The aggregator type determines whether the identifier is for a VSI group,
 * an aggregator group, an aggregator of queues, or a queue group.
 */
1667 enum ice_agg_type {
1668 ICE_AGG_TYPE_UNKNOWN = 0,
1669 ICE_AGG_TYPE_TC,
1670 ICE_AGG_TYPE_AGG, /* aggregator */
1671 ICE_AGG_TYPE_VSI,
1672 ICE_AGG_TYPE_QG,
1673 ICE_AGG_TYPE_Q
1674 };
1675
1676 TAILQ_HEAD(ice_vsi_list_head, ice_sched_agg_vsi_info);
1677
1678 /*
1679 * For now, set this to the hardware maximum. Each function gets a smaller
1680 * number assigned to it in hw->func_caps.guar_num_vsi, though there
 * appears to be no guarantee that this is the maximum number that a function
1682 * can use.
1683 */
1684 #define ICE_MAX_VSI_AVAILABLE 768
1685
1686 struct ice_sched_agg_info {
1687 struct ice_vsi_list_head agg_vsi_list;
1688 TAILQ_ENTRY(ice_sched_agg_info) list_entry;
1689 ice_declare_bitmap(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
1690 uint32_t agg_id;
1691 enum ice_agg_type agg_type;
1692 /* bw_t_info saves aggregator BW information */
1693 struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
1694 /* save aggregator TC bitmap */
1695 ice_declare_bitmap(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
1696 };
1697
1698 #define ICE_DCBX_OFFLOAD_DIS 0
1699 #define ICE_DCBX_OFFLOAD_ENABLED 1
1700
1701 #define ICE_DCBX_STATUS_NOT_STARTED 0
1702 #define ICE_DCBX_STATUS_IN_PROGRESS 1
1703 #define ICE_DCBX_STATUS_DONE 2
1704 #define ICE_DCBX_STATUS_MULTIPLE_PEERS 3
1705 #define ICE_DCBX_STATUS_DIS 7
1706
1707 #define ICE_TLV_TYPE_END 0
1708 #define ICE_TLV_TYPE_ORG 127
1709
1710 #define ICE_IEEE_8021QAZ_OUI 0x0080C2
1711 #define ICE_IEEE_SUBTYPE_ETS_CFG 9
1712 #define ICE_IEEE_SUBTYPE_ETS_REC 10
1713 #define ICE_IEEE_SUBTYPE_PFC_CFG 11
1714 #define ICE_IEEE_SUBTYPE_APP_PRI 12
1715
1716 #define ICE_CEE_DCBX_OUI 0x001B21
1717 #define ICE_CEE_DCBX_TYPE 2
1718
1719 #define ICE_DSCP_OUI 0xFFFFFF
1720 #define ICE_DSCP_SUBTYPE_DSCP2UP 0x41
1721 #define ICE_DSCP_SUBTYPE_ENFORCE 0x42
1722 #define ICE_DSCP_SUBTYPE_TCBW 0x43
1723 #define ICE_DSCP_SUBTYPE_PFC 0x44
1724 #define ICE_DSCP_IPV6_OFFSET 80
1725
1726 #define ICE_CEE_SUBTYPE_CTRL 1
1727 #define ICE_CEE_SUBTYPE_PG_CFG 2
1728 #define ICE_CEE_SUBTYPE_PFC_CFG 3
1729 #define ICE_CEE_SUBTYPE_APP_PRI 4
1730
1731 #define ICE_CEE_MAX_FEAT_TYPE 3
1732 #define ICE_LLDP_ADMINSTATUS_DIS 0
1733 #define ICE_LLDP_ADMINSTATUS_ENA_RX 1
1734 #define ICE_LLDP_ADMINSTATUS_ENA_TX 2
1735 #define ICE_LLDP_ADMINSTATUS_ENA_RXTX 3
1736
1737 /* Defines for LLDP TLV header */
1738 #define ICE_LLDP_TLV_LEN_S 0
1739 #define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S)
1740 #define ICE_LLDP_TLV_TYPE_S 9
1741 #define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S)
1742 #define ICE_LLDP_TLV_SUBTYPE_S 0
1743 #define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S)
1744 #define ICE_LLDP_TLV_OUI_S 8
1745 #define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S)
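
/*
 * Illustrative encoding example, derived from the shift/mask values above:
 * an organizationally specific TLV (type 127) with a length of 25 has
 *	typelen    = (ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | 25
 *	           = (127 << 9) | 25 = 0xFE19
 * and, for the IEEE 802.1Qaz ETS configuration TLV,
 *	ouisubtype = (ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
 *	    ICE_IEEE_SUBTYPE_ETS_CFG = 0x0080C209
 * On the wire these fields are carried in network byte order.
 */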
1746
1747 /* Defines for IEEE ETS TLV */
1748 #define ICE_IEEE_ETS_MAXTC_S 0
1749 #define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S)
1750 #define ICE_IEEE_ETS_CBS_S 6
1751 #define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S)
1752 #define ICE_IEEE_ETS_WILLING_S 7
1753 #define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S)
1754 #define ICE_IEEE_ETS_PRIO_0_S 0
1755 #define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S)
1756 #define ICE_IEEE_ETS_PRIO_1_S 4
1757 #define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S)
1758 #define ICE_CEE_PGID_PRIO_0_S 0
1759 #define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S)
1760 #define ICE_CEE_PGID_PRIO_1_S 4
1761 #define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S)
1762 #define ICE_CEE_PGID_STRICT 15
1763
1764 /* Defines for IEEE TSA types */
1765 #define ICE_IEEE_TSA_STRICT 0
1766 #define ICE_IEEE_TSA_CBS 1
1767 #define ICE_IEEE_TSA_ETS 2
1768 #define ICE_IEEE_TSA_VENDOR 255
1769
1770 /* Defines for IEEE PFC TLV */
1771 #define ICE_IEEE_PFC_CAP_S 0
1772 #define ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S)
1773 #define ICE_IEEE_PFC_MBC_S 6
1774 #define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S)
1775 #define ICE_IEEE_PFC_WILLING_S 7
1776 #define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S)
1777
1778 /* Defines for IEEE APP TLV */
1779 #define ICE_IEEE_APP_SEL_S 0
1780 #define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S)
1781 #define ICE_IEEE_APP_PRIO_S 5
1782 #define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S)
1783
1784 /* TLV definitions for preparing MIB */
1785 #define ICE_TLV_ID_CHASSIS_ID 0
1786 #define ICE_TLV_ID_PORT_ID 1
1787 #define ICE_TLV_ID_TIME_TO_LIVE 2
1788 #define ICE_IEEE_TLV_ID_ETS_CFG 3
1789 #define ICE_IEEE_TLV_ID_ETS_REC 4
1790 #define ICE_IEEE_TLV_ID_PFC_CFG 5
1791 #define ICE_IEEE_TLV_ID_APP_PRI 6
1792 #define ICE_TLV_ID_END_OF_LLDPPDU 7
1793 #define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
1794 #define ICE_TLV_ID_DSCP_UP 3
1795 #define ICE_TLV_ID_DSCP_ENF 4
1796 #define ICE_TLV_ID_DSCP_TC_BW 5
1797 #define ICE_TLV_ID_DSCP_TO_PFC 6
1798
1799 #define ICE_IEEE_ETS_TLV_LEN 25
1800 #define ICE_IEEE_PFC_TLV_LEN 6
1801 #define ICE_IEEE_APP_TLV_LEN 11
1802
1803 #define ICE_DSCP_UP_TLV_LEN 148
1804 #define ICE_DSCP_ENF_TLV_LEN 132
1805 #define ICE_DSCP_TC_BW_TLV_LEN 25
1806 #define ICE_DSCP_PFC_TLV_LEN 6
1807
1808 /* IEEE 802.1AB LLDP Organization specific TLV */
1809 struct ice_lldp_org_tlv {
1810 uint16_t typelen;
1811 uint32_t ouisubtype;
1812 uint8_t tlvinfo[STRUCT_HACK_VAR_LEN];
1813 } __packed;
1814
1815 struct ice_cee_tlv_hdr {
1816 uint16_t typelen;
1817 uint8_t operver;
1818 uint8_t maxver;
1819 };
1820
1821 struct ice_cee_ctrl_tlv {
1822 struct ice_cee_tlv_hdr hdr;
1823 uint32_t seqno;
1824 uint32_t ackno;
1825 };
1826
1827 struct ice_cee_feat_tlv {
1828 struct ice_cee_tlv_hdr hdr;
1829 uint8_t en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
1830 #define ICE_CEE_FEAT_TLV_ENA_M 0x80
1831 #define ICE_CEE_FEAT_TLV_WILLING_M 0x40
1832 #define ICE_CEE_FEAT_TLV_ERR_M 0x20
1833 uint8_t subtype;
1834 uint8_t tlvinfo[STRUCT_HACK_VAR_LEN];
1835 };
1836
1837 struct ice_cee_app_prio {
1838 uint16_t protocol;
1839 uint8_t upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
1840 #define ICE_CEE_APP_SELECTOR_M 0x03
1841 uint16_t lower_oui;
1842 uint8_t prio_map;
1843 } __packed;
1844
1845 /* CEE or IEEE 802.1Qaz ETS Configuration data */
1846 struct ice_dcb_ets_cfg {
1847 uint8_t willing;
1848 uint8_t cbs;
1849 uint8_t maxtcs;
1850 uint8_t prio_table[ICE_MAX_TRAFFIC_CLASS];
1851 uint8_t tcbwtable[ICE_MAX_TRAFFIC_CLASS];
1852 uint8_t tsatable[ICE_MAX_TRAFFIC_CLASS];
1853 };
1854
1855 /* CEE or IEEE 802.1Qaz PFC Configuration data */
1856 struct ice_dcb_pfc_cfg {
1857 uint8_t willing;
1858 uint8_t mbc;
1859 uint8_t pfccap;
1860 uint8_t pfcena;
1861 };
1862
1863 /* CEE or IEEE 802.1Qaz Application Priority data */
1864 struct ice_dcb_app_priority_table {
1865 uint16_t prot_id;
1866 uint8_t priority;
1867 uint8_t selector;
1868 };
1869
1870 #define ICE_MAX_USER_PRIORITY 8
1871 #define ICE_DCBX_MAX_APPS 64
1872 #define ICE_DSCP_NUM_VAL 64
1873 #define ICE_LLDPDU_SIZE 1500
1874 #define ICE_TLV_STATUS_OPER 0x1
1875 #define ICE_TLV_STATUS_SYNC 0x2
1876 #define ICE_TLV_STATUS_ERR 0x4
1877 #define ICE_APP_PROT_ID_FCOE 0x8906
1878 #define ICE_APP_PROT_ID_ISCSI 0x0cbc
1879 #define ICE_APP_PROT_ID_ISCSI_860 0x035c
1880 #define ICE_APP_PROT_ID_FIP 0x8914
1881 #define ICE_APP_SEL_ETHTYPE 0x1
1882 #define ICE_APP_SEL_TCPIP 0x2
1883 #define ICE_CEE_APP_SEL_ETHTYPE 0x0
1884 #define ICE_CEE_APP_SEL_TCPIP 0x1
1885
1886 struct ice_dcbx_cfg {
1887 uint32_t numapps;
1888 uint32_t tlv_status; /* CEE mode TLV status */
1889 struct ice_dcb_ets_cfg etscfg;
1890 struct ice_dcb_ets_cfg etsrec;
1891 struct ice_dcb_pfc_cfg pfc;
1892 #define ICE_QOS_MODE_VLAN 0x0
1893 #define ICE_QOS_MODE_DSCP 0x1
1894 uint8_t pfc_mode;
1895 struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
	/* when a DSCP mapping is defined by the user, its bit is set to 1 */
1897 ice_declare_bitmap(dscp_mapped, ICE_DSCP_NUM_VAL);
1898 /* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */
1899 uint8_t dscp_map[ICE_DSCP_NUM_VAL];
1900 uint8_t dcbx_mode;
1901 #define ICE_DCBX_MODE_CEE 0x1
1902 #define ICE_DCBX_MODE_IEEE 0x2
1903 uint8_t app_mode;
1904 #define ICE_DCBX_APPS_NON_WILLING 0x1
1905 };
1906
1907 struct ice_qos_cfg {
1908 struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
1909 struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
1910 struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
1911 uint8_t dcbx_status : 3; /* see ICE_DCBX_STATUS_DIS */
1912 uint8_t is_sw_lldp : 1;
1913 };
1914
1915 /* Information about MAC such as address, etc... */
1916 struct ice_mac_info {
1917 uint8_t lan_addr[ETHER_ADDR_LEN];
1918 uint8_t perm_addr[ETHER_ADDR_LEN];
1919 uint8_t port_addr[ETHER_ADDR_LEN];
1920 uint8_t wol_addr[ETHER_ADDR_LEN];
1921 };
1922
1923 /* Media Types */
1924 enum ice_media_type {
1925 ICE_MEDIA_NONE = 0,
1926 ICE_MEDIA_UNKNOWN,
1927 ICE_MEDIA_FIBER,
1928 ICE_MEDIA_BASET,
1929 ICE_MEDIA_BACKPLANE,
1930 ICE_MEDIA_DA,
1931 ICE_MEDIA_AUI,
1932 };
1933
1934 #define ICE_MEDIA_BASET_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_100BASE_TX | \
1935 ICE_PHY_TYPE_LOW_1000BASE_T | \
1936 ICE_PHY_TYPE_LOW_2500BASE_T | \
1937 ICE_PHY_TYPE_LOW_5GBASE_T | \
1938 ICE_PHY_TYPE_LOW_10GBASE_T | \
1939 ICE_PHY_TYPE_LOW_25GBASE_T)
1940
1941 #define ICE_MEDIA_C2M_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \
1942 ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC | \
1943 ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | \
1944 ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | \
1945 ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | \
1946 ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | \
1947 ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \
1948 ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC)
1949
1950 #define ICE_MEDIA_C2M_PHY_TYPE_HIGH_M (ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | \
1951 ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC)
1952
1953 #define ICE_MEDIA_OPT_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_1000BASE_SX | \
1954 ICE_PHY_TYPE_LOW_1000BASE_LX | \
1955 ICE_PHY_TYPE_LOW_10GBASE_SR | \
1956 ICE_PHY_TYPE_LOW_10GBASE_LR | \
1957 ICE_PHY_TYPE_LOW_25GBASE_SR | \
1958 ICE_PHY_TYPE_LOW_25GBASE_LR | \
1959 ICE_PHY_TYPE_LOW_40GBASE_SR4 | \
1960 ICE_PHY_TYPE_LOW_40GBASE_LR4 | \
1961 ICE_PHY_TYPE_LOW_50GBASE_SR2 | \
1962 ICE_PHY_TYPE_LOW_50GBASE_LR2 | \
1963 ICE_PHY_TYPE_LOW_50GBASE_SR | \
1964 ICE_PHY_TYPE_LOW_50GBASE_LR | \
1965 ICE_PHY_TYPE_LOW_100GBASE_SR4 | \
1966 ICE_PHY_TYPE_LOW_100GBASE_LR4 | \
1967 ICE_PHY_TYPE_LOW_100GBASE_SR2 | \
1968 ICE_PHY_TYPE_LOW_50GBASE_FR | \
1969 ICE_PHY_TYPE_LOW_100GBASE_DR)
1970
1971 #define ICE_MEDIA_BP_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_1000BASE_KX | \
1972 ICE_PHY_TYPE_LOW_2500BASE_KX | \
1973 ICE_PHY_TYPE_LOW_5GBASE_KR | \
1974 ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \
1975 ICE_PHY_TYPE_LOW_25GBASE_KR | \
1976 ICE_PHY_TYPE_LOW_25GBASE_KR_S | \
1977 ICE_PHY_TYPE_LOW_25GBASE_KR1 | \
1978 ICE_PHY_TYPE_LOW_40GBASE_KR4 | \
1979 ICE_PHY_TYPE_LOW_50GBASE_KR2 | \
1980 ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 | \
1981 ICE_PHY_TYPE_LOW_100GBASE_KR4 | \
1982 ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4)
1983
1984 #define ICE_MEDIA_BP_PHY_TYPE_HIGH_M ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4
1985
1986 #define ICE_MEDIA_DAC_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_10G_SFI_DA | \
1987 ICE_PHY_TYPE_LOW_25GBASE_CR | \
1988 ICE_PHY_TYPE_LOW_25GBASE_CR_S | \
1989 ICE_PHY_TYPE_LOW_25GBASE_CR1 | \
1990 ICE_PHY_TYPE_LOW_40GBASE_CR4 | \
1991 ICE_PHY_TYPE_LOW_50GBASE_CR2 | \
1992 ICE_PHY_TYPE_LOW_100GBASE_CR4 | \
1993 ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \
1994 ICE_PHY_TYPE_LOW_50GBASE_CP | \
1995 ICE_PHY_TYPE_LOW_100GBASE_CP2)
1996
1997 #define ICE_MEDIA_C2C_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_100M_SGMII | \
1998 ICE_PHY_TYPE_LOW_1G_SGMII | \
1999 ICE_PHY_TYPE_LOW_2500BASE_X | \
2000 ICE_PHY_TYPE_LOW_10G_SFI_C2C | \
2001 ICE_PHY_TYPE_LOW_25G_AUI_C2C | \
2002 ICE_PHY_TYPE_LOW_40G_XLAUI | \
2003 ICE_PHY_TYPE_LOW_50G_LAUI2 | \
2004 ICE_PHY_TYPE_LOW_50G_AUI2 | \
2005 ICE_PHY_TYPE_LOW_50G_AUI1 | \
2006 ICE_PHY_TYPE_LOW_100G_CAUI4 | \
2007 ICE_PHY_TYPE_LOW_100G_AUI4)
2008
2009 #define ICE_MEDIA_C2C_PHY_TYPE_HIGH_M (ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
2010 ICE_PHY_TYPE_HIGH_100G_AUI2)
2011
2012 #define ICE_IPV6_ADDR_LENGTH 16
2013
2014 /* Each recipe can match up to 5 different fields. Fields to match can be meta-
2015 * data, values extracted from packet headers, or results from other recipes.
2016 * One of the 5 fields is reserved for matching the switch ID. So, up to 4
2017 * recipes can provide intermediate results to another one through chaining,
2018 * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4.
2019 */
2020 #define ICE_NUM_WORDS_RECIPE 4
2021
2022 /* Max recipes that can be chained */
2023 #define ICE_MAX_CHAIN_RECIPE 5
2024
/* One of the allowed 5 words is reserved for the switch ID, so a recipe can
 * have at most 4 match words. Up to 5 such recipes can be chained together,
 * so the maximum number of words that can be programmed for a lookup is 5 * 4.
 */
2029 #define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)
2030
2031 /* Field vector index corresponding to chaining */
2032 #define ICE_CHAIN_FV_INDEX_START 47
2033
2034 enum ice_protocol_type {
2035 ICE_MAC_OFOS = 0,
2036 ICE_MAC_IL,
2037 ICE_ETYPE_OL,
2038 ICE_ETYPE_IL,
2039 ICE_VLAN_OFOS,
2040 ICE_IPV4_OFOS,
2041 ICE_IPV4_IL,
2042 ICE_IPV6_OFOS,
2043 ICE_IPV6_IL,
2044 ICE_TCP_IL,
2045 ICE_UDP_OF,
2046 ICE_UDP_ILOS,
2047 ICE_SCTP_IL,
2048 ICE_VXLAN,
2049 ICE_GENEVE,
2050 ICE_VXLAN_GPE,
2051 ICE_NVGRE,
2052 ICE_GTP,
2053 ICE_GTP_NO_PAY,
2054 ICE_PPPOE,
2055 ICE_L2TPV3,
2056 ICE_PROTOCOL_LAST
2057 };
2058
2059 enum ice_sw_tunnel_type {
2060 ICE_NON_TUN = 0,
2061 ICE_SW_TUN_AND_NON_TUN,
2062 ICE_SW_TUN_VXLAN_GPE,
2063 ICE_SW_TUN_GENEVE, /* GENEVE matches only non-VLAN pkts */
2064 ICE_SW_TUN_GENEVE_VLAN, /* GENEVE matches both VLAN and non-VLAN pkts */
2065 ICE_SW_TUN_VXLAN, /* VXLAN matches only non-VLAN pkts */
2066 ICE_SW_TUN_VXLAN_VLAN, /* VXLAN matches both VLAN and non-VLAN pkts */
2067 ICE_SW_TUN_NVGRE,
2068 ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN
2069 * and GENEVE
2070 */
2071 ICE_SW_TUN_GTPU,
2072 ICE_SW_TUN_GTPC,
2073 ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
2074 };
2075
2076 /* Decoders for ice_prot_id:
2077 * - F: First
2078 * - I: Inner
2079 * - L: Last
2080 * - O: Outer
2081 * - S: Single
2082 */
2083 enum ice_prot_id {
2084 ICE_PROT_ID_INVAL = 0,
2085 ICE_PROT_MAC_OF_OR_S = 1,
2086 ICE_PROT_MAC_O2 = 2,
2087 ICE_PROT_MAC_IL = 4,
2088 ICE_PROT_MAC_IN_MAC = 7,
2089 ICE_PROT_ETYPE_OL = 9,
2090 ICE_PROT_ETYPE_IL = 10,
2091 ICE_PROT_PAY = 15,
2092 ICE_PROT_EVLAN_O = 16,
2093 ICE_PROT_VLAN_O = 17,
2094 ICE_PROT_VLAN_IF = 18,
2095 ICE_PROT_MPLS_OL_MINUS_1 = 27,
2096 ICE_PROT_MPLS_OL_OR_OS = 28,
2097 ICE_PROT_MPLS_IL = 29,
2098 ICE_PROT_IPV4_OF_OR_S = 32,
2099 ICE_PROT_IPV4_IL = 33,
2100 ICE_PROT_IPV4_IL_IL = 34,
2101 ICE_PROT_IPV6_OF_OR_S = 40,
2102 ICE_PROT_IPV6_IL = 41,
2103 ICE_PROT_IPV6_IL_IL = 42,
2104 ICE_PROT_IPV6_NEXT_PROTO = 43,
2105 ICE_PROT_IPV6_FRAG = 47,
2106 ICE_PROT_TCP_IL = 49,
2107 ICE_PROT_UDP_OF = 52,
2108 ICE_PROT_UDP_IL_OR_S = 53,
2109 ICE_PROT_GRE_OF = 64,
2110 ICE_PROT_NSH_F = 84,
2111 ICE_PROT_ESP_F = 88,
2112 ICE_PROT_ESP_2 = 89,
2113 ICE_PROT_SCTP_IL = 96,
2114 ICE_PROT_ICMP_IL = 98,
2115 ICE_PROT_ICMPV6_IL = 100,
2116 ICE_PROT_VRRP_F = 101,
2117 ICE_PROT_OSPF = 102,
2118 ICE_PROT_ATAOE_OF = 114,
2119 ICE_PROT_CTRL_OF = 116,
2120 ICE_PROT_LLDP_OF = 117,
2121 ICE_PROT_ARP_OF = 118,
2122 ICE_PROT_EAPOL_OF = 120,
	ICE_PROT_META_ID = 255, /* when offset == metadata */
2124 ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
2125 };
2126
2127 #define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */
2128
2129 #define ICE_NAN_OFFSET 511
2130 #define ICE_MAC_OFOS_HW 1
2131 #define ICE_MAC_IL_HW 4
2132 #define ICE_ETYPE_OL_HW 9
2133 #define ICE_ETYPE_IL_HW 10
2134 #define ICE_VLAN_OF_HW 16
2135 #define ICE_VLAN_OL_HW 17
2136 #define ICE_IPV4_OFOS_HW 32
2137 #define ICE_IPV4_IL_HW 33
2138 #define ICE_IPV6_OFOS_HW 40
2139 #define ICE_IPV6_IL_HW 41
2140 #define ICE_TCP_IL_HW 49
2141 #define ICE_UDP_ILOS_HW 53
2142 #define ICE_SCTP_IL_HW 96
2143 #define ICE_PPPOE_HW 103
2144 #define ICE_L2TPV3_HW 104
2145
/* ICE_UDP_OF is used to identify all 3 tunnel types:
 * VXLAN, GENEVE and VXLAN_GPE. To differentiate further,
 * flags from the field vector need to be used.
 */
2150 #define ICE_UDP_OF_HW 52 /* UDP Tunnels */
2151 #define ICE_GRE_OF_HW 64 /* NVGRE */
2152 #define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */
2153
2154 #define ICE_MDID_SIZE 2
2155 #define ICE_TUN_FLAG_MDID 20
2156 #define ICE_TUN_FLAG_MDID_OFF(word) \
2157 (ICE_MDID_SIZE * (ICE_TUN_FLAG_MDID + (word)))
2158 #define ICE_TUN_FLAG_MASK 0xFF
2159 #define ICE_FROM_NETWORK_FLAG_MASK 0x8
2160 #define ICE_DIR_FLAG_MASK 0x10
2161 #define ICE_TUN_FLAG_IN_VLAN_MASK 0x80 /* VLAN inside tunneled header */
2162 #define ICE_TUN_FLAG_VLAN_MASK 0x01
2163 #define ICE_TUN_FLAG_FV_IND 2
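
/*
 * Worked example (pure arithmetic on the macros above): each metadata ID
 * occupies ICE_MDID_SIZE (2) bytes, so
 *	ICE_TUN_FLAG_MDID_OFF(0) = 2 * (20 + 0) = 40
 *	ICE_TUN_FLAG_MDID_OFF(1) = 2 * (20 + 1) = 42
 * are the byte offsets used when matching the tunnel flag metadata words.
 */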
2164
2165 #define ICE_VLAN_FLAG_MDID 20
2166 #define ICE_VLAN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_VLAN_FLAG_MDID)
2167 #define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
2168
2169 #define ICE_PROTOCOL_MAX_ENTRIES 16
2170
2171 /* Mapping of software defined protocol ID to hardware defined protocol ID */
2172 struct ice_protocol_entry {
2173 enum ice_protocol_type type;
2174 uint8_t protocol_id;
2175 };
2176
2177 struct ice_ether_hdr {
2178 uint8_t dst_addr[ETHER_ADDR_LEN];
2179 uint8_t src_addr[ETHER_ADDR_LEN];
2180 };
2181
2182 struct ice_ethtype_hdr {
2183 uint16_t ethtype_id;
2184 };
2185
2186 struct ice_ether_vlan_hdr {
2187 uint8_t dst_addr[ETHER_ADDR_LEN];
2188 uint8_t src_addr[ETHER_ADDR_LEN];
2189 uint32_t vlan_id;
2190 };
2191
2192 struct ice_vlan_hdr {
2193 uint16_t type;
2194 uint16_t vlan;
2195 };
2196
2197 struct ice_ipv4_hdr {
2198 uint8_t version;
2199 uint8_t tos;
2200 uint16_t total_length;
2201 uint16_t id;
2202 uint16_t frag_off;
2203 uint8_t time_to_live;
2204 uint8_t protocol;
2205 uint16_t check;
2206 uint32_t src_addr;
2207 uint32_t dst_addr;
2208 };
2209
2210 struct ice_le_ver_tc_flow {
2211 union {
2212 struct {
2213 uint32_t flow_label : 20;
2214 uint32_t tc : 8;
2215 uint32_t version : 4;
2216 } fld;
2217 uint32_t val;
2218 } u;
2219 };
2220
2221 struct ice_ipv6_hdr {
2222 uint32_t be_ver_tc_flow;
2223 uint16_t payload_len;
2224 uint8_t next_hdr;
2225 uint8_t hop_limit;
2226 uint8_t src_addr[ICE_IPV6_ADDR_LENGTH];
2227 uint8_t dst_addr[ICE_IPV6_ADDR_LENGTH];
2228 };
2229
2230 struct ice_sctp_hdr {
2231 uint16_t src_port;
2232 uint16_t dst_port;
2233 uint32_t verification_tag;
2234 uint32_t check;
2235 };
2236
2237 struct ice_l4_hdr {
2238 uint16_t src_port;
2239 uint16_t dst_port;
2240 uint16_t len;
2241 uint16_t check;
2242 };
2243
2244 struct ice_udp_tnl_hdr {
2245 uint16_t field;
2246 uint16_t proto_type;
2247 uint32_t vni; /* only use lower 24-bits */
2248 };
2249
2250 struct ice_udp_gtp_hdr {
2251 uint8_t flags;
2252 uint8_t msg_type;
2253 uint16_t rsrvd_len;
2254 uint32_t teid;
2255 uint16_t rsrvd_seq_nbr;
2256 uint8_t rsrvd_n_pdu_nbr;
2257 uint8_t rsrvd_next_ext;
2258 uint8_t rsvrd_ext_len;
2259 uint8_t pdu_type;
2260 uint8_t qfi;
2261 uint8_t rsvrd;
2262 };
2263 struct ice_pppoe_hdr {
2264 uint8_t rsrvd_ver_type;
2265 uint8_t rsrvd_code;
2266 uint16_t session_id;
2267 uint16_t length;
2268 uint16_t ppp_prot_id; /* control and data only */
2269 };
2270
2271 struct ice_l2tpv3_sess_hdr {
2272 uint32_t session_id;
2273 uint64_t cookie;
2274 };
2275
2276 struct ice_nvgre {
2277 uint16_t flags;
2278 uint16_t protocol;
2279 uint32_t tni_flow;
2280 };
2281
2282 union ice_prot_hdr {
2283 struct ice_ether_hdr eth_hdr;
2284 struct ice_ethtype_hdr ethertype;
2285 struct ice_vlan_hdr vlan_hdr;
2286 struct ice_ipv4_hdr ipv4_hdr;
2287 struct ice_ipv6_hdr ipv6_hdr;
2288 struct ice_l4_hdr l4_hdr;
2289 struct ice_sctp_hdr sctp_hdr;
2290 struct ice_udp_tnl_hdr tnl_hdr;
2291 struct ice_nvgre nvgre_hdr;
2292 struct ice_udp_gtp_hdr gtp_hdr;
2293 struct ice_pppoe_hdr pppoe_hdr;
2294 struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
2295 };
2296
/* This is a mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * E.g. the destination address is 3 words in the Ethernet header and the
 * corresponding byte offsets in the actual packet header are 0, 2, 4, while
 * the source address is at 6, 8, 10.
 */
2303 struct ice_prot_ext_tbl_entry {
2304 enum ice_protocol_type prot_type;
2305 /* Byte offset into header of given protocol type */
2306 uint8_t offs[sizeof(union ice_prot_hdr)];
2307 };
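
/*
 * Illustrative (hypothetical) entry following the word-to-byte mapping
 * described above; the real tables live in the driver C code:
 *
 *	{ ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } }
 *
 * i.e. destination MAC words at byte offsets 0/2/4, source MAC words at
 * 6/8/10 and the EtherType word at offset 12.
 */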
2308
2309 /* Extraction Sequence (Field Vector) Table */
2310 struct ice_fv_word {
2311 uint8_t prot_id;
2312 uint16_t off; /* Offset within the protocol header */
2313 uint8_t nresvrd;
2314 } __packed;
2315
2316 #define ICE_MAX_FV_WORDS 48
2317
2318 struct ice_fv {
2319 struct ice_fv_word ew[ICE_MAX_FV_WORDS];
2320 };
2321
2322 /* Extractions to be looked up for a given recipe */
2323 struct ice_prot_lkup_ext {
2324 uint16_t prot_type;
2325 uint8_t n_val_words;
2326 /* create a buffer to hold max words per recipe */
2327 uint16_t field_off[ICE_MAX_CHAIN_WORDS];
2328 uint16_t field_mask[ICE_MAX_CHAIN_WORDS];
2329
2330 struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS];
2331
2332 /* Indicate field offsets that have field vector indices assigned */
2333 ice_declare_bitmap(done, ICE_MAX_CHAIN_WORDS);
2334 };
2335
2336 struct ice_pref_recipe_group {
2337 uint8_t n_val_pairs; /* Number of valid pairs */
2338 struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE];
2339 uint16_t mask[ICE_NUM_WORDS_RECIPE];
2340 };
2341
2342 struct ice_recp_grp_entry {
2343 TAILQ_ENTRY(ice_recp_grp_entry) l_entry;
2344 #define ICE_INVAL_CHAIN_IND 0xFF
2345 uint16_t rid;
2346 uint8_t chain_idx;
2347 uint16_t fv_idx[ICE_NUM_WORDS_RECIPE];
2348 uint16_t fv_mask[ICE_NUM_WORDS_RECIPE];
2349 struct ice_pref_recipe_group r_group;
2350 };
2351
2352 /* Software VSI types. */
2353 enum ice_vsi_type {
2354 ICE_VSI_PF = 0,
2355 ICE_VSI_VF = 1,
2356 ICE_VSI_VMDQ2 = 2,
2357 ICE_VSI_LB = 6,
2358 };
2359
2360
2361 struct ice_link_status {
2362 /* Refer to ice_aq_phy_type for bits definition */
2363 uint64_t phy_type_low;
2364 uint64_t phy_type_high;
2365 uint8_t topo_media_conflict;
2366 uint16_t max_frame_size;
2367 uint16_t link_speed;
2368 uint16_t req_speeds;
2369 uint8_t link_cfg_err;
2370 uint8_t lse_ena; /* Link Status Event notification */
2371 uint8_t link_info;
2372 uint8_t an_info;
2373 uint8_t ext_info;
2374 uint8_t fec_info;
2375 uint8_t pacing;
	/* Refer to the #defines for module_type[ICE_MODULE_TYPE_TOTAL_BYTE]
	 * in the ice_aqc_get_phy_caps structure.
	 */
2379 uint8_t module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
2380 };
2381
2382
2383 /* PHY info such as phy_type, etc... */
2384 struct ice_phy_info {
2385 struct ice_link_status link_info;
2386 struct ice_link_status link_info_old;
2387 uint64_t phy_type_low;
2388 uint64_t phy_type_high;
2389 enum ice_media_type media_type;
2390 uint8_t get_link_info;
	/* Refer to struct ice_aqc_get_link_status_data for details on the
	 * enable bit in curr_user_speed_req.
	 */
2394 uint16_t curr_user_speed_req;
2395 enum ice_fec_mode curr_user_fec_req;
2396 enum ice_fc_mode curr_user_fc_req;
2397 struct ice_aqc_set_phy_cfg_data curr_user_phy_cfg;
2398 };
2399
2400 struct ice_port_info {
2401 struct ice_sched_node *root; /* Root Node per Port */
2402 struct ice_hw *hw; /* back pointer to HW instance */
2403 uint32_t last_node_teid; /* scheduler last node info */
2404 uint16_t sw_id; /* Initial switch ID belongs to port */
2405 uint16_t pf_vf_num;
2406 uint8_t port_state;
2407 #define ICE_SCHED_PORT_STATE_INIT 0x0
2408 #define ICE_SCHED_PORT_STATE_READY 0x1
2409 uint8_t lport;
2410 #define ICE_LPORT_MASK 0xff
2411 struct ice_fc_info fc;
2412 struct ice_mac_info mac;
2413 struct ice_phy_info phy;
2414 struct ice_lock sched_lock; /* protect access to TXSched tree */
2415 struct ice_sched_node *
2416 sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
2417 struct ice_bw_type_info root_node_bw_t_info;
2418 struct ice_bw_type_info tc_node_bw_t_info[ICE_MAX_TRAFFIC_CLASS];
2419 struct ice_qos_cfg qos_cfg;
2420 uint8_t is_vf:1;
2421 uint8_t is_custom_tx_enabled:1;
2422 };
2423
2424 TAILQ_HEAD(ice_vsi_list_map_head, ice_vsi_list_map_info);
2425
2426 #define ICE_MAX_NUM_PROFILES 256
2427
2428 #define ICE_SW_CFG_MAX_BUF_LEN 2048
2429 #define ICE_MAX_SW 256
2430 #define ICE_DFLT_VSI_INVAL 0xff
2431
2432 #define ICE_VSI_INVAL_ID 0xFFFF
2433 #define ICE_INVAL_Q_HANDLE 0xFFFF
2434
2435 #define ICE_FLTR_RX BIT(0)
2436 #define ICE_FLTR_TX BIT(1)
2437 #define ICE_FLTR_RX_LB BIT(2)
2438 #define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
2439
2440 #define ICE_DUMMY_ETH_HDR_LEN 16
2441
2442 /* VSI context structure for add/get/update/free operations */
2443 struct ice_vsi_ctx {
2444 uint16_t vsi_num;
2445 uint16_t vsis_allocd;
2446 uint16_t vsis_unallocated;
2447 uint16_t flags;
2448 struct ice_aqc_vsi_props info;
2449 struct ice_sched_vsi_info sched;
2450 uint8_t alloc_from_pool;
2451 uint8_t vf_num;
2452 uint16_t num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
2453 struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
2454 uint16_t num_rdma_q_entries[ICE_MAX_TRAFFIC_CLASS];
2455 struct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS];
2456 };
2457
2458
2459 struct ice_switch_info {
2460 struct ice_vsi_list_map_head vsi_list_map_head;
2461 struct ice_sw_recipe *recp_list;
2462 uint16_t prof_res_bm_init;
2463 uint16_t max_used_prof_index;
2464
2465 ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
2466 };
2467
2468 TAILQ_HEAD(ice_rl_prof_list_head, ice_aqc_rl_profile_info);
2469 TAILQ_HEAD(ice_agg_list_head, ice_sched_agg_info);
2470
/* BW rate limit profile parameters list entry, along with the bandwidth
 * maintained per layer in the port info.
 */
2474 struct ice_aqc_rl_profile_info {
2475 struct ice_aqc_rl_profile_elem profile;
2476 TAILQ_ENTRY(ice_aqc_rl_profile_info) list_entry;
2477 uint32_t bw; /* requested */
2478 uint16_t prof_id_ref; /* profile ID to node association ref count */
2479 };
2480
2481 /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */
2482 struct ice_vsi_list_map_info {
2483 TAILQ_ENTRY(ice_vsi_list_map_info) list_entry;
2484 ice_declare_bitmap(vsi_map, ICE_MAX_VSI);
2485 uint16_t vsi_list_id;
2486 /* counter to track how many rules are reusing this VSI list */
2487 uint16_t ref_cnt;
2488 };
2489
2490 struct ice_adv_lkup_elem {
2491 enum ice_protocol_type type;
2492 union ice_prot_hdr h_u; /* Header values */
2493 union ice_prot_hdr m_u; /* Mask of header values to match */
2494 };
2495
/*
 * This structure is used to pass information about the lb_en and lan_en
 * flags to ice_add_adv_rule. The values in act are used only if act_valid
 * is set to true; otherwise default values are used.
 */
2502 struct ice_adv_rule_flags_info {
2503 uint32_t act;
2504 uint8_t act_valid; /* indicate if flags in act are valid */
2505 };
2506
2507 enum ice_sw_fwd_act_type {
2508 ICE_FWD_TO_VSI = 0,
2509 ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */
2510 ICE_FWD_TO_Q,
2511 ICE_FWD_TO_QGRP,
2512 ICE_DROP_PACKET,
2513 ICE_LG_ACTION,
2514 ICE_INVAL_ACT
2515 };
2516
2517 struct ice_sw_act_ctrl {
2518 /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
2519 uint16_t src;
2520 uint16_t flag;
2521 enum ice_sw_fwd_act_type fltr_act;
2522 /* Depending on filter action */
2523 union {
2524 /* This is a queue ID in case of ICE_FWD_TO_Q and starting
2525 * queue ID in case of ICE_FWD_TO_QGRP.
2526 */
2527 uint16_t q_id:11;
2528 uint16_t vsi_id:10;
2529 uint16_t hw_vsi_id:10;
2530 uint16_t vsi_list_id:10;
2531 } fwd_id;
2532 /* software VSI handle */
2533 uint16_t vsi_handle;
2534 uint8_t qgrp_size;
2535 };
2536
2537 struct ice_adv_rule_info {
2538 enum ice_sw_tunnel_type tun_type;
2539 struct ice_sw_act_ctrl sw_act;
2540 uint32_t priority;
2541 uint8_t rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
2542 uint8_t add_dir_lkup;
2543 uint16_t fltr_rule_id;
2544 uint16_t lg_id;
2545 uint16_t vlan_type;
2546 struct ice_adv_rule_flags_info flags_info;
2547 };
2548
2549 struct ice_adv_fltr_mgmt_list_entry {
2550 TAILQ_ENTRY(ice_adv_fltr_mgmt_list_entry) list_entry;
2551
2552 struct ice_adv_lkup_elem *lkups;
2553 struct ice_adv_rule_info rule_info;
2554 uint16_t lkups_cnt;
2555 struct ice_vsi_list_map_info *vsi_list_info;
2556 uint16_t vsi_count;
2557 };
2558
2559 enum ice_promisc_flags {
2560 ICE_PROMISC_UCAST_RX = 0,
2561 ICE_PROMISC_UCAST_TX,
2562 ICE_PROMISC_MCAST_RX,
2563 ICE_PROMISC_MCAST_TX,
2564 ICE_PROMISC_BCAST_RX,
2565 ICE_PROMISC_BCAST_TX,
2566 ICE_PROMISC_VLAN_RX,
2567 ICE_PROMISC_VLAN_TX,
2568 ICE_PROMISC_UCAST_RX_LB,
2569 /* Max value */
2570 ICE_PROMISC_MAX,
2571 };
2572
2573 /* type of filter src ID */
2574 enum ice_src_id {
2575 ICE_SRC_ID_UNKNOWN = 0,
2576 ICE_SRC_ID_VSI,
2577 ICE_SRC_ID_QUEUE,
2578 ICE_SRC_ID_LPORT,
2579 };
2580
2581 struct ice_fltr_info {
2582 /* Look up information: how to look up packet */
2583 enum ice_sw_lkup_type lkup_type;
2584 /* Forward action: filter action to do after lookup */
2585 enum ice_sw_fwd_act_type fltr_act;
2586 /* rule ID returned by firmware once filter rule is created */
2587 uint16_t fltr_rule_id;
2588 uint16_t flag;
2589
2590 /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
2591 uint16_t src;
2592 enum ice_src_id src_id;
2593
2594 union {
2595 struct {
2596 uint8_t mac_addr[ETHER_ADDR_LEN];
2597 } mac;
2598 struct {
2599 uint8_t mac_addr[ETHER_ADDR_LEN];
2600 uint16_t vlan_id;
2601 } mac_vlan;
2602 struct {
2603 uint16_t vlan_id;
2604 uint16_t tpid;
2605 uint8_t tpid_valid;
2606 } vlan;
2607 /* Set lkup_type as ICE_SW_LKUP_ETHERTYPE
2608 * if just using ethertype as filter. Set lkup_type as
2609 * ICE_SW_LKUP_ETHERTYPE_MAC if MAC also needs to be
2610 * passed in as filter.
2611 */
2612 struct {
2613 uint16_t ethertype;
2614 uint8_t mac_addr[ETHER_ADDR_LEN]; /* optional */
2615 } ethertype_mac;
	} l_data; /* Make sure to zero out the memory of l_data before using
	 * it, or set only the data associated with the lookup match;
	 * everything else should be zero.
	 */
2620
2621 /* Depending on filter action */
2622 union {
2623 /* queue ID in case of ICE_FWD_TO_Q and starting
2624 * queue ID in case of ICE_FWD_TO_QGRP.
2625 */
2626 uint16_t q_id:11;
2627 uint16_t hw_vsi_id:10;
2628 uint16_t vsi_list_id:10;
2629 } fwd_id;
2630
2631 /* Sw VSI handle */
2632 uint16_t vsi_handle;
2633
2634 /* Set to num_queues if action is ICE_FWD_TO_QGRP. This field
2635 * determines the range of queues the packet needs to be forwarded to.
2636 * Note that qgrp_size must be set to a power of 2.
2637 */
2638 uint8_t qgrp_size;
2639
	/* Rule creation populates these indicators based on the switch type */
2641 uint8_t lb_en; /* Indicate if packet can be looped back */
2642 uint8_t lan_en; /* Indicate if packet can be forwarded to the uplink */
2643 };
2644
2645 /**
2646 * enum ice_fltr_marker - Marker for syncing OS and driver filter lists
2647 * @ICE_FLTR_NOT_FOUND: initial state, indicates filter has not been found
2648 * @ICE_FLTR_FOUND: set when a filter has been found in both lists
2649 *
2650 * This enumeration is used to help sync an operating system provided filter
2651 * list with the filters previously added.
2652 *
2653 * This is required for FreeBSD because the operating system does not provide
2654 * individual indications of whether a filter has been added or deleted, but
2655 * instead just notifies the driver with the entire new list.
2656 *
2657 * To use this marker state, the driver shall initially reset all filters to
2658 * the ICE_FLTR_NOT_FOUND state. Then, for each filter in the OS list, it
2659 * shall search the driver list for the filter. If found, the filter state
2660 * will be set to ICE_FLTR_FOUND. If not found, that filter will be added.
2661 * Finally, the driver shall search the internal filter list for all filters
2662 * still marked as ICE_FLTR_NOT_FOUND and remove them.
2663 */
2664 enum ice_fltr_marker {
2665 ICE_FLTR_NOT_FOUND,
2666 ICE_FLTR_FOUND,
2667 };
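
/*
 * Minimal sketch of the mark-and-sweep procedure described above; the
 * helper names and list heads are hypothetical, the real logic lives in
 * the driver C code:
 *
 *	TAILQ_FOREACH(f, &driver_list, list_entry)
 *		f->marker = ICE_FLTR_NOT_FOUND;
 *	TAILQ_FOREACH(os_f, &os_list, os_entry) {
 *		f = find_matching_filter(&driver_list, os_f);
 *		if (f != NULL)
 *			f->marker = ICE_FLTR_FOUND;
 *		else
 *			add_filter(os_f);
 *	}
 *	TAILQ_FOREACH_SAFE(f, &driver_list, list_entry, tmp)
 *		if (f->marker == ICE_FLTR_NOT_FOUND)
 *			remove_filter(f);
 */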
2668
2669 struct ice_fltr_list_entry {
2670 TAILQ_ENTRY(ice_fltr_list_entry) list_entry;
2671 enum ice_status status;
2672 struct ice_fltr_info fltr_info;
2673 };
2674
/* This defines an entry in the list that maintains the MAC or VLAN membership
 * to HW list mapping, since multiple VSIs can subscribe to the same MAC or
 * VLAN. As an optimization, the VSI list should be created only when a
 * second VSI becomes a subscriber to the same MAC address. VSI lists are
 * always used for VLAN membership.
 */
2680 */
2681 struct ice_fltr_mgmt_list_entry {
2682 /* back pointer to VSI list ID to VSI list mapping */
2683 struct ice_vsi_list_map_info *vsi_list_info;
2684 uint16_t vsi_count;
2685 #define ICE_INVAL_LG_ACT_INDEX 0xffff
2686 uint16_t lg_act_idx;
2687 #define ICE_INVAL_SW_MARKER_ID 0xffff
2688 uint16_t sw_marker_id;
2689 TAILQ_ENTRY(ice_fltr_mgmt_list_entry) list_entry;
2690 struct ice_fltr_info fltr_info;
2691 #define ICE_INVAL_COUNTER_ID 0xff
2692 uint8_t counter_index;
2693 enum ice_fltr_marker marker;
2694 };
2695
2696
2697 #define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((uint32_t)((~0ULL) << (32 - (prefix))))
2698 #define ICE_FLOW_PROF_ID_INVAL 0xfffffffffffffffful
2699 #define ICE_FLOW_PROF_ID_BYPASS 0
2700 #define ICE_FLOW_PROF_ID_DEFAULT 1
2701 #define ICE_FLOW_ENTRY_HANDLE_INVAL 0
2702 #define ICE_FLOW_VSI_INVAL 0xffff
2703 #define ICE_FLOW_FLD_OFF_INVAL 0xffff
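
/*
 * Worked examples for ICE_IPV4_MAKE_PREFIX_MASK (pure arithmetic):
 *	prefix 24 -> 0xFFFFFF00
 *	prefix  8 -> 0xFF000000
 *	prefix  0 -> 0x00000000
 * The 64-bit ~0ULL intermediate keeps the /0 case well defined, since
 * shifting a 32-bit value by 32 would be undefined behaviour in C.
 */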
2704
2705 /* Generate flow hash field from flow field type(s) */
2706 #define ICE_FLOW_HASH_IPV4 \
2707 (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
2708 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
2709 #define ICE_FLOW_HASH_IPV6 \
2710 (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
2711 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
2712 #define ICE_FLOW_HASH_TCP_PORT \
2713 (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
2714 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
2715 #define ICE_FLOW_HASH_UDP_PORT \
2716 (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
2717 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
2718 #define ICE_FLOW_HASH_SCTP_PORT \
2719 (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
2720 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
2721
2722 #define ICE_HASH_INVALID 0
2723 #define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
2724 #define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
2725 #define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
2726 #define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
2727 #define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
2728 #define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
2729
/* Protocol header fields within a packet segment. A segment consists of one
 * or more protocol headers that make up a logical group. Each logical group
 * of protocol headers encapsulates, or is encapsulated by, tunneling or
 * encapsulation protocols for network virtualization such as GRE, VXLAN, etc.
 */
2736 enum ice_flow_seg_hdr {
2737 ICE_FLOW_SEG_HDR_NONE = 0x00000000,
2738 ICE_FLOW_SEG_HDR_ETH = 0x00000001,
2739 ICE_FLOW_SEG_HDR_VLAN = 0x00000002,
2740 ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
2741 ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
2742 ICE_FLOW_SEG_HDR_ARP = 0x00000010,
2743 ICE_FLOW_SEG_HDR_ICMP = 0x00000020,
2744 ICE_FLOW_SEG_HDR_TCP = 0x00000040,
2745 ICE_FLOW_SEG_HDR_UDP = 0x00000080,
2746 ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
2747 ICE_FLOW_SEG_HDR_GRE = 0x00000200,
	/* The following are additive bits for ICE_FLOW_SEG_HDR_IPV4 and
	 * ICE_FLOW_SEG_HDR_IPV6.
	 */
2751 ICE_FLOW_SEG_HDR_IPV_FRAG = 0x40000000,
2752 ICE_FLOW_SEG_HDR_IPV_OTHER = 0x80000000,
2753 };
2754
2755 enum ice_flow_field {
2756 /* L2 */
2757 ICE_FLOW_FIELD_IDX_ETH_DA,
2758 ICE_FLOW_FIELD_IDX_ETH_SA,
2759 ICE_FLOW_FIELD_IDX_S_VLAN,
2760 ICE_FLOW_FIELD_IDX_C_VLAN,
2761 ICE_FLOW_FIELD_IDX_ETH_TYPE,
2762 /* L3 */
2763 ICE_FLOW_FIELD_IDX_IPV4_DSCP,
2764 ICE_FLOW_FIELD_IDX_IPV6_DSCP,
2765 ICE_FLOW_FIELD_IDX_IPV4_TTL,
2766 ICE_FLOW_FIELD_IDX_IPV4_PROT,
2767 ICE_FLOW_FIELD_IDX_IPV6_TTL,
2768 ICE_FLOW_FIELD_IDX_IPV6_PROT,
2769 ICE_FLOW_FIELD_IDX_IPV4_SA,
2770 ICE_FLOW_FIELD_IDX_IPV4_DA,
2771 ICE_FLOW_FIELD_IDX_IPV6_SA,
2772 ICE_FLOW_FIELD_IDX_IPV6_DA,
2773 /* L4 */
2774 ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
2775 ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
2776 ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
2777 ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
2778 ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
2779 ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
2780 ICE_FLOW_FIELD_IDX_TCP_FLAGS,
2781 /* ARP */
2782 ICE_FLOW_FIELD_IDX_ARP_SIP,
2783 ICE_FLOW_FIELD_IDX_ARP_DIP,
2784 ICE_FLOW_FIELD_IDX_ARP_SHA,
2785 ICE_FLOW_FIELD_IDX_ARP_DHA,
2786 ICE_FLOW_FIELD_IDX_ARP_OP,
2787 /* ICMP */
2788 ICE_FLOW_FIELD_IDX_ICMP_TYPE,
2789 ICE_FLOW_FIELD_IDX_ICMP_CODE,
2790 /* GRE */
2791 ICE_FLOW_FIELD_IDX_GRE_KEYID,
2792 /* The total number of enums must not exceed 64 */
2793 ICE_FLOW_FIELD_IDX_MAX
2794 };
2795
2796 /* Flow headers and fields for AVF support */
2797 enum ice_flow_avf_hdr_field {
2798 /* Values 0 - 28 are reserved for future use */
2799 ICE_AVF_FLOW_FIELD_INVALID = 0,
2800 ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
2801 ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
2802 ICE_AVF_FLOW_FIELD_IPV4_UDP,
2803 ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
2804 ICE_AVF_FLOW_FIELD_IPV4_TCP,
2805 ICE_AVF_FLOW_FIELD_IPV4_SCTP,
2806 ICE_AVF_FLOW_FIELD_IPV4_OTHER,
2807 ICE_AVF_FLOW_FIELD_FRAG_IPV4,
2808 /* Values 37-38 are reserved */
2809 ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
2810 ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
2811 ICE_AVF_FLOW_FIELD_IPV6_UDP,
2812 ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
2813 ICE_AVF_FLOW_FIELD_IPV6_TCP,
2814 ICE_AVF_FLOW_FIELD_IPV6_SCTP,
2815 ICE_AVF_FLOW_FIELD_IPV6_OTHER,
2816 ICE_AVF_FLOW_FIELD_FRAG_IPV6,
2817 ICE_AVF_FLOW_FIELD_RSVD47,
2818 ICE_AVF_FLOW_FIELD_FCOE_OX,
2819 ICE_AVF_FLOW_FIELD_FCOE_RX,
2820 ICE_AVF_FLOW_FIELD_FCOE_OTHER,
2821 /* Values 51-62 are reserved */
2822 ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
2823 ICE_AVF_FLOW_FIELD_MAX
2824 };
2825
/* Supported RSS offloads. This macro is defined to support the
 * VIRTCHNL_OP_GET_RSS_HENA_CAPS op. The PF driver sends the RSS hardware
 * capabilities to the caller of this op.
 */
2830 #define ICE_DEFAULT_RSS_HENA ( \
2831 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
2832 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
2833 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
2834 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
2835 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
2836 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
2837 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
2838 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
2839 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
2840 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
2841 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
2842 BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
2843 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
2844 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
2845 BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
2846 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
2847
2848 enum ice_rss_cfg_hdr_type {
2849 ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
2850 ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */
2851 /* take inner headers as inputset for packet with outer IPv4. */
2852 ICE_RSS_INNER_HEADERS_W_OUTER_IPV4,
2853 /* take inner headers as inputset for packet with outer IPv6. */
2854 ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
	/* take inner as inputset for GTPoGRE with outer IPv4 + GRE. */
	ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE,
	/* take inner as inputset for GTPoGRE with outer IPv6 + GRE. */
	ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE,
	/* take outer headers first then inner headers as inputset */
	ICE_RSS_ANY_HEADERS
2861 };
2862
2863 struct ice_rss_hash_cfg {
2864 uint32_t addl_hdrs; /* protocol header fields */
2865 uint64_t hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
2866 enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */
2867 bool symm; /* symmetric or asymmetric hash */
2868 };
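
/*
 * Illustrative initialization (hypothetical variable, not part of this
 * header): a symmetric RSS hash over the outer IPv4 addresses and TCP
 * ports could be described as
 *
 *	struct ice_rss_hash_cfg cfg = {
 *		.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
 *		.hash_flds = ICE_HASH_TCP_IPV4,
 *		.hdr_type = ICE_RSS_OUTER_HEADERS,
 *		.symm = true,
 *	};
 */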
2869
2870 enum ice_flow_dir {
2871 ICE_FLOW_DIR_UNDEFINED = 0,
2872 ICE_FLOW_TX = 0x01,
2873 ICE_FLOW_RX = 0x02,
2874 ICE_FLOW_TX_RX = ICE_FLOW_RX | ICE_FLOW_TX
2875 };
2876
2877 enum ice_flow_priority {
2878 ICE_FLOW_PRIO_LOW,
2879 ICE_FLOW_PRIO_NORMAL,
2880 ICE_FLOW_PRIO_HIGH
2881 };
2882
2883 #define ICE_FLOW_SEG_SINGLE 1
2884 #define ICE_FLOW_SEG_MAX 2
2885 #define ICE_FLOW_PROFILE_MAX 1024
2886 #define ICE_FLOW_ACL_FIELD_VECTOR_MAX 32
2887 #define ICE_FLOW_FV_EXTRACT_SZ 2
2888
2889 #define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (uint32_t)(val))
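
/*
 * Example use (hypothetical segment pointer): OR the desired header bits
 * into a segment's hdrs mask, e.g.
 *	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP);
 */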
2890
2891 struct ice_flow_seg_xtrct {
2892 uint8_t prot_id; /* Protocol ID of extracted header field */
2893 uint16_t off; /* Starting offset of the field in header in bytes */
2894 uint8_t idx; /* Index of FV entry used */
2895 uint8_t disp; /* Displacement of field in bits fr. FV entry's start */
2896 };
2897
2898 enum ice_flow_fld_match_type {
2899 ICE_FLOW_FLD_TYPE_REG, /* Value, mask */
2900 ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */
2901 ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */
2902 ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */
2903 };
2904
2905 struct ice_flow_fld_loc {
2906 /* Describe offsets of field information relative to the beginning of
2907 * input buffer provided when adding flow entries.
2908 */
2909 uint16_t val; /* Offset where the value is located */
2910 uint16_t mask; /* Offset where the mask/prefix value is located */
2911 uint16_t last; /* Length or offset where the upper value is located */
2912 };
2913
2914 struct ice_flow_fld_info {
2915 enum ice_flow_fld_match_type type;
2916 /* Location where to retrieve data from an input buffer */
2917 struct ice_flow_fld_loc src;
2918 /* Location where to put the data into the final entry buffer */
2919 struct ice_flow_fld_loc entry;
2920 struct ice_flow_seg_xtrct xtrct;
2921 };
2922
2923 struct ice_flow_seg_info {
2924 uint32_t hdrs; /* Bitmask indicating protocol headers present */
2925 /* Bitmask indicating header fields to be matched */
2926 ice_declare_bitmap(match, ICE_FLOW_FIELD_IDX_MAX);
2927 /* Bitmask indicating header fields matched as ranges */
2928 ice_declare_bitmap(range, ICE_FLOW_FIELD_IDX_MAX);
2929
2930 struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
2931 };
2932
2933 #define ICE_FLOW_ENTRY_HNDL(e) ((uint64_t)e)
2934
2935 struct ice_flow_prof {
2936 TAILQ_ENTRY(ice_flow_prof) l_entry;
2937
2938 uint64_t id;
2939 enum ice_flow_dir dir;
2940 uint8_t segs_cnt;
2941
2942 struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
2943
2944 /* software VSI handles referenced by this flow profile */
2945 ice_declare_bitmap(vsis, ICE_MAX_VSI);
2946
2947 union {
2948 /* struct sw_recipe */
2949 bool symm; /* Symmetric Hash for RSS */
2950 } cfg;
2951 };
2952
2953 struct ice_rss_cfg {
2954 TAILQ_ENTRY(ice_rss_cfg) l_entry;
2955 /* bitmap of VSIs added to the RSS entry */
2956 ice_declare_bitmap(vsis, ICE_MAX_VSI);
2957 struct ice_rss_hash_cfg hash;
2958 };
2959
2960 TAILQ_HEAD(ice_rss_cfg_head, ice_rss_cfg);
2961
2962 enum ice_flow_action_type {
2963 ICE_FLOW_ACT_NOP,
2964 ICE_FLOW_ACT_ALLOW,
2965 ICE_FLOW_ACT_DROP,
2966 ICE_FLOW_ACT_CNTR_PKT,
2967 ICE_FLOW_ACT_FWD_VSI,
2968 ICE_FLOW_ACT_FWD_VSI_LIST, /* Should be abstracted away */
2969 ICE_FLOW_ACT_FWD_QUEUE, /* Can Queues be abstracted away? */
2970 ICE_FLOW_ACT_FWD_QUEUE_GROUP, /* Can Queues be abstracted away? */
2971 ICE_FLOW_ACT_PUSH,
2972 ICE_FLOW_ACT_POP,
2973 ICE_FLOW_ACT_MODIFY,
2974 ICE_FLOW_ACT_CNTR_BYTES,
2975 ICE_FLOW_ACT_CNTR_PKT_BYTES,
2976 ICE_FLOW_ACT_GENERIC_0,
2977 ICE_FLOW_ACT_GENERIC_1,
2978 ICE_FLOW_ACT_GENERIC_2,
2979 ICE_FLOW_ACT_GENERIC_3,
2980 ICE_FLOW_ACT_GENERIC_4,
2981 ICE_FLOW_ACT_RPT_FLOW_ID,
2982 ICE_FLOW_ACT_BUILD_PROF_IDX,
2983 };
2984
2985 struct ice_flow_action {
2986 enum ice_flow_action_type type;
2987 union {
2988 uint32_t dummy;
2989 } data;
2990 };
2991
2992 TAILQ_HEAD(ice_recp_grp_entry_head, ice_recp_grp_entry);
2993 TAILQ_HEAD(ice_fltr_list_head, ice_fltr_list_entry);
2994 TAILQ_HEAD(ice_fltr_mgmt_list_head, ice_fltr_mgmt_list_entry);
2995 TAILQ_HEAD(ice_adv_fltr_mgmt_list_head, ice_adv_fltr_mgmt_list_entry);
2996
2997 /* Package minimal version supported */
2998 #define ICE_PKG_SUPP_VER_MAJ 1
2999 #define ICE_PKG_SUPP_VER_MNR 3
3000
3001 /* Package format version */
3002 #define ICE_PKG_FMT_VER_MAJ 1
3003 #define ICE_PKG_FMT_VER_MNR 0
3004 #define ICE_PKG_FMT_VER_UPD 0
3005 #define ICE_PKG_FMT_VER_DFT 0
3006
3007 #define ICE_PKG_CNT 4
3008
3009 enum ice_ddp_state {
3010 /* Indicates that this call to ice_init_pkg
3011 * successfully loaded the requested DDP package
3012 */
3013 ICE_DDP_PKG_SUCCESS = 0,
3014
	/* Generic error for "already loaded" cases; it is mapped later to
	 * a more specific one (one of the next 3).
	 */
3018 ICE_DDP_PKG_ALREADY_LOADED = -1,
3019
3020 /* Indicates that a DDP package of the same version has already been
3021 * loaded onto the device by a previous call or by another PF
3022 */
3023 ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
3024
3025 /* The device has a DDP package that is not supported by the driver */
3026 ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
3027
3028 /* The device has a compatible package
3029 * (but different from the request) already loaded
3030 */
3031 ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
3032
3033 /* The firmware loaded on the device is not compatible with
3034 * the DDP package loaded
3035 */
3036 ICE_DDP_PKG_FW_MISMATCH = -5,
3037
3038 /* The DDP package file is invalid */
3039 ICE_DDP_PKG_INVALID_FILE = -6,
3040
3041 /* The version of the DDP package provided is higher than
3042 * the driver supports
3043 */
3044 ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
3045
3046 /* The version of the DDP package provided is lower than the
3047 * driver supports
3048 */
3049 ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
3050
3051 /* Missing security manifest in DDP pkg */
3052 ICE_DDP_PKG_NO_SEC_MANIFEST = -9,
3053
3054 /* The RSA signature of the DDP package file provided is invalid */
3055 ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -10,
3056
3057 /* The DDP package file security revision is too low and not
3058 * supported by firmware
3059 */
3060 ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW = -11,
3061
3062 /* Manifest hash mismatch */
3063 ICE_DDP_PKG_MANIFEST_INVALID = -12,
3064
3065 /* Buffer hash mismatches manifest */
3066 ICE_DDP_PKG_BUFFER_INVALID = -13,
3067
3068 /* Other errors */
3069 ICE_DDP_PKG_ERR = -14,
3070 };
3071
3072
3073 /* ice package section IDs */
3074 #define ICE_SID_METADATA 1
3075 #define ICE_SID_XLT0_SW 10
3076 #define ICE_SID_XLT_KEY_BUILDER_SW 11
3077 #define ICE_SID_XLT1_SW 12
3078 #define ICE_SID_XLT2_SW 13
3079 #define ICE_SID_PROFID_TCAM_SW 14
3080 #define ICE_SID_PROFID_REDIR_SW 15
3081 #define ICE_SID_FLD_VEC_SW 16
3082 #define ICE_SID_CDID_KEY_BUILDER_SW 17
3083 #define ICE_SID_CDID_REDIR_SW 18
3084
3085 #define ICE_SID_XLT0_ACL 20
3086 #define ICE_SID_XLT_KEY_BUILDER_ACL 21
3087 #define ICE_SID_XLT1_ACL 22
3088 #define ICE_SID_XLT2_ACL 23
3089 #define ICE_SID_PROFID_TCAM_ACL 24
3090 #define ICE_SID_PROFID_REDIR_ACL 25
3091 #define ICE_SID_FLD_VEC_ACL 26
3092 #define ICE_SID_CDID_KEY_BUILDER_ACL 27
3093 #define ICE_SID_CDID_REDIR_ACL 28
3094
3095 #define ICE_SID_XLT0_FD 30
3096 #define ICE_SID_XLT_KEY_BUILDER_FD 31
3097 #define ICE_SID_XLT1_FD 32
3098 #define ICE_SID_XLT2_FD 33
3099 #define ICE_SID_PROFID_TCAM_FD 34
3100 #define ICE_SID_PROFID_REDIR_FD 35
3101 #define ICE_SID_FLD_VEC_FD 36
3102 #define ICE_SID_CDID_KEY_BUILDER_FD 37
3103 #define ICE_SID_CDID_REDIR_FD 38
3104
3105 #define ICE_SID_XLT0_RSS 40
3106 #define ICE_SID_XLT_KEY_BUILDER_RSS 41
3107 #define ICE_SID_XLT1_RSS 42
3108 #define ICE_SID_XLT2_RSS 43
3109 #define ICE_SID_PROFID_TCAM_RSS 44
3110 #define ICE_SID_PROFID_REDIR_RSS 45
3111 #define ICE_SID_FLD_VEC_RSS 46
3112 #define ICE_SID_CDID_KEY_BUILDER_RSS 47
3113 #define ICE_SID_CDID_REDIR_RSS 48
3114
3115 #define ICE_SID_RXPARSER_CAM 50
3116 #define ICE_SID_RXPARSER_NOMATCH_CAM 51
3117 #define ICE_SID_RXPARSER_IMEM 52
3118 #define ICE_SID_RXPARSER_XLT0_BUILDER 53
3119 #define ICE_SID_RXPARSER_NODE_PTYPE 54
3120 #define ICE_SID_RXPARSER_MARKER_PTYPE 55
3121 #define ICE_SID_RXPARSER_BOOST_TCAM 56
3122 #define ICE_SID_RXPARSER_PROTO_GRP 57
3123 #define ICE_SID_RXPARSER_METADATA_INIT 58
3124 #define ICE_SID_RXPARSER_XLT0 59
3125
3126 #define ICE_SID_TXPARSER_CAM 60
3127 #define ICE_SID_TXPARSER_NOMATCH_CAM 61
3128 #define ICE_SID_TXPARSER_IMEM 62
3129 #define ICE_SID_TXPARSER_XLT0_BUILDER 63
3130 #define ICE_SID_TXPARSER_NODE_PTYPE 64
3131 #define ICE_SID_TXPARSER_MARKER_PTYPE 65
3132 #define ICE_SID_TXPARSER_BOOST_TCAM 66
3133 #define ICE_SID_TXPARSER_PROTO_GRP 67
3134 #define ICE_SID_TXPARSER_METADATA_INIT 68
3135 #define ICE_SID_TXPARSER_XLT0 69
3136
3137 #define ICE_SID_RXPARSER_INIT_REDIR 70
3138 #define ICE_SID_TXPARSER_INIT_REDIR 71
3139 #define ICE_SID_RXPARSER_MARKER_GRP 72
3140 #define ICE_SID_TXPARSER_MARKER_GRP 73
3141 #define ICE_SID_RXPARSER_LAST_PROTO 74
3142 #define ICE_SID_TXPARSER_LAST_PROTO 75
3143 #define ICE_SID_RXPARSER_PG_SPILL 76
3144 #define ICE_SID_TXPARSER_PG_SPILL 77
3145 #define ICE_SID_RXPARSER_NOMATCH_SPILL 78
3146 #define ICE_SID_TXPARSER_NOMATCH_SPILL 79
3147
3148 #define ICE_SID_XLT0_PE 80
3149 #define ICE_SID_XLT_KEY_BUILDER_PE 81
3150 #define ICE_SID_XLT1_PE 82
3151 #define ICE_SID_XLT2_PE 83
3152 #define ICE_SID_PROFID_TCAM_PE 84
3153 #define ICE_SID_PROFID_REDIR_PE 85
3154 #define ICE_SID_FLD_VEC_PE 86
3155 #define ICE_SID_CDID_KEY_BUILDER_PE 87
3156 #define ICE_SID_CDID_REDIR_PE 88
3157
3158 #define ICE_SID_RXPARSER_FLAG_REDIR 97
3159
3160 /* Label Metadata section IDs */
3161 #define ICE_SID_LBL_FIRST 0x80000010
3162 #define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
3163 #define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
3164 #define ICE_SID_LBL_RESERVED_12 0x80000012
3165 #define ICE_SID_LBL_RESERVED_13 0x80000013
3166 #define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
3167 #define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
3168 #define ICE_SID_LBL_PTYPE 0x80000016
3169 #define ICE_SID_LBL_PROTOCOL_ID 0x80000017
3170 #define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
3171 #define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
3172 #define ICE_SID_LBL_RXPARSER_PG 0x8000001A
3173 #define ICE_SID_LBL_TXPARSER_PG 0x8000001B
3174 #define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
3175 #define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
3176 #define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
3177 #define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
3178 #define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
3179 #define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
3180 #define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
3181 #define ICE_SID_LBL_FLAG 0x80000023
3182 #define ICE_SID_LBL_REG 0x80000024
3183 #define ICE_SID_LBL_SW_PTG 0x80000025
3184 #define ICE_SID_LBL_ACL_PTG 0x80000026
3185 #define ICE_SID_LBL_PE_PTG 0x80000027
3186 #define ICE_SID_LBL_RSS_PTG 0x80000028
3187 #define ICE_SID_LBL_FD_PTG 0x80000029
3188 #define ICE_SID_LBL_SW_VSIG 0x8000002A
3189 #define ICE_SID_LBL_ACL_VSIG 0x8000002B
3190 #define ICE_SID_LBL_PE_VSIG 0x8000002C
3191 #define ICE_SID_LBL_RSS_VSIG 0x8000002D
3192 #define ICE_SID_LBL_FD_VSIG 0x8000002E
3193 #define ICE_SID_LBL_PTYPE_META 0x8000002F
3194 #define ICE_SID_LBL_SW_PROFID 0x80000030
3195 #define ICE_SID_LBL_ACL_PROFID 0x80000031
3196 #define ICE_SID_LBL_PE_PROFID 0x80000032
3197 #define ICE_SID_LBL_RSS_PROFID 0x80000033
3198 #define ICE_SID_LBL_FD_PROFID 0x80000034
3199 #define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
3200 #define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
3201 #define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
3202 #define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
3203 /* The following define MUST be updated to reflect the last label section ID */
3204 #define ICE_SID_LBL_LAST 0x80000038
3205
3206 /* Label ICE runtime configuration section IDs */
3207 #define ICE_SID_TX_5_LAYER_TOPO 0x10
3208
3209 enum ice_block {
3210 ICE_BLK_SW = 0,
3211 ICE_BLK_ACL,
3212 ICE_BLK_FD,
3213 ICE_BLK_RSS,
3214 ICE_BLK_PE,
3215 ICE_BLK_COUNT
3216 };
3217
3218 /* Tunnel enabling */
3219
3220 enum ice_tunnel_type {
3221 TNL_VXLAN = 0,
3222 TNL_GENEVE,
3223 TNL_GRETAP,
3224 TNL_GTP,
3225 TNL_GTPC,
3226 TNL_GTPU,
3227 TNL_LAST = 0xFF,
3228 TNL_ALL = 0xFF,
3229 };
3230
3231 struct ice_tunnel_type_scan {
3232 enum ice_tunnel_type type;
3233 const char *label_prefix;
3234 };
3235
3236 struct ice_tunnel_entry {
3237 enum ice_tunnel_type type;
3238 uint16_t boost_addr;
3239 uint16_t port;
3240 uint16_t ref;
3241 struct ice_boost_tcam_entry *boost_entry;
3242 uint8_t valid;
3243 uint8_t in_use;
3244 uint8_t marked;
3245 };
3246
3247 #define ICE_TUNNEL_MAX_ENTRIES 16
3248
3249 struct ice_tunnel_table {
3250 struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
3251 uint16_t count;
3252 };
3253
3254 struct ice_pkg_es {
3255 uint16_t count;
3256 uint16_t offset;
3257 struct ice_fv_word es[STRUCT_HACK_VAR_LEN];
3258 };
3259
3260 TAILQ_HEAD(ice_prof_map_head, ice_prof_map);
3261
3262 struct ice_es {
3263 uint32_t sid;
3264 uint16_t count;
3265 uint16_t fvw;
3266 uint16_t *ref_count;
3267 struct ice_prof_map_head prof_map;
3268 struct ice_fv_word *t;
3269 struct ice_lock prof_map_lock; /* protect access to profiles list */
3270 uint8_t *written;
3271 uint8_t reverse; /* set to true to reverse FV order */
3272 };
3273
3274 /* PTYPE Group management */
3275
/* Note: The XLT1 table takes a 13-bit value as input and produces an 8-bit
 * packet type group (PTG) ID as output.
 *
 * Note: PTG 0 is the default packet type group and it is assumed that all
 * PTYPEs are a part of this group until moved to a new PTG.
 */
3282 #define ICE_DEFAULT_PTG 0
3283
3284 struct ice_ptg_entry {
3285 struct ice_ptg_ptype *first_ptype;
3286 uint8_t in_use;
3287 };
3288
3289 struct ice_ptg_ptype {
3290 struct ice_ptg_ptype *next_ptype;
3291 uint8_t ptg;
3292 };
3293
3294 #define ICE_MAX_TCAM_PER_PROFILE 32
3295 #define ICE_MAX_PTG_PER_PROFILE 32
3296
3297 struct ice_prof_map {
3298 TAILQ_ENTRY(ice_prof_map) list;
3299 uint64_t profile_cookie;
3300 uint64_t context;
3301 uint8_t prof_id;
3302 uint8_t ptg_cnt;
3303 uint8_t ptg[ICE_MAX_PTG_PER_PROFILE];
3304 };
3305
3306 #define ICE_INVALID_TCAM 0xFFFF
3307
3308 struct ice_tcam_inf {
3309 uint16_t tcam_idx;
3310 uint8_t ptg;
3311 uint8_t prof_id;
3312 uint8_t in_use;
3313 };
3314
3315 struct ice_vsig_prof {
3316 TAILQ_ENTRY(ice_vsig_prof) list;
3317 uint64_t profile_cookie;
3318 uint8_t prof_id;
3319 uint8_t tcam_count;
3320 struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE];
3321 };
3322
3323 TAILQ_HEAD(ice_vsig_prof_head, ice_vsig_prof);
3324
3325 struct ice_vsig_entry {
3326 struct ice_vsig_prof_head prop_lst;
3327 struct ice_vsig_vsi *first_vsi;
3328 uint8_t in_use;
3329 };
3330
3331 struct ice_vsig_vsi {
3332 struct ice_vsig_vsi *next_vsi;
3333 uint32_t prop_mask;
3334 uint16_t changed;
3335 uint16_t vsig;
3336 };
3337
3338 #define ICE_XLT1_CNT 1024
3339 #define ICE_MAX_PTGS 256
3340
3341 /* XLT1 Table */
3342 struct ice_xlt1 {
3343 struct ice_ptg_entry *ptg_tbl;
3344 struct ice_ptg_ptype *ptypes;
3345 uint8_t *t;
3346 uint32_t sid;
3347 uint16_t count;
3348 };
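/*
 * Example (sketch, illustrative variable names): software keeps a shadow of
 * the XLT1 mapping, so the PTG currently assigned to a ptype can be read
 * back, and the members of a PTG walked via the list rooted in ptg_tbl[].
 */
#if 0
	struct ice_xlt1 *xlt1 = &hw->blk[blk].xlt1;
	uint8_t ptg = xlt1->ptypes[ptype].ptg;	/* current PTG of this ptype */
	struct ice_ptg_ptype *p;

	for (p = xlt1->ptg_tbl[ptg].first_ptype; p != NULL; p = p->next_ptype)
		;	/* each entry is one ptype assigned to this PTG */
#endif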
3349
3350
3351 #define ICE_XLT2_CNT 768
3352 #define ICE_MAX_VSIGS 768
3353
3354 /* VSIG bit layout:
3355 * [0:12]: incremental VSIG index 1 to ICE_MAX_VSIGS
3356 * [13:15]: PF number of device
3357 */
3358 #define ICE_VSIG_IDX_M (0x1FFF)
3359 #define ICE_PF_NUM_S 13
3360 #define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S)
3361 #define ICE_VSIG_VALUE(vsig, pf_id) \
3362 ((uint16_t)((((uint16_t)(vsig)) & ICE_VSIG_IDX_M) | \
3363 (((uint16_t)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M)))
3364 #define ICE_DEFAULT_VSIG 0
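/*
 * Example: ICE_VSIG_VALUE() packs the PF number into the top three bits of
 * the VSIG. For PF 2 and VSIG index 5 this yields
 * (5 & 0x1FFF) | ((2 << 13) & 0xE000) = 0x4005.
 */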
3365
3366 /* XLT2 Table */
3367 struct ice_xlt2 {
3368 struct ice_vsig_entry *vsig_tbl;
3369 struct ice_vsig_vsi *vsis;
3370 uint16_t *t;
3371 uint32_t sid;
3372 uint16_t count;
3373 };
3374
3375 /* Extraction sequence - list of match fields:
3376 * protocol ID, offset, profile length
3377 */
3378 union ice_match_fld {
3379 struct {
3380 uint8_t prot_id;
3381 uint8_t offset;
3382 uint8_t length;
3383 uint8_t reserved; /* must be zero */
3384 } fld;
3385 uint32_t val;
3386 };
3387
3388 #define ICE_MATCH_LIST_SZ 20
3389 #pragma pack(1)
3390 struct ice_match {
3391 uint8_t count;
3392 union ice_match_fld list[ICE_MATCH_LIST_SZ];
3393 };
3394
3395 /* Profile ID Management */
3396 struct ice_prof_id_key {
3397 uint16_t flags;
3398 uint8_t xlt1;
3399 uint16_t xlt2_cdid;
3400 };
3401
3402 /* Keys are made up of two values, each one-half the size of the key.
3403  * For TCAM, the entire key is 80 bits wide (i.e. two 40-bit values).
3404 */
3405 #define ICE_TCAM_KEY_VAL_SZ 5
3406 #define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ)
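/*
 * Example: with ICE_TCAM_KEY_VAL_SZ of 5 bytes, ICE_TCAM_KEY_SZ is
 * 2 * 5 = 10 bytes, i.e. the 80-bit key described above.
 */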
3407
3408 struct ice_prof_tcam_entry {
3409 uint16_t addr;
3410 uint8_t key[ICE_TCAM_KEY_SZ];
3411 uint8_t prof_id;
3412 };
3413 #pragma pack()
3414
3415 struct ice_prof_id_section {
3416 uint16_t count;
3417 struct ice_prof_tcam_entry entry[STRUCT_HACK_VAR_LEN];
3418 };
3419
3420 struct ice_prof_tcam {
3421 uint32_t sid;
3422 uint16_t count;
3423 uint16_t max_prof_id;
3424 struct ice_prof_tcam_entry *t;
3425 uint8_t cdid_bits; /* # CDID bits to use in key, 0, 2, 4, or 8 */
3426 };
3427
3428 enum ice_chg_type {
3429 ICE_TCAM_NONE = 0,
3430 ICE_PTG_ES_ADD,
3431 ICE_TCAM_ADD,
3432 ICE_VSIG_ADD,
3433 ICE_VSIG_REM,
3434 ICE_VSI_MOVE,
3435 };
3436
3437 TAILQ_HEAD(ice_chs_chg_head, ice_chs_chg);
3438
3439 struct ice_chs_chg {
3440 TAILQ_ENTRY(ice_chs_chg) list_entry;
3441 enum ice_chg_type type;
3442
3443 uint8_t add_ptg;
3444 uint8_t add_vsig;
3445 uint8_t add_tcam_idx;
3446 uint8_t add_prof;
3447 uint16_t ptype;
3448 uint8_t ptg;
3449 uint8_t prof_id;
3450 uint16_t vsi;
3451 uint16_t vsig;
3452 uint16_t orig_vsig;
3453 uint16_t tcam_idx;
3454 };
3455
3456 #define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
3457
3458 struct ice_prof_redir {
3459 uint8_t *t;
3460 uint32_t sid;
3461 uint16_t count;
3462 };
3463
3464 /* Tables per block */
3465 struct ice_blk_info {
3466 struct ice_xlt1 xlt1;
3467 struct ice_xlt2 xlt2;
3468 struct ice_prof_tcam prof;
3469 struct ice_prof_redir prof_redir;
3470 struct ice_es es;
3471 uint8_t overwrite; /* set to true to allow overwrite of table entries */
3472 uint8_t is_list_init;
3473 };
3474
3475
3476 struct ice_sw_recipe {
3477 /* For a chained recipe the root recipe is what should be used for
3478 * programming rules
3479 */
3480 uint8_t is_root;
3481 uint8_t root_rid;
3482 uint8_t recp_created;
3483
3484 /* Number of extraction words */
3485 uint8_t n_ext_words;
3486 /* Protocol ID and Offset pair (extraction word) to describe the
3487 * recipe
3488 */
3489 struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS];
3490 uint16_t word_masks[ICE_MAX_CHAIN_WORDS];
3491
3492 	/* if this recipe is a collection of other recipes */
3493 uint8_t big_recp;
3494
3495 	/* if this recipe is part of another, bigger recipe, this is the chain
3496 	 * index corresponding to this recipe
3497 	 */
3498 uint8_t chain_idx;
3499
3500 	/* if this recipe is a collection of other recipes, this is the count of
3501 	 * those recipes; their recipe IDs are tracked in r_bitmap below
3502 	 */
3503 uint8_t n_grp_count;
3504
3505 	/* Bit map specifying the IDs associated with this group of recipes */
3506 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
3507 #if 0
3508 enum ice_sw_tunnel_type tun_type;
3509 #endif
3510 /* List of type ice_fltr_mgmt_list_entry or adv_rule */
3511 uint8_t adv_rule;
3512 struct ice_fltr_mgmt_list_head filt_rules;
3513 struct ice_adv_fltr_mgmt_list_head adv_filt_rules;
3514 struct ice_fltr_mgmt_list_head filt_replay_rules;
3515 struct ice_lock filt_rule_lock; /* protect filter rule structure */
3516 #if 0
3517 /* Profiles this recipe should be associated with */
3518 struct LIST_HEAD_TYPE fv_list;
3519 #endif
3520 /* Profiles this recipe is associated with */
3521 uint8_t num_profs, *prof_ids;
3522
3523 /* Bit map for possible result indexes */
3524 ice_declare_bitmap(res_idxs, ICE_MAX_FV_WORDS);
3525
3526 	/* This allows the user to specify the recipe priority.
3527 	 * For now, this becomes 'fwd_priority' when the recipe
3528 	 * is created; usually recipes can have 'fwd' and 'join'
3529 	 * priority.
3530 	 */
3531 uint8_t priority;
3532
3533 struct ice_recp_grp_entry_head rg_list;
3534
3535 /* AQ buffer associated with this recipe */
3536 struct ice_aqc_recipe_data_elem *root_buf;
3537 #if 0
3538 /* This struct saves the fv_words for a given lookup */
3539 struct ice_prot_lkup_ext lkup_exts;
3540 #endif
3541 };
3542
3543 TAILQ_HEAD(ice_flow_prof_head, ice_flow_prof);
3544
3545 /* Port hardware description */
3546 struct ice_hw {
3547 struct ice_softc *hw_sc;
3548 #if 0
3549 uint8_t *hw_addr;
3550 void *back;
3551 #endif
3552 struct ice_aqc_layer_props *layer_info;
3553 struct ice_port_info *port_info;
3554 #if 0
3555 /* 2D Array for each Tx Sched RL Profile type */
3556 struct ice_sched_rl_profile **cir_profiles;
3557 struct ice_sched_rl_profile **eir_profiles;
3558 struct ice_sched_rl_profile **srl_profiles;
3559 #endif
3560 /* PSM clock frequency for calculating RL profile params */
3561 uint32_t psm_clk_freq;
3562 enum ice_mac_type mac_type;
3563 #if 0
3564 /* pci info */
3565 uint16_t device_id;
3566 uint16_t vendor_id;
3567 uint16_t subsystem_device_id;
3568 uint16_t subsystem_vendor_id;
3569 uint8_t revision_id;
3570 #endif
3571 uint8_t pf_id; /* device profile info */
3572 #if 0
3573 enum ice_phy_model phy_model;
3574 uint8_t phy_ports;
3575 uint8_t max_phy_port;
3576
3577 #endif
3578 uint16_t max_burst_size; /* driver sets this value */
3579
3580 /* Tx Scheduler values */
3581 uint8_t num_tx_sched_layers;
3582 uint8_t num_tx_sched_phys_layers;
3583 uint8_t flattened_layers;
3584 uint8_t max_cgds;
3585 uint8_t sw_entry_point_layer;
3586 uint16_t max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM];
3587 	struct ice_agg_list_head agg_list; /* lists all aggregators */
3588 	/* Lists containing profile ID(s) and other params, per layer */
3589 struct ice_rl_prof_list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
3590 struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
3591 uint8_t evb_veb; /* true for VEB, false for VEPA */
3592 uint8_t reset_ongoing; /* true if HW is in reset, false otherwise */
3593 #if 0
3594 struct ice_bus_info bus;
3595 #endif
3596 struct ice_flash_info flash;
3597 struct ice_hw_dev_caps dev_caps; /* device capabilities */
3598 struct ice_hw_func_caps func_caps; /* function capabilities */
3599 struct ice_switch_info *switch_info; /* switch filter lists */
3600
3601 /* Control Queue info */
3602 struct ice_ctl_q_info adminq;
3603 struct ice_ctl_q_info mailboxq;
3604 uint8_t api_branch; /* API branch version */
3605 uint8_t api_maj_ver; /* API major version */
3606 uint8_t api_min_ver; /* API minor version */
3607 uint8_t api_patch; /* API patch version */
3608 uint8_t fw_branch; /* firmware branch version */
3609 uint8_t fw_maj_ver; /* firmware major version */
3610 uint8_t fw_min_ver; /* firmware minor version */
3611 uint8_t fw_patch; /* firmware patch version */
3612 uint32_t fw_build; /* firmware build number */
3613 struct ice_fwlog_cfg fwlog_cfg;
3614 bool fwlog_support_ena; /* does hardware support FW logging? */
3615
3616 /* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
3617 * register. Used for determining the ITR/INTRL granularity during
3618 * initialization.
3619 */
3620 #define ICE_MAX_AGG_BW_200G 0x0
3621 #define ICE_MAX_AGG_BW_100G 0x1
3622 #define ICE_MAX_AGG_BW_50G 0x2
3623 #define ICE_MAX_AGG_BW_25G 0x3
3624 /* ITR granularity for different speeds */
3625 #define ICE_ITR_GRAN_ABOVE_25 2
3626 #define ICE_ITR_GRAN_MAX_25 4
3627 /* ITR granularity in 1 us */
3628 uint8_t itr_gran;
3629 /* INTRL granularity for different speeds */
3630 #define ICE_INTRL_GRAN_ABOVE_25 4
3631 #define ICE_INTRL_GRAN_MAX_25 8
3632 /* INTRL granularity in 1 us */
3633 uint8_t intrl_gran;
3634
3635 /* true if VSIs can share unicast MAC addr */
3636 uint8_t umac_shared;
3637 #if 0
3638
3639 #define ICE_PHY_PER_NAC_E822 1
3640 #define ICE_MAX_QUAD 2
3641 #define ICE_QUADS_PER_PHY_E822 2
3642 #define ICE_PORTS_PER_PHY_E822 8
3643 #define ICE_PORTS_PER_QUAD 4
3644 #define ICE_PORTS_PER_PHY_E810 4
3645 #define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
3646 #endif
3647 	/* Currently active package version */
3648 struct ice_pkg_ver active_pkg_ver;
3649 uint32_t pkg_seg_id;
3650 uint32_t pkg_sign_type;
3651 uint32_t active_track_id;
3652 uint8_t pkg_has_signing_seg:1;
3653 uint8_t active_pkg_name[ICE_PKG_NAME_SIZE];
3654 uint8_t active_pkg_in_nvm;
3655
3656 	/* Driver's package version (from the Ice Metadata section) */
3657 struct ice_pkg_ver pkg_ver;
3658 uint8_t pkg_name[ICE_PKG_NAME_SIZE];
3659 #if 0
3660 /* Driver's Ice segment format version and id (from the Ice seg) */
3661 struct ice_pkg_ver ice_seg_fmt_ver;
3662 uint8_t ice_seg_id[ICE_SEG_ID_SIZE];
3663
3664 /* Pointer to the ice segment */
3665 struct ice_seg *seg;
3666
3667 /* Pointer to allocated copy of pkg memory */
3668 uint8_t *pkg_copy;
3669 u32 pkg_size;
3670
3671 /* tunneling info */
3672 struct ice_lock tnl_lock;
3673 struct ice_tunnel_table tnl;
3674 #endif
3675 /* HW block tables */
3676 struct ice_blk_info blk[ICE_BLK_COUNT];
3677 #if 0
3678 struct ice_lock fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
3679 #endif
3680 struct ice_flow_prof_head fl_profs[ICE_BLK_COUNT];
3681 #if 0
3682 struct ice_lock rss_locks; /* protect RSS configuration */
3683 #endif
3684 struct ice_rss_cfg_head rss_list_head;
3685 #if 0
3686 uint16_t vsi_owning_pf_lut; /* SW IDX of VSI that acquired PF RSS LUT */
3687 struct ice_mbx_snapshot mbx_snapshot;
3688 uint8_t dvm_ena;
3689
3690 bool subscribable_recipes_supported;
3691 #endif
3692 };
3693
3694 /**
3695 * @enum ice_state
3696 * @brief Driver state flags
3697 *
3698 * Used to indicate the status of various driver events. Intended to be
3699 * modified only using atomic operations, so that we can use it even in places
3700 * which aren't locked.
3701 */
3702 enum ice_state {
3703 ICE_STATE_CONTROLQ_EVENT_PENDING,
3704 ICE_STATE_VFLR_PENDING,
3705 ICE_STATE_MDD_PENDING,
3706 ICE_STATE_RESET_OICR_RECV,
3707 ICE_STATE_RESET_PFR_REQ,
3708 ICE_STATE_PREPARED_FOR_RESET,
3709 ICE_STATE_SUBIF_NEEDS_REINIT,
3710 ICE_STATE_RESET_FAILED,
3711 ICE_STATE_DRIVER_INITIALIZED,
3712 ICE_STATE_NO_MEDIA,
3713 ICE_STATE_RECOVERY_MODE,
3714 ICE_STATE_ROLLBACK_MODE,
3715 ICE_STATE_LINK_STATUS_REPORTED,
3716 ICE_STATE_ATTACHING,
3717 ICE_STATE_DETACHING,
3718 ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING,
3719 ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER,
3720 ICE_STATE_MULTIPLE_TCS,
3721 ICE_STATE_DO_FW_DEBUG_DUMP,
3722 ICE_STATE_LINK_ACTIVE_ON_DOWN,
3723 ICE_STATE_FIRST_INIT_LINK,
3724 ICE_STATE_DO_CREATE_MIRR_INTFC,
3725 ICE_STATE_DO_DESTROY_MIRR_INTFC,
3726 /* This entry must be last */
3727 ICE_STATE_LAST,
3728 };
3729
3730 /**
3731 * ice_set_state - Set the specified state
3732 * @s: the state bitmap
3733 * @bit: the state to set
3734 *
3735 * Atomically update the state bitmap with the specified bit set.
3736 */
3737 static inline void
3738 ice_set_state(volatile uint32_t *s, enum ice_state bit)
3739 {
3740 atomic_setbits_int(s, (1UL << bit));
3741 }
3742
3743 /**
3744 * ice_clear_state - Clear the specified state
3745 * @s: the state bitmap
3746 * @bit: the state to clear
3747 *
3748 * Atomically update the state bitmap with the specified bit cleared.
3749 */
3750 static inline void
3751 ice_clear_state(volatile uint32_t *s, enum ice_state bit)
3752 {
3753 atomic_clearbits_int(s, (1UL << bit));
3754 }
3755
3756 /**
3757 * ice_testandset_state - Test and set the specified state
3758 * @s: the state bitmap
3759 * @bit: the bit to test
3760 *
3761 * Atomically update the state bitmap, setting the specified bit. Returns the
3762 * previous value of the bit.
3763 */
3764 static inline uint32_t
3765 ice_testandset_state(volatile uint32_t *s, enum ice_state bit)
3766 {
3767 uint32_t expected = *s;
3768 uint32_t previous;
3769
3770 previous = atomic_cas_uint(s, expected, expected | (1UL << bit));
3771 return (previous & (1UL << bit)) ? 1 : 0;
3772 }
3773
3774 /**
3775 * ice_testandclear_state - Test and clear the specified state
3776 * @s: the state bitmap
3777 * @bit: the bit to test
3778 *
3779 * Atomically update the state bitmap, clearing the specified bit. Returns the
3780 * previous value of the bit.
3781 */
3782 static inline uint32_t
3783 ice_testandclear_state(volatile uint32_t *s, enum ice_state bit)
3784 {
3785 uint32_t expected = *s;
3786 uint32_t previous;
3787
3788 previous = atomic_cas_uint(s, expected, expected & ~(1UL << bit));
3789 return (previous & (1UL << bit)) ? 1 : 0;
3790 }
3791
3792 /**
3793 * ice_test_state - Test the specified state
3794 * @s: the state bitmap
3795 * @bit: the bit to test
3796 *
3797 * Return true if the state is set, false otherwise. Use this only if the flow
3798 * does not need to update the state. If you must update the state as well,
3799 * prefer ice_testandset_state or ice_testandclear_state.
3800 */
3801 static inline uint32_t
3802 ice_test_state(volatile uint32_t *s, enum ice_state bit)
3803 {
3804 return (*s & (1UL << bit)) ? 1 : 0;
3805 }
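/*
 * Example (sketch): typical use of the state helpers above. The softc member
 * name 'state' is an assumption for illustration; any volatile uint32_t
 * bitmap indexed by enum ice_state works the same way.
 */
#if 0
	if (ice_testandset_state(&sc->state, ICE_STATE_RESET_PFR_REQ) == 0) {
		/* Bit was previously clear; this caller schedules the reset. */
	}
	/* ... once the reset has been handled ... */
	ice_clear_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
#endif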
3806
3807 static inline uint32_t ice_round_to_num(uint32_t N, uint32_t R)
3808 {
3809 return ((((N) % (R)) < ((R) / 2)) ? (((N) / (R)) * (R)) :
3810 ((((N) + (R) - 1) / (R)) * (R)));
3811 }
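/*
 * Example: ice_round_to_num() rounds to the nearest multiple of R, e.g.
 * ice_round_to_num(5, 4) == 4 and ice_round_to_num(7, 4) == 8.
 */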
3812
3813 /* based on parity() in sys/net/toeplitz.c */
3814 static inline uint16_t
3815 ice_popcount16(uint16_t n16)
3816 {
3817 n16 = ((n16 & 0xaaaa) >> 1) + (n16 & 0x5555);
3818 n16 = ((n16 & 0xcccc) >> 2) + (n16 & 0x3333);
3819 n16 = ((n16 & 0xf0f0) >> 4) + (n16 & 0x0f0f);
3820 n16 = ((n16 & 0xff00) >> 8) + (n16 & 0x00ff);
3821
3822 return (n16);
3823 }
3824
3825 /* based on parity() in sys/net/toeplitz.c */
3826 static inline uint32_t
3827 ice_popcount32(uint32_t n32)
3828 {
3829 n32 = ((n32 & 0xaaaaaaaa) >> 1) + (n32 & 0x55555555);
3830 n32 = ((n32 & 0xcccccccc) >> 2) + (n32 & 0x33333333);
3831 n32 = ((n32 & 0xf0f0f0f0) >> 4) + (n32 & 0x0f0f0f0f);
3832 n32 = ((n32 & 0xff00ff00) >> 8) + (n32 & 0x00ff00ff);
3833 n32 = ((n32 & 0xffff0000) >> 16) + (n32 & 0x0000ffff);
3834
3835 return (n32);
3836 }
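/*
 * Example: each step above folds adjacent bit-field sums together, so
 * ice_popcount32(0xF0F00001) == 9 (eight bits set in 0xF0F00000 plus one).
 */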
3837
3838 #define ice_ilog2(x) ((sizeof(x) <= 4) ? (fls(x) - 1) : (flsl(x) - 1))
3839
3840 /*
3841 * ice_bit_* functions derived from FreeBSD sys/bitstring.h
3842 */
3843
3844 typedef uint32_t ice_bitstr_t;
3845
3846 #define ICE_BITSTR_MASK (~0UL)
3847 #define ICE_BITSTR_BITS (sizeof(ice_bitstr_t) * 8)
3848
3849 /* round up x to the next multiple of y if y is a power of two */
3850 #define ice_bit_roundup(x, y) \
3851 (((size_t)(x) + (y) - 1) & ~((size_t)(y) - 1))
3852
3853 /* Number of bytes allocated for a bit string of nbits bits */
3854 #define ice_bitstr_size(nbits) (ice_bit_roundup((nbits), ICE_BITSTR_BITS) / 8)
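/*
 * Example: a 100-bit string rounds up to 128 bits (four 32-bit words), so
 * ice_bitstr_size(100) == 16 bytes.
 */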
3855
3856 static inline ice_bitstr_t *
3857 ice_bit_alloc(size_t nbits)
3858 {
3859 return malloc(ice_bitstr_size(nbits), M_DEVBUF, M_NOWAIT | M_ZERO);
3860 }
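/*
 * Example (sketch): allocating and releasing a bit string. The matching
 * free() should pass the same size so the allocation is accounted correctly.
 */
#if 0
	ice_bitstr_t *bits = ice_bit_alloc(nbits);
	if (bits == NULL)
		return (ENOMEM);
	/* ... ice_bit_set()/ice_bit_clear()/ice_bit_test() on 'bits' ... */
	free(bits, M_DEVBUF, ice_bitstr_size(nbits));
#endif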
3861
3862 /* Allocate a bit string on the stack */
3863 #define ice_bit_decl(name, nbits) \
3864 	((name)[ice_bitstr_size(nbits) / sizeof(ice_bitstr_t)])
3865
3866 /* Index of the ice_bitstr_t in the bit string that contains the bit. */
3867 static inline size_t
3868 ice_bit_idx(size_t bit)
3869 {
3870 return (bit / ICE_BITSTR_BITS);
3871 }
3872
3873 /* Bit number within the ice_bitstr_t at ice_bit_idx(bit). */
3874 static inline size_t
3875 ice_bit_offset(size_t bit)
3876 {
3877 return (bit % ICE_BITSTR_BITS);
3878 }
3879
3880 /* Mask for the bit within its ice_bitstr_t. */
3881 static inline ice_bitstr_t
3882 ice_bit_mask(size_t bit)
3883 {
3884 return (1UL << ice_bit_offset(bit));
3885 }
3886
3887 static inline ice_bitstr_t
3888 ice_bit_make_mask(size_t start, size_t stop)
3889 {
3890 return ((ICE_BITSTR_MASK << ice_bit_offset(start)) &
3891 (ICE_BITSTR_MASK >> (ICE_BITSTR_BITS - ice_bit_offset(stop) - 1)));
3892 }
3893
3894 /* Is bit N of bit string set? */
3895 static inline int
3896 ice_bit_test(const ice_bitstr_t *bitstr, size_t bit)
3897 {
3898 return ((bitstr[ice_bit_idx(bit)] & ice_bit_mask(bit)) != 0);
3899 }
3900
3901 /* Set bit N of bit string. */
3902 static inline void
3903 ice_bit_set(ice_bitstr_t *bitstr, size_t bit)
3904 {
3905 bitstr[ice_bit_idx(bit)] |= ice_bit_mask(bit);
3906 }
3907
3908 /* Clear bit N of bit string. */
3909 static inline void
3910 ice_bit_clear(ice_bitstr_t *bitstr, size_t bit)
3911 {
3912 bitstr[ice_bit_idx(bit)] &= ~ice_bit_mask(bit);
3913 }
3914
3915 /* Count the number of bits set in a bitstr of size nbits at or after start */
3916 static inline ssize_t
3917 ice_bit_count(ice_bitstr_t *bitstr, size_t start, size_t nbits)
3918 {
3919 ice_bitstr_t *curbitstr, mask;
3920 size_t curbitstr_len;
3921 ssize_t value = 0;
3922
3923 if (start >= nbits)
3924 return (0);
3925
3926 curbitstr = bitstr + ice_bit_idx(start);
3927 nbits -= ICE_BITSTR_BITS * ice_bit_idx(start);
3928 start -= ICE_BITSTR_BITS * ice_bit_idx(start);
3929
3930 if (start > 0) {
3931 curbitstr_len = (int)ICE_BITSTR_BITS < nbits ?
3932 (int)ICE_BITSTR_BITS : nbits;
3933 mask = ice_bit_make_mask(start,
3934 ice_bit_offset(curbitstr_len - 1));
3935 value += ice_popcount32(*curbitstr & mask);
3936 curbitstr++;
3937 if (nbits < ICE_BITSTR_BITS)
3938 return (value);
3939 nbits -= ICE_BITSTR_BITS;
3940 }
3941 while (nbits >= (int)ICE_BITSTR_BITS) {
3942 value += ice_popcount32(*curbitstr);
3943 curbitstr++;
3944 nbits -= ICE_BITSTR_BITS;
3945 }
3946 if (nbits > 0) {
3947 mask = ice_bit_make_mask(0, ice_bit_offset(nbits - 1));
3948 value += ice_popcount32(*curbitstr & mask);
3949 }
3950
3951 return (value);
3952 }
3953
3954 /* Find the first 'match'-bit in bit string at or after bit start. */
3955 static inline ssize_t
3956 ice_bit_ff_at(ice_bitstr_t *bitstr, size_t start, size_t nbits, int match)
3957 {
3958 ice_bitstr_t *curbitstr;
3959 ice_bitstr_t *stopbitstr;
3960 ice_bitstr_t mask;
3961 ice_bitstr_t test;
3962 ssize_t value;
3963
3964 if (start >= nbits || nbits <= 0)
3965 return (-1);
3966
3967 curbitstr = bitstr + ice_bit_idx(start);
3968 stopbitstr = bitstr + ice_bit_idx(nbits - 1);
3969 mask = match ? 0 : ICE_BITSTR_MASK;
3970
3971 test = mask ^ *curbitstr;
3972 if (ice_bit_offset(start) != 0)
3973 test &= ice_bit_make_mask(start, ICE_BITSTR_BITS - 1);
3974 while (test == 0 && curbitstr < stopbitstr)
3975 test = mask ^ *(++curbitstr);
3976
3977 value = ((curbitstr - bitstr) * ICE_BITSTR_BITS) + ffs(test) - 1;
3978 if (test == 0 ||
3979 (ice_bit_offset(nbits) != 0 && (size_t)value >= nbits))
3980 value = -1;
3981 return (value);
3982 }
3983
3984 /* Find contiguous sequence of at least size 'match'-bits at or after start */
3985 static inline ssize_t
3986 ice_bit_ff_area_at(ice_bitstr_t *bitstr, size_t start, size_t nbits,
3987 size_t size, int match)
3988 {
3989 ice_bitstr_t *curbitstr, mask, test;
3990 size_t last, shft, maxshft;
3991 ssize_t value;
3992
3993 if (start + size > nbits || nbits <= 0)
3994 return (-1);
3995
3996 mask = match ? ICE_BITSTR_MASK : 0;
3997 maxshft = ice_bit_idx(size - 1) == 0 ? size : (int)ICE_BITSTR_BITS;
3998 value = start;
3999 curbitstr = bitstr + ice_bit_idx(start);
4000 test = ~(ICE_BITSTR_MASK << ice_bit_offset(start));
4001 for (last = size - 1, test |= mask ^ *curbitstr;
4002 !(ice_bit_idx(last) == 0 &&
4003 (test & ice_bit_make_mask(0, last)) == 0);
4004 last -= ICE_BITSTR_BITS, test = mask ^ *++curbitstr) {
4005 if (test == 0)
4006 continue;
4007 		/* Shrink-left every 0-area in 'test' by maxshft-1 bits. */
4008 for (shft = maxshft; shft > 1 && (test & (test + 1)) != 0;
4009 shft = (shft + 1) / 2)
4010 test |= test >> shft / 2;
4011 /* Find the start of the first 0-area in 'test'. */
4012 last = ffs(~(test >> 1));
4013 value = (curbitstr - bitstr) * ICE_BITSTR_BITS + last;
4014 /* If there's insufficient space left, give up. */
4015 if (value + size > nbits) {
4016 value = -1;
4017 break;
4018 }
4019 last += size - 1;
4020 /* If a solution is contained in 'test', success! */
4021 if (ice_bit_idx(last) == 0)
4022 break;
4023 /* A solution here needs bits from the next word. */
4024 }
4025
4026 return (value);
4027 }
4028
4029 /* Find contiguous sequence of at least size set bits in bit string */
4030 #define ice_bit_ffs_area(_bitstr, _nbits, _size, _resultp) \
4031 *(_resultp) = ice_bit_ff_area_at((_bitstr), 0, (_nbits), (_size), 1)
4032
4033 /* Find contiguous sequence of at least size cleared bits in bit string */
4034 #define ice_bit_ffc_area(_bitstr, _nbits, _size, _resultp) \
4035 *(_resultp) = ice_bit_ff_area_at((_bitstr), 0, (_nbits), (_size), 0)
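/*
 * Example (sketch): find and claim a contiguous run of 'num' free (cleared)
 * bits, as a contiguous resource allocator would. Variable names are
 * illustrative.
 */
#if 0
	ssize_t idx;
	size_t i;

	ice_bit_ffc_area(bitstr, nbits, num, &idx);
	if (idx == -1)
		return (ENOSPC);
	for (i = 0; i < num; i++)
		ice_bit_set(bitstr, idx + i);
#endif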
4036
4037
4038 /**
4039 * @file ice_resmgr.h
4040 * @brief Resource manager interface
4041 *
4042  * Defines an interface for managing PF hardware queues and interrupts, and
4043  * for assigning them to hardware VSIs and VFs.
4044 *
4045 * For queue management:
4046 * The total number of available Tx and Rx queues is not equal, so it is
4047 * expected that each PF will allocate two ice_resmgr structures, one for Tx
4048 * and one for Rx. These should be allocated in attach() prior to initializing
4049 * VSIs, and destroyed in detach().
4050 *
4051 * For interrupt management:
4052 * The PF allocates an ice_resmgr structure that does not allow scattered
4053 * allocations since interrupt allocations must be contiguous.
4054 */
4055
4056 /*
4057 * For managing VSI queue allocations
4058 */
4059 /* Hardware only supports a limited number of resources in scattered mode */
4060 #define ICE_MAX_SCATTERED_QUEUES 16
4061 /* Use highest value to indicate invalid resource mapping */
4062 #define ICE_INVALID_RES_IDX 0xFFFF
4063
4064 /**
4065 * @struct ice_resmgr
4066 * @brief Resource manager
4067 *
4068 * Represent resource allocations using a bitstring, where bit zero represents
4069 * the first resource. If a particular bit is set this indicates that the
4070 * resource has been allocated and is not free.
4071 */
4072 struct ice_resmgr {
4073 ice_bitstr_t *resources;
4074 uint16_t num_res;
4075 bool contig_only;
4076 };
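/*
 * Example (sketch): setting up a resource manager on top of the bit string
 * helpers above. The helper name and error handling are illustrative, not
 * part of this header.
 */
#if 0
	static int
	ice_resmgr_init_example(struct ice_resmgr *resmgr, uint16_t num_res)
	{
		resmgr->resources = ice_bit_alloc(num_res);
		if (resmgr->resources == NULL)
			return (ENOMEM);
		resmgr->num_res = num_res;
		resmgr->contig_only = false;	/* allow scattered allocations */
		return (0);
	}
#endif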
4077
4078 /**
4079 * @enum ice_resmgr_alloc_type
4080 * @brief resource manager allocation types
4081 *
4082 * Enumeration of possible allocation types that can be used when
4083 * assigning resources. For now, SCATTERED is only used with
4084 * managing queue allocations.
4085 */
4086 enum ice_resmgr_alloc_type {
4087 ICE_RESMGR_ALLOC_INVALID = 0,
4088 ICE_RESMGR_ALLOC_CONTIGUOUS,
4089 ICE_RESMGR_ALLOC_SCATTERED
4090 };
4091
4092 /**
4093 * @struct ice_tc_info
4094 * @brief Traffic class information for a VSI
4095 *
4096 * Stores traffic class information used in configuring
4097 * a VSI.
4098 */
4099 struct ice_tc_info {
4100 uint16_t qoffset; /* Offset in VSI queue space */
4101 uint16_t qcount_tx; /* TX queues for this Traffic Class */
4102 uint16_t qcount_rx; /* RX queues */
4103 };
4104
4105 /* Statistics collected by each port, VSI, VEB, and S-channel */
4106 struct ice_eth_stats {
4107 uint64_t rx_bytes; /* gorc */
4108 uint64_t rx_unicast; /* uprc */
4109 uint64_t rx_multicast; /* mprc */
4110 uint64_t rx_broadcast; /* bprc */
4111 uint64_t rx_discards; /* rdpc */
4112 uint64_t rx_unknown_protocol; /* rupp */
4113 uint64_t tx_bytes; /* gotc */
4114 uint64_t tx_unicast; /* uptc */
4115 uint64_t tx_multicast; /* mptc */
4116 uint64_t tx_broadcast; /* bptc */
4117 uint64_t tx_discards; /* tdpc */
4118 uint64_t tx_errors; /* tepc */
4119 uint64_t rx_no_desc; /* repc */
4120 uint64_t rx_errors; /* repc */
4121 };
4122
4123 /**
4124 * @struct ice_vsi_hw_stats
4125 * @brief hardware statistics for a VSI
4126 *
4127 * Stores statistics that are generated by hardware for a VSI.
4128 */
4129 struct ice_vsi_hw_stats {
4130 struct ice_eth_stats prev;
4131 struct ice_eth_stats cur;
4132 bool offsets_loaded;
4133 };
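/*
 * Example (sketch): the prev/cur pair implements the usual offset pattern for
 * free-running hardware counters. On the first read after (re)load the value
 * is captured as the baseline; later reads report the count since then.
 * Helper and variable names below are illustrative.
 */
#if 0
	static inline void
	ice_stat_update_example(uint64_t new_val, bool offsets_loaded,
	    uint64_t *prev, uint64_t *cur)
	{
		if (!offsets_loaded)
			*prev = new_val;	/* baseline at first read */
		*cur = new_val - *prev;		/* count since driver load */
	}
#endif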
4134
4135 /* Statistics collected by the MAC */
4136 struct ice_hw_port_stats {
4137 /* eth stats collected by the port */
4138 struct ice_eth_stats eth;
4139 /* additional port specific stats */
4140 uint64_t tx_dropped_link_down; /* tdold */
4141 uint64_t crc_errors; /* crcerrs */
4142 uint64_t illegal_bytes; /* illerrc */
4143 uint64_t error_bytes; /* errbc */
4144 uint64_t mac_local_faults; /* mlfc */
4145 uint64_t mac_remote_faults; /* mrfc */
4146 uint64_t rx_len_errors; /* rlec */
4147 uint64_t link_xon_rx; /* lxonrxc */
4148 uint64_t link_xoff_rx; /* lxoffrxc */
4149 uint64_t link_xon_tx; /* lxontxc */
4150 uint64_t link_xoff_tx; /* lxofftxc */
4151 uint64_t priority_xon_rx[8]; /* pxonrxc[8] */
4152 uint64_t priority_xoff_rx[8]; /* pxoffrxc[8] */
4153 uint64_t priority_xon_tx[8]; /* pxontxc[8] */
4154 uint64_t priority_xoff_tx[8]; /* pxofftxc[8] */
4155 uint64_t priority_xon_2_xoff[8];/* pxon2offc[8] */
4156 uint64_t rx_size_64; /* prc64 */
4157 uint64_t rx_size_127; /* prc127 */
4158 uint64_t rx_size_255; /* prc255 */
4159 uint64_t rx_size_511; /* prc511 */
4160 uint64_t rx_size_1023; /* prc1023 */
4161 uint64_t rx_size_1522; /* prc1522 */
4162 uint64_t rx_size_big; /* prc9522 */
4163 uint64_t rx_undersize; /* ruc */
4164 uint64_t rx_fragments; /* rfc */
4165 uint64_t rx_oversize; /* roc */
4166 uint64_t rx_jabber; /* rjc */
4167 uint64_t tx_size_64; /* ptc64 */
4168 uint64_t tx_size_127; /* ptc127 */
4169 uint64_t tx_size_255; /* ptc255 */
4170 uint64_t tx_size_511; /* ptc511 */
4171 uint64_t tx_size_1023; /* ptc1023 */
4172 uint64_t tx_size_1522; /* ptc1522 */
4173 uint64_t tx_size_big; /* ptc9522 */
4174 uint64_t mac_short_pkt_dropped; /* mspdc */
4175 /* EEE LPI */
4176 uint32_t tx_lpi_status;
4177 uint32_t rx_lpi_status;
4178 uint64_t tx_lpi_count; /* etlpic */
4179 uint64_t rx_lpi_count; /* erlpic */
4180 };
4181
4182 /**
4183 * @struct ice_pf_hw_stats
4184 * @brief hardware statistics for a PF
4185 *
4186 * Stores statistics that are generated by hardware for each PF.
4187 */
4188 struct ice_pf_hw_stats {
4189 struct ice_hw_port_stats prev;
4190 struct ice_hw_port_stats cur;
4191 bool offsets_loaded;
4192 };
4193
4194 /**
4195 * @struct ice_pf_sw_stats
4196 * @brief software statistics for a PF
4197 *
4198 * Contains software generated statistics relevant to a PF.
4199 */
4200 struct ice_pf_sw_stats {
4201 /* # of reset events handled, by type */
4202 uint32_t corer_count;
4203 uint32_t globr_count;
4204 uint32_t empr_count;
4205 uint32_t pfr_count;
4206
4207 /* # of detected MDD events for Tx and Rx */
4208 uint32_t tx_mdd_count;
4209 uint32_t rx_mdd_count;
4210 };
4211
4212 struct ice_tx_map {
4213 struct mbuf *txm_m;
4214 bus_dmamap_t txm_map;
4215 unsigned int txm_eop;
4216 };
4217
4218 /**
4219 * @struct ice_tx_queue
4220 * @brief Driver Tx queue structure
4221 *
4222  * @vsi: backpointer to the VSI structure
4223 * @me: this queue's index into the queue array
4224 * @irqv: always NULL for iflib
4225 * @desc_count: the number of descriptors
4226 * @tx_paddr: the physical address for this queue
4227 * @q_teid: the Tx queue TEID returned from firmware
4228 * @stats: queue statistics
4229  * @tc: traffic class this queue belongs to
4230 * @q_handle: qidx in tc; used in TXQ enable functions
4231 */
4232 struct ice_tx_queue {
4233 struct ice_vsi *vsi;
4234 struct ice_tx_desc *tx_base;
4235 struct ice_dma_mem tx_desc_mem;
4236 bus_addr_t tx_paddr;
4237 struct ice_tx_map *tx_map;
4238 #if 0
4239 struct tx_stats stats;
4240 #endif
4241 uint64_t tso;
4242 uint16_t desc_count;
4243 uint32_t tail;
4244 struct ice_intr_vector *irqv;
4245 uint32_t q_teid;
4246 uint32_t me;
4247 uint16_t q_handle;
4248 uint8_t tc;
4249
4250 /* descriptor writeback status */
4251 uint16_t *tx_rsq;
4252 uint16_t tx_rs_cidx;
4253 uint16_t tx_rs_pidx;
4254 uint16_t tx_cidx_processed;
4255
4256 struct ifqueue *txq_ifq;
4257
4258 unsigned int txq_prod;
4259 unsigned int txq_cons;
4260 };
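/*
 * Example (sketch): txq_prod and txq_cons are ring indices in the range
 * [0, desc_count). A generic way to compute the number of free descriptor
 * slots, shown for illustration only:
 */
#if 0
	unsigned int free;

	free = txq->txq_cons;
	if (free <= txq->txq_prod)
		free += txq->desc_count;
	free -= txq->txq_prod;
#endif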
4261
4262 struct ice_rx_map {
4263 struct mbuf *rxm_m;
4264 bus_dmamap_t rxm_map;
4265 };
4266
4267 /**
4268 * @struct ice_rx_queue
4269 * @brief Driver Rx queue structure
4270 *
4271  * @vsi: backpointer to the VSI structure
4272 * @me: this queue's index into the queue array
4273 * @irqv: pointer to vector structure associated with this queue
4274 * @desc_count: the number of descriptors
4275 * @rx_paddr: the physical address for this queue
4276 * @tail: the tail register address for this queue
4277 * @stats: queue statistics
4278  * @tc: traffic class this queue belongs to
4279 */
4280 struct ice_rx_queue {
4281 struct ice_vsi *vsi;
4282 union ice_32b_rx_flex_desc *rx_base;
4283 struct ice_dma_mem rx_desc_mem;
4284 bus_addr_t rx_paddr;
4285 struct ice_rx_map *rx_map;
4286 #if 0
4287 struct rx_stats stats;
4288 #endif
4289 uint16_t desc_count;
4290 uint32_t tail;
4291 struct ice_intr_vector *irqv;
4292 uint32_t me;
4293 uint8_t tc;
4294
4295 struct if_rxring rxq_acct;
4296 struct timeout rxq_refill;
4297 unsigned int rxq_prod;
4298 unsigned int rxq_cons;
4299 struct ifiqueue *rxq_ifiq;
4300 struct mbuf *rxq_m_head;
4301 struct mbuf **rxq_m_tail;
4302 };
4303
4304 /**
4305 * @struct ice_vsi
4306 * @brief VSI structure
4307 *
4308 * Contains data relevant to a single VSI
4309 */
4310 struct ice_vsi {
4311 /* back pointer to the softc */
4312 struct ice_softc *sc;
4313
4314 bool dynamic; /* if true, dynamically allocated */
4315
4316 enum ice_vsi_type type; /* type of this VSI */
4317 uint16_t idx; /* software index to sc->all_vsi[] */
4318
4319 uint16_t *tx_qmap; /* Tx VSI to PF queue mapping */
4320 uint16_t *rx_qmap; /* Rx VSI to PF queue mapping */
4321
4322 enum ice_resmgr_alloc_type qmap_type;
4323
4324 struct ice_tx_queue *tx_queues; /* Tx queue array */
4325 struct ice_rx_queue *rx_queues; /* Rx queue array */
4326
4327 int num_tx_queues;
4328 int num_rx_queues;
4329 int num_vectors;
4330
4331 int16_t rx_itr;
4332 int16_t tx_itr;
4333
4334 /* RSS configuration */
4335 uint16_t rss_table_size; /* HW RSS table size */
4336 uint8_t rss_lut_type; /* Used to configure Get/Set RSS LUT AQ call */
4337
4338 int max_frame_size;
4339 uint16_t mbuf_sz;
4340
4341 struct ice_aqc_vsi_props info;
4342
4343 /* DCB configuration */
4344 uint8_t num_tcs; /* Total number of enabled TCs */
4345 uint16_t tc_map; /* bitmap of enabled Traffic Classes */
4346 /* Information for each traffic class */
4347 struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
4348 #if 0
4349 /* context for per-VSI sysctls */
4350 struct sysctl_ctx_list ctx;
4351 struct sysctl_oid *vsi_node;
4352
4353 /* context for per-txq sysctls */
4354 struct sysctl_ctx_list txqs_ctx;
4355 struct sysctl_oid *txqs_node;
4356
4357 /* context for per-rxq sysctls */
4358 struct sysctl_ctx_list rxqs_ctx;
4359 struct sysctl_oid *rxqs_node;
4360 #endif
4361 /* VSI-level stats */
4362 struct ice_vsi_hw_stats hw_stats;
4363
4364 /* VSI mirroring details */
4365 uint16_t mirror_src_vsi;
4366 uint16_t rule_mir_ingress;
4367 uint16_t rule_mir_egress;
4368 };
4369
4370 /* Driver always calls main vsi_handle first */
4371 #define ICE_MAIN_VSI_HANDLE 0
4372