xref: /freebsd/sys/dev/ice/ice_ddp_common.c (revision 315ee00f)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2023, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include "ice_ddp_common.h"
33 #include "ice_type.h"
34 #include "ice_common.h"
35 #include "ice_sched.h"
36 
37 /**
38  * ice_aq_download_pkg
39  * @hw: pointer to the hardware structure
40  * @pkg_buf: the package buffer to transfer
41  * @buf_size: the size of the package buffer
42  * @last_buf: last buffer indicator
43  * @error_offset: returns error offset
44  * @error_info: returns error information
45  * @cd: pointer to command details structure or NULL
46  *
47  * Download Package (0x0C40)
48  */
49 static enum ice_status
50 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
51 		    u16 buf_size, bool last_buf, u32 *error_offset,
52 		    u32 *error_info, struct ice_sq_cd *cd)
53 {
54 	struct ice_aqc_download_pkg *cmd;
55 	struct ice_aq_desc desc;
56 	enum ice_status status;
57 
58 	if (error_offset)
59 		*error_offset = 0;
60 	if (error_info)
61 		*error_info = 0;
62 
63 	cmd = &desc.params.download_pkg;
64 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
65 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
66 
67 	if (last_buf)
68 		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
69 
70 	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
71 	if (status == ICE_ERR_AQ_ERROR) {
72 		/* Read error from buffer only when the FW returned an error */
73 		struct ice_aqc_download_pkg_resp *resp;
74 
75 		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
76 		if (error_offset)
77 			*error_offset = LE32_TO_CPU(resp->error_offset);
78 		if (error_info)
79 			*error_info = LE32_TO_CPU(resp->error_info);
80 	}
81 
82 	return status;
83 }
84 
85 /**
86  * ice_aq_upload_section
87  * @hw: pointer to the hardware structure
88  * @pkg_buf: the package buffer which will receive the section
89  * @buf_size: the size of the package buffer
90  * @cd: pointer to command details structure or NULL
91  *
92  * Upload Section (0x0C41)
93  */
94 enum ice_status
95 ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
96 		      u16 buf_size, struct ice_sq_cd *cd)
97 {
98 	struct ice_aq_desc desc;
99 
100 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
101 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
102 
103 	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
104 }
105 
106 /**
107  * ice_aq_update_pkg
108  * @hw: pointer to the hardware structure
109  * @pkg_buf: the package cmd buffer
110  * @buf_size: the size of the package cmd buffer
111  * @last_buf: last buffer indicator
112  * @error_offset: returns error offset
113  * @error_info: returns error information
114  * @cd: pointer to command details structure or NULL
115  *
116  * Update Package (0x0C42)
117  */
118 static enum ice_status
119 ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
120 		  bool last_buf, u32 *error_offset, u32 *error_info,
121 		  struct ice_sq_cd *cd)
122 {
123 	struct ice_aqc_download_pkg *cmd;
124 	struct ice_aq_desc desc;
125 	enum ice_status status;
126 
127 	if (error_offset)
128 		*error_offset = 0;
129 	if (error_info)
130 		*error_info = 0;
131 
132 	cmd = &desc.params.download_pkg;
133 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
134 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
135 
136 	if (last_buf)
137 		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
138 
139 	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
140 	if (status == ICE_ERR_AQ_ERROR) {
141 		/* Read error from buffer only when the FW returned an error */
142 		struct ice_aqc_download_pkg_resp *resp;
143 
144 		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
145 		if (error_offset)
146 			*error_offset = LE32_TO_CPU(resp->error_offset);
147 		if (error_info)
148 			*error_info = LE32_TO_CPU(resp->error_info);
149 	}
150 
151 	return status;
152 }
153 
154 /**
155  * ice_find_seg_in_pkg
156  * @hw: pointer to the hardware structure
157  * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
158  * @pkg_hdr: pointer to the package header to be searched
159  *
160  * This function searches a package file for a particular segment type. On
161  * success it returns a pointer to the segment header, otherwise it will
162  * return NULL.
163  */
164 struct ice_generic_seg_hdr *
165 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
166 		    struct ice_pkg_hdr *pkg_hdr)
167 {
168 	u32 i;
169 
170 	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
171 		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
172 		  pkg_hdr->pkg_format_ver.update,
173 		  pkg_hdr->pkg_format_ver.draft);
174 
175 	/* Search all package segments for the requested segment type */
176 	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
177 		struct ice_generic_seg_hdr *seg;
178 
179 		seg = (struct ice_generic_seg_hdr *)
180 			((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
181 
182 		if (LE32_TO_CPU(seg->seg_type) == seg_type)
183 			return seg;
184 	}
185 
186 	return NULL;
187 }
188 
189 /**
190  * ice_get_pkg_seg_by_idx
191  * @pkg_hdr: pointer to the package header to be searched
192  * @idx: index of segment
193  */
194 static struct ice_generic_seg_hdr *
195 ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
196 {
197 	struct ice_generic_seg_hdr *seg = NULL;
198 
199 	if (idx < LE32_TO_CPU(pkg_hdr->seg_count))
200 		seg = (struct ice_generic_seg_hdr *)
201 			((u8 *)pkg_hdr +
202 			 LE32_TO_CPU(pkg_hdr->seg_offset[idx]));
203 
204 	return seg;
205 }
206 
207 /**
208  * ice_is_signing_seg_at_idx - determine if segment is a signing segment
209  * @pkg_hdr: pointer to package header
210  * @idx: segment index
211  */
212 static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
213 {
214 	struct ice_generic_seg_hdr *seg;
215 	bool retval = false;
216 
217 	seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx);
218 	if (seg)
219 		retval = LE32_TO_CPU(seg->seg_type) == SEGMENT_TYPE_SIGNING;
220 
221 	return retval;
222 }
223 
224 /**
225  * ice_is_signing_seg_type_at_idx
226  * @pkg_hdr: pointer to package header
227  * @idx: segment index
228  * @seg_id: segment id that is expected
229  * @sign_type: signing type
230  *
231  * Determine if a segment is a signing segment of the correct type
232  */
233 static bool
234 ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
235 			       u32 seg_id, u32 sign_type)
236 {
237 	bool result = false;
238 
239 	if (ice_is_signing_seg_at_idx(pkg_hdr, idx)) {
240 		struct ice_sign_seg *seg;
241 
242 		seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr,
243 								    idx);
244 		if (seg && LE32_TO_CPU(seg->seg_id) == seg_id &&
245 		    LE32_TO_CPU(seg->sign_type) == sign_type)
246 			result = true;
247 	}
248 
249 	return result;
250 }
251 
252 /**
253  * ice_update_pkg_no_lock
254  * @hw: pointer to the hardware structure
255  * @bufs: pointer to an array of buffers
256  * @count: the number of buffers in the array
257  */
258 enum ice_status
259 ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
260 {
261 	enum ice_status status = ICE_SUCCESS;
262 	u32 i;
263 
264 	for (i = 0; i < count; i++) {
265 		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
266 		bool last = ((i + 1) == count);
267 		u32 offset, info;
268 
269 		status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
270 					   last, &offset, &info, NULL);
271 
272 		if (status) {
273 			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
274 				  status, offset, info);
275 			break;
276 		}
277 	}
278 
279 	return status;
280 }
281 
282 /**
283  * ice_update_pkg
284  * @hw: pointer to the hardware structure
285  * @bufs: pointer to an array of buffers
286  * @count: the number of buffers in the array
287  *
288  * Obtains change lock and updates package.
289  */
290 enum ice_status
291 ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
292 {
293 	enum ice_status status;
294 
295 	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
296 	if (status)
297 		return status;
298 
299 	status = ice_update_pkg_no_lock(hw, bufs, count);
300 
301 	ice_release_change_lock(hw);
302 
303 	return status;
304 }
305 
306 static enum ice_ddp_state
307 ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
308 {
309 	switch (aq_err) {
310 	case ICE_AQ_RC_ENOSEC:
311 		return ICE_DDP_PKG_NO_SEC_MANIFEST;
312 	case ICE_AQ_RC_EBADSIG:
313 		return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
314 	case ICE_AQ_RC_ESVN:
315 		return ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW;
316 	case ICE_AQ_RC_EBADMAN:
317 		return ICE_DDP_PKG_MANIFEST_INVALID;
318 	case ICE_AQ_RC_EBADBUF:
319 		return ICE_DDP_PKG_BUFFER_INVALID;
320 	default:
321 		return ICE_DDP_PKG_ERR;
322 	}
323 }
324 
325 /**
326  * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
327  * @buf: pointer to buffer header
328  */
329 static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
330 {
331 	bool metadata = false;
332 
333 	if (LE32_TO_CPU(buf->section_entry[0].type) & ICE_METADATA_BUF)
334 		metadata = true;
335 
336 	return metadata;
337 }
338 
339 /**
340  * ice_is_last_download_buffer
341  * @buf: pointer to current buffer header
342  * @idx: index of the buffer in the current sequence
343  * @count: the buffer count in the current sequence
344  *
345  * Note: this routine should only be called if the buffer is not the last buffer
346  */
347 static bool
348 ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
349 {
350 	bool last = ((idx + 1) == count);
351 
352 	/* A set metadata flag in the next buffer will signal that the current
353 	 * buffer will be the last buffer downloaded
354 	 */
355 	if (!last) {
356 		struct ice_buf *next_buf = ((struct ice_buf *)buf) + 1;
357 
358 		last = ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
359 	}
360 
361 	return last;
362 }
363 
/**
 * ice_dwnld_cfg_bufs_no_lock
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @start: buffer index of first buffer to download
 * @count: the number of buffers to download
 * @indicate_last: if true, then set last buffer flag on last buffer download
 *
 * Downloads package configuration buffers to the firmware. Metadata buffers
 * are skipped, and the first metadata buffer found indicates that the rest
 * of the buffers are all metadata buffers.
 *
 * Caller is expected to hold the global config lock; no locking is done here.
 * NOTE(review): bounds of bufs[start..start+count-1] are assumed validated by
 * the caller (see ice_download_pkg_config_seg) — confirm for new call sites.
 */
static enum ice_ddp_state
ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
			   u32 count, bool indicate_last)
{
	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
	struct ice_buf_hdr *bh;
	enum ice_aq_err err;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_DDP_PKG_ERR;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)(bufs + start);
	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return ICE_DDP_PKG_SUCCESS;

	for (i = 0; i < count; i++) {
		enum ice_status status;
		bool last = false;

		bh = (struct ice_buf_hdr *)(bufs + start + i);

		/* Only flag the last buffer when the caller asked for it;
		 * signature-segment downloads keep the flag clear because
		 * the config segment follows in the same FW transaction.
		 */
		if (indicate_last)
			last = ice_is_last_download_buffer(bh, i, count);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);
			/* Map the AQ error of the failing command into the
			 * DDP state reported to the caller.
			 */
			err = hw->adminq.sq_last_status;
			state = ice_map_aq_err_to_ddp_state(err);
			break;
		}

		/* Stop early once the last real (non-metadata) buffer has
		 * been sent; any remaining buffers are metadata.
		 */
		if (last)
			break;
	}

	return state;
}
423 
424 /**
425  * ice_aq_get_pkg_info_list
426  * @hw: pointer to the hardware structure
427  * @pkg_info: the buffer which will receive the information list
428  * @buf_size: the size of the pkg_info information buffer
429  * @cd: pointer to command details structure or NULL
430  *
431  * Get Package Info List (0x0C43)
432  */
433 static enum ice_status
434 ice_aq_get_pkg_info_list(struct ice_hw *hw,
435 			 struct ice_aqc_get_pkg_info_resp *pkg_info,
436 			 u16 buf_size, struct ice_sq_cd *cd)
437 {
438 	struct ice_aq_desc desc;
439 
440 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
441 
442 	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
443 }
444 
445 /**
446  * ice_has_signing_seg - determine if package has a signing segment
447  * @hw: pointer to the hardware structure
448  * @pkg_hdr: pointer to the driver's package hdr
449  */
450 static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
451 {
452 	struct ice_generic_seg_hdr *seg_hdr;
453 
454 	seg_hdr = (struct ice_generic_seg_hdr *)
455 		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr);
456 
457 	return seg_hdr ? true : false;
458 }
459 
460 /**
461  * ice_get_pkg_segment_id - get correct package segment id, based on device
462  * @mac_type: MAC type of the device
463  */
464 static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
465 {
466 	u32 seg_id;
467 
468 	switch (mac_type) {
469 	case ICE_MAC_GENERIC:
470 	case ICE_MAC_GENERIC_3K:
471 	default:
472 		seg_id = SEGMENT_TYPE_ICE_E810;
473 		break;
474 	}
475 
476 	return seg_id;
477 }
478 
479 /**
480  * ice_get_pkg_sign_type - get package segment sign type, based on device
481  * @mac_type: MAC type of the device
482  */
483 static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
484 {
485 	u32 sign_type;
486 
487 	switch (mac_type) {
488 	case ICE_MAC_GENERIC_3K:
489 		sign_type = SEGMENT_SIGN_TYPE_RSA3K;
490 		break;
491 	case ICE_MAC_GENERIC:
492 	default:
493 		sign_type = SEGMENT_SIGN_TYPE_RSA2K;
494 		break;
495 	}
496 
497 	return sign_type;
498 }
499 
500 /**
501  * ice_get_signing_req - get correct package requirements, based on device
502  * @hw: pointer to the hardware structure
503  */
504 static void ice_get_signing_req(struct ice_hw *hw)
505 {
506 	hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type);
507 	hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type);
508 }
509 
510 /**
511  * ice_download_pkg_sig_seg - download a signature segment
512  * @hw: pointer to the hardware structure
513  * @seg: pointer to signature segment
514  */
515 static enum ice_ddp_state
516 ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
517 {
518 	enum ice_ddp_state state;
519 
520 	state = ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
521 					   LE32_TO_CPU(seg->buf_tbl.buf_count),
522 					   false);
523 
524 	return state;
525 }
526 
527 /**
528  * ice_download_pkg_config_seg - download a config segment
529  * @hw: pointer to the hardware structure
530  * @pkg_hdr: pointer to package header
531  * @idx: segment index
532  * @start: starting buffer
533  * @count: buffer count
534  *
535  * Note: idx must reference a ICE segment
536  */
537 static enum ice_ddp_state
538 ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
539 			    u32 idx, u32 start, u32 count)
540 {
541 	struct ice_buf_table *bufs;
542 	enum ice_ddp_state state;
543 	struct ice_seg *seg;
544 	u32 buf_count;
545 
546 	seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
547 	if (!seg)
548 		return ICE_DDP_PKG_ERR;
549 
550 	bufs = ice_find_buf_table(seg);
551 	buf_count = LE32_TO_CPU(bufs->buf_count);
552 
553 	if (start >= buf_count || start + count > buf_count)
554 		return ICE_DDP_PKG_ERR;
555 
556 	state = ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
557 					   true);
558 
559 	return state;
560 }
561 
562 /**
563  * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
564  * @hw: pointer to the hardware structure
565  * @pkg_hdr: pointer to package header
566  * @idx: segment index (must be a signature segment)
567  *
568  * Note: idx must reference a signature segment
569  */
570 static enum ice_ddp_state
571 ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
572 			    u32 idx)
573 {
574 	enum ice_ddp_state state;
575 	struct ice_sign_seg *seg;
576 	u32 conf_idx;
577 	u32 start;
578 	u32 count;
579 
580 	seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
581 	if (!seg) {
582 		state = ICE_DDP_PKG_ERR;
583 		goto exit;
584 	}
585 
586 	conf_idx = LE32_TO_CPU(seg->signed_seg_idx);
587 	start = LE32_TO_CPU(seg->signed_buf_start);
588 	count = LE32_TO_CPU(seg->signed_buf_count);
589 
590 	state = ice_download_pkg_sig_seg(hw, seg);
591 	if (state)
592 		goto exit;
593 
594 	state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
595 					    count);
596 
597 exit:
598 	return state;
599 }
600 
601 /**
602  * ice_match_signing_seg - determine if a matching signing segment exists
603  * @pkg_hdr: pointer to package header
604  * @seg_id: segment id that is expected
605  * @sign_type: signing type
606  */
607 static bool
608 ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type)
609 {
610 	bool match = false;
611 	u32 i;
612 
613 	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
614 		if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id,
615 						   sign_type)) {
616 			match = true;
617 			break;
618 		}
619 	}
620 
621 	return match;
622 }
623 
624 /**
625  * ice_post_dwnld_pkg_actions - perform post download package actions
626  * @hw: pointer to the hardware structure
627  */
628 static enum ice_ddp_state
629 ice_post_dwnld_pkg_actions(struct ice_hw *hw)
630 {
631 	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
632 	enum ice_status status;
633 
634 	status = ice_set_vlan_mode(hw);
635 	if (status) {
636 		ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
637 			  status);
638 		state = ICE_DDP_PKG_ERR;
639 	}
640 
641 	return state;
642 }
643 
644 /**
645  * ice_download_pkg_with_sig_seg - download package using signature segments
646  * @hw: pointer to the hardware structure
647  * @pkg_hdr: pointer to package header
648  */
649 static enum ice_ddp_state
650 ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
651 {
652 	enum ice_aq_err aq_err = hw->adminq.sq_last_status;
653 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
654 	enum ice_status status;
655 	u32 i;
656 
657 	ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id);
658 	ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type);
659 
660 	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
661 	if (status) {
662 		if (status == ICE_ERR_AQ_NO_WORK)
663 			state = ICE_DDP_PKG_ALREADY_LOADED;
664 		else
665 			state = ice_map_aq_err_to_ddp_state(aq_err);
666 		return state;
667 	}
668 
669 	for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
670 		if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id,
671 						    hw->pkg_sign_type))
672 			continue;
673 
674 		state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
675 		if (state)
676 			break;
677 	}
678 
679 	if (!state)
680 		state = ice_post_dwnld_pkg_actions(hw);
681 
682 	ice_release_global_cfg_lock(hw);
683 
684 	return state;
685 }
686 
687 /**
688  * ice_dwnld_cfg_bufs
689  * @hw: pointer to the hardware structure
690  * @bufs: pointer to an array of buffers
691  * @count: the number of buffers in the array
692  *
693  * Obtains global config lock and downloads the package configuration buffers
694  * to the firmware.
695  */
696 static enum ice_ddp_state
697 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
698 {
699 	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
700 	enum ice_status status;
701 	struct ice_buf_hdr *bh;
702 
703 	if (!bufs || !count)
704 		return ICE_DDP_PKG_ERR;
705 
706 	/* If the first buffer's first section has its metadata bit set
707 	 * then there are no buffers to be downloaded, and the operation is
708 	 * considered a success.
709 	 */
710 	bh = (struct ice_buf_hdr *)bufs;
711 	if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
712 		return ICE_DDP_PKG_SUCCESS;
713 
714 	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
715 	if (status) {
716 		if (status == ICE_ERR_AQ_NO_WORK)
717 			return ICE_DDP_PKG_ALREADY_LOADED;
718 		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
719 	}
720 
721 	state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
722 	if (!state)
723 		state = ice_post_dwnld_pkg_actions(hw);
724 
725 	ice_release_global_cfg_lock(hw);
726 
727 	return state;
728 }
729 
730 /**
731  * ice_download_pkg_without_sig_seg
732  * @hw: pointer to the hardware structure
733  * @ice_seg: pointer to the segment of the package to be downloaded
734  *
735  * Handles the download of a complete package without signature segment.
736  */
737 static enum ice_ddp_state
738 ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
739 {
740 	struct ice_buf_table *ice_buf_tbl;
741 	enum ice_ddp_state state;
742 
743 	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
744 		  ice_seg->hdr.seg_format_ver.major,
745 		  ice_seg->hdr.seg_format_ver.minor,
746 		  ice_seg->hdr.seg_format_ver.update,
747 		  ice_seg->hdr.seg_format_ver.draft);
748 
749 	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
750 		  LE32_TO_CPU(ice_seg->hdr.seg_type),
751 		  LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
752 
753 	ice_buf_tbl = ice_find_buf_table(ice_seg);
754 
755 	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
756 		  LE32_TO_CPU(ice_buf_tbl->buf_count));
757 
758 	state = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
759 				   LE32_TO_CPU(ice_buf_tbl->buf_count));
760 
761 	return state;
762 }
763 
764 /**
765  * ice_download_pkg
766  * @hw: pointer to the hardware structure
767  * @pkg_hdr: pointer to package header
768  * @ice_seg: pointer to the segment of the package to be downloaded
769  *
770  * Handles the download of a complete package.
771  */
772 static enum ice_ddp_state
773 ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
774 		 struct ice_seg *ice_seg)
775 {
776 	enum ice_ddp_state state;
777 
778 	if (hw->pkg_has_signing_seg)
779 		state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
780 	else
781 		state = ice_download_pkg_without_sig_seg(hw, ice_seg);
782 
783 	ice_post_pkg_dwnld_vlan_mode_cfg(hw);
784 
785 	return state;
786 }
787 
/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure: whether a signing
 * segment is present, the expected segment id/sign type, the package
 * version/name (from the metadata section), and the ice segment's format
 * version and id.
 */
static enum ice_ddp_state
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_generic_seg_hdr *seg_hdr;

	if (!pkg_hdr)
		return ICE_DDP_PKG_ERR;

	/* Record signing info first; pkg_seg_id is needed for the lookup
	 * below.
	 */
	hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
	ice_get_signing_req(hw);

	ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
		  hw->pkg_seg_id);

	seg_hdr = (struct ice_generic_seg_hdr *)
		ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
	if (seg_hdr) {
		struct ice_meta_sect *meta;
		/* local section-enumeration cursor, not the DDP state enum */
		struct ice_pkg_enum state;

		ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

		/* Get package information from the Metadata Section */
		meta = (struct ice_meta_sect *)
			ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
					     ICE_SID_METADATA);
		if (!meta) {
			ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
			return ICE_DDP_PKG_INVALID_FILE;
		}

		/* Cache the package's version and name in the HW struct */
		hw->pkg_ver = meta->ver;
		ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
			   ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta->ver.major, meta->ver.minor, meta->ver.update,
			  meta->ver.draft, meta->name);

		/* Also cache the ice segment's own format version and id */
		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
		ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
			   sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft,
			  seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
		return ICE_DDP_PKG_INVALID_FILE;
	}

	return ICE_DDP_PKG_SUCCESS;
}
851 
/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 * Queries the FW package-info list, caches the active package's version,
 * track id, and name, and logs every reported package with its status flags.
 */
enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	u16 size;
	u32 i;

	/* Response buffer sized for up to ICE_PKG_CNT entries */
	size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
	pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
	if (!pkg_info)
		return ICE_DDP_PKG_ERR;

	if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
		state = ICE_DDP_PKG_ERR;
		goto init_pkg_free_alloc;
	}

	for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
/* max number of single-char status flags per package entry */
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		/* 'A' = active: this is the package the HW is running with;
		 * cache its identifying details in the HW struct.
		 */
		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
			ice_memcpy(hw->active_pkg_name,
				   pkg_info->pkg_info[i].name,
				   sizeof(pkg_info->pkg_info[i].name),
				   ICE_NONDMA_TO_NONDMA);
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		/* 'B' = active at boot, 'M' = modified, 'N' = in NVM */
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

init_pkg_free_alloc:
	ice_free(hw, pkg_info);

	return state;
}
911 
912 /**
913  * ice_label_enum_handler
914  * @sect_type: section type
915  * @section: pointer to section
916  * @index: index of the label entry to be returned
917  * @offset: pointer to receive absolute offset, always zero for label sections
918  *
919  * This is a callback function that can be passed to ice_pkg_enum_entry.
920  * Handles enumeration of individual label entries.
921  */
922 static void *
923 ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
924 		       u32 *offset)
925 {
926 	struct ice_label_section *labels;
927 
928 	if (!section)
929 		return NULL;
930 
931 	if (index > ICE_MAX_LABELS_IN_BUF)
932 		return NULL;
933 
934 	if (offset)
935 		*offset = 0;
936 
937 	labels = (struct ice_label_section *)section;
938 	if (index >= LE16_TO_CPU(labels->count))
939 		return NULL;
940 
941 	return labels->label + index;
942 }
943 
944 /**
945  * ice_enum_labels
946  * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
947  * @type: the section type that will contain the label (0 on subsequent calls)
948  * @state: ice_pkg_enum structure that will hold the state of the enumeration
949  * @value: pointer to a value that will return the label's value if found
950  *
951  * Enumerates a list of labels in the package. The caller will call
952  * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
953  * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
954  * the end of the list has been reached.
955  */
956 static char *
957 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
958 		u16 *value)
959 {
960 	struct ice_label *label;
961 
962 	/* Check for valid label section on first call */
963 	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
964 		return NULL;
965 
966 	label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
967 						       NULL,
968 						       ice_label_enum_handler);
969 	if (!label)
970 		return NULL;
971 
972 	*value = LE16_TO_CPU(label->value);
973 	return label->name;
974 }
975 
976 /**
977  * ice_find_label_value
978  * @ice_seg: pointer to the ice segment (non-NULL)
979  * @name: name of the label to search for
980  * @type: the section type that will contain the label
981  * @value: pointer to a value that will return the label's value if found
982  *
983  * Finds a label's value given the label name and the section type to search.
984  * The ice_seg parameter must not be NULL since the first call to
985  * ice_enum_labels requires a pointer to an actual ice_seg structure.
986  */
987 enum ice_status
988 ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
989 		     u16 *value)
990 {
991 	struct ice_pkg_enum state;
992 	char *label_name;
993 	u16 val;
994 
995 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
996 
997 	if (!ice_seg)
998 		return ICE_ERR_PARAM;
999 
1000 	do {
1001 		label_name = ice_enum_labels(ice_seg, type, &state, &val);
1002 		if (label_name && !strcmp(label_name, name)) {
1003 			*value = val;
1004 			return ICE_SUCCESS;
1005 		}
1006 
1007 		ice_seg = NULL;
1008 	} while (label_name);
1009 
1010 	return ICE_ERR_CFG;
1011 }
1012 
1013 /**
1014  * ice_verify_pkg - verify package
1015  * @pkg: pointer to the package buffer
1016  * @len: size of the package buffer
1017  *
1018  * Verifies various attributes of the package file, including length, format
1019  * version, and the requirement of at least one segment.
1020  */
1021 enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1022 {
1023 	u32 seg_count;
1024 	u32 i;
1025 
1026 	if (len < ice_struct_size(pkg, seg_offset, 1))
1027 		return ICE_DDP_PKG_INVALID_FILE;
1028 
1029 	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1030 	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1031 	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1032 	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1033 		return ICE_DDP_PKG_INVALID_FILE;
1034 
1035 	/* pkg must have at least one segment */
1036 	seg_count = LE32_TO_CPU(pkg->seg_count);
1037 	if (seg_count < 1)
1038 		return ICE_DDP_PKG_INVALID_FILE;
1039 
1040 	/* make sure segment array fits in package length */
1041 	if (len < ice_struct_size(pkg, seg_offset, seg_count))
1042 		return ICE_DDP_PKG_INVALID_FILE;
1043 
1044 	/* all segments must fit within length */
1045 	for (i = 0; i < seg_count; i++) {
1046 		u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
1047 		struct ice_generic_seg_hdr *seg;
1048 
1049 		/* segment header must fit */
1050 		if (len < off + sizeof(*seg))
1051 			return ICE_DDP_PKG_INVALID_FILE;
1052 
1053 		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1054 
1055 		/* segment body must fit */
1056 		if (len < off + LE32_TO_CPU(seg->seg_size))
1057 			return ICE_DDP_PKG_INVALID_FILE;
1058 	}
1059 
1060 	return ICE_DDP_PKG_SUCCESS;
1061 }
1062 
1063 /**
1064  * ice_free_seg - free package segment pointer
1065  * @hw: pointer to the hardware structure
1066  *
1067  * Frees the package segment pointer in the proper manner, depending on if the
1068  * segment was allocated or just the passed in pointer was stored.
1069  */
1070 void ice_free_seg(struct ice_hw *hw)
1071 {
1072 	if (hw->pkg_copy) {
1073 		ice_free(hw, hw->pkg_copy);
1074 		hw->pkg_copy = NULL;
1075 		hw->pkg_size = 0;
1076 	}
1077 	hw->seg = NULL;
1078 }
1079 
1080 /**
1081  * ice_chk_pkg_version - check package version for compatibility with driver
1082  * @pkg_ver: pointer to a version structure to check
1083  *
1084  * Check to make sure that the package about to be downloaded is compatible with
1085  * the driver. To be compatible, the major and minor components of the package
1086  * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
1087  * definitions.
1088  */
1089 static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1090 {
1091 	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
1092 	    (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
1093 	     pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
1094 		return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
1095 	else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
1096 		 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
1097 		  pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
1098 		return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
1099 
1100 	return ICE_DDP_PKG_SUCCESS;
1101 }
1102 
1103 /**
1104  * ice_chk_pkg_compat
1105  * @hw: pointer to the hardware structure
1106  * @ospkg: pointer to the package hdr
1107  * @seg: pointer to the package segment hdr
1108  *
1109  * This function checks the package version compatibility with driver and NVM
1110  */
1111 static enum ice_ddp_state
1112 ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1113 		   struct ice_seg **seg)
1114 {
1115 	struct ice_aqc_get_pkg_info_resp *pkg;
1116 	enum ice_ddp_state state;
1117 	u16 size;
1118 	u32 i;
1119 
1120 	/* Check package version compatibility */
1121 	state = ice_chk_pkg_version(&hw->pkg_ver);
1122 	if (state) {
1123 		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1124 		return state;
1125 	}
1126 
1127 	/* find ICE segment in given package */
1128 	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
1129 						     ospkg);
1130 	if (!*seg) {
1131 		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1132 		return ICE_DDP_PKG_INVALID_FILE;
1133 	}
1134 
1135 	/* Check if FW is compatible with the OS package */
1136 	size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
1137 	pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
1138 	if (!pkg)
1139 		return ICE_DDP_PKG_ERR;
1140 
1141 	if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
1142 		state = ICE_DDP_PKG_ERR;
1143 		goto fw_ddp_compat_free_alloc;
1144 	}
1145 
1146 	for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
1147 		/* loop till we find the NVM package */
1148 		if (!pkg->pkg_info[i].is_in_nvm)
1149 			continue;
1150 		if ((*seg)->hdr.seg_format_ver.major !=
1151 			pkg->pkg_info[i].ver.major ||
1152 		    (*seg)->hdr.seg_format_ver.minor >
1153 			pkg->pkg_info[i].ver.minor) {
1154 			state = ICE_DDP_PKG_FW_MISMATCH;
1155 			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1156 		}
1157 		/* done processing NVM package so break */
1158 		break;
1159 	}
1160 fw_ddp_compat_free_alloc:
1161 	ice_free(hw, pkg);
1162 	return state;
1163 }
1164 
1165 /**
1166  * ice_sw_fv_handler
1167  * @sect_type: section type
1168  * @section: pointer to section
1169  * @index: index of the field vector entry to be returned
1170  * @offset: ptr to variable that receives the offset in the field vector table
1171  *
1172  * This is a callback function that can be passed to ice_pkg_enum_entry.
1173  * This function treats the given section as of type ice_sw_fv_section and
1174  * enumerates offset field. "offset" is an index into the field vector table.
1175  */
1176 static void *
1177 ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
1178 {
1179 	struct ice_sw_fv_section *fv_section =
1180 		(struct ice_sw_fv_section *)section;
1181 
1182 	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
1183 		return NULL;
1184 	if (index >= LE16_TO_CPU(fv_section->count))
1185 		return NULL;
1186 	if (offset)
1187 		/* "index" passed in to this function is relative to a given
1188 		 * 4k block. To get to the true index into the field vector
1189 		 * table need to add the relative index to the base_offset
1190 		 * field of this section
1191 		 */
1192 		*offset = LE16_TO_CPU(fv_section->base_offset) + index;
1193 	return fv_section->fv + index;
1194 }
1195 
1196 /**
1197  * ice_get_prof_index_max - get the max profile index for used profile
1198  * @hw: pointer to the HW struct
1199  *
1200  * Calling this function will get the max profile index for used profile
1201  * and store the index number in struct ice_switch_info *switch_info
1202  * in hw for following use.
1203  */
1204 static int ice_get_prof_index_max(struct ice_hw *hw)
1205 {
1206 	u16 prof_index = 0, j, max_prof_index = 0;
1207 	struct ice_pkg_enum state;
1208 	struct ice_seg *ice_seg;
1209 	bool flag = false;
1210 	struct ice_fv *fv;
1211 	u32 offset;
1212 
1213 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1214 
1215 	if (!hw->seg)
1216 		return ICE_ERR_PARAM;
1217 
1218 	ice_seg = hw->seg;
1219 
1220 	do {
1221 		fv = (struct ice_fv *)
1222 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1223 					   &offset, ice_sw_fv_handler);
1224 		if (!fv)
1225 			break;
1226 		ice_seg = NULL;
1227 
1228 		/* in the profile that not be used, the prot_id is set to 0xff
1229 		 * and the off is set to 0x1ff for all the field vectors.
1230 		 */
1231 		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1232 			if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
1233 			    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
1234 				flag = true;
1235 		if (flag && prof_index > max_prof_index)
1236 			max_prof_index = prof_index;
1237 
1238 		prof_index++;
1239 		flag = false;
1240 	} while (fv);
1241 
1242 	hw->switch_info->max_used_prof_index = max_prof_index;
1243 
1244 	return ICE_SUCCESS;
1245 }
1246 
1247 /**
1248  * ice_get_ddp_pkg_state - get DDP pkg state after download
1249  * @hw: pointer to the HW struct
1250  * @already_loaded: indicates if pkg was already loaded onto the device
1251  *
1252  */
1253 static enum ice_ddp_state
1254 ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
1255 {
1256 	if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
1257 	    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
1258 	    hw->pkg_ver.update == hw->active_pkg_ver.update &&
1259 	    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
1260 	    !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
1261 		if (already_loaded)
1262 			return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
1263 		else
1264 			return ICE_DDP_PKG_SUCCESS;
1265 	} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
1266 		   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
1267 		return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
1268 	} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
1269 		   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
1270 		return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
1271 	} else {
1272 		return ICE_DDP_PKG_ERR;
1273 	}
1274 }
1275 
1276 /**
1277  * ice_init_pkg_regs - initialize additional package registers
1278  * @hw: pointer to the hardware structure
1279  */
1280 static void ice_init_pkg_regs(struct ice_hw *hw)
1281 {
1282 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1283 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1284 #define ICE_SW_BLK_IDX	0
1285 
1286 	/* setup Switch block input mask, which is 48-bits in two parts */
1287 	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1288 	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1289 }
1290 
1291 /**
1292  * ice_init_pkg - initialize/download package
1293  * @hw: pointer to the hardware structure
1294  * @buf: pointer to the package buffer
1295  * @len: size of the package buffer
1296  *
1297  * This function initializes a package. The package contains HW tables
1298  * required to do packet processing. First, the function extracts package
1299  * information such as version. Then it finds the ice configuration segment
1300  * within the package; this function then saves a copy of the segment pointer
1301  * within the supplied package buffer. Next, the function will cache any hints
1302  * from the package, followed by downloading the package itself. Note, that if
1303  * a previous PF driver has already downloaded the package successfully, then
1304  * the current driver will not have to download the package again.
1305  *
1306  * The local package contents will be used to query default behavior and to
1307  * update specific sections of the HW's version of the package (e.g. to update
1308  * the parse graph to understand new protocols).
1309  *
1310  * This function stores a pointer to the package buffer memory, and it is
1311  * expected that the supplied buffer will not be freed immediately. If the
1312  * package buffer needs to be freed, such as when read from a file, use
1313  * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
1314  * case.
1315  */
1316 enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1317 {
1318 	bool already_loaded = false;
1319 	enum ice_ddp_state state;
1320 	struct ice_pkg_hdr *pkg;
1321 	struct ice_seg *seg;
1322 
1323 	if (!buf || !len)
1324 		return ICE_DDP_PKG_ERR;
1325 
1326 	pkg = (struct ice_pkg_hdr *)buf;
1327 	state = ice_verify_pkg(pkg, len);
1328 	if (state) {
1329 		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1330 			  state);
1331 		return state;
1332 	}
1333 
1334 	/* initialize package info */
1335 	state = ice_init_pkg_info(hw, pkg);
1336 	if (state)
1337 		return state;
1338 
1339 	/* For packages with signing segments, must be a matching segment */
1340 	if (hw->pkg_has_signing_seg)
1341 		if (!ice_match_signing_seg(pkg, hw->pkg_seg_id,
1342 					   hw->pkg_sign_type))
1343 			return ICE_DDP_PKG_ERR;
1344 
1345 	/* before downloading the package, check package version for
1346 	 * compatibility with driver
1347 	 */
1348 	state = ice_chk_pkg_compat(hw, pkg, &seg);
1349 	if (state)
1350 		return state;
1351 
1352 	/* initialize package hints and then download package */
1353 	ice_init_pkg_hints(hw, seg);
1354 	state = ice_download_pkg(hw, pkg, seg);
1355 
1356 	if (state == ICE_DDP_PKG_ALREADY_LOADED) {
1357 		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1358 		already_loaded = true;
1359 	}
1360 
1361 	/* Get information on the package currently loaded in HW, then make sure
1362 	 * the driver is compatible with this version.
1363 	 */
1364 	if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
1365 		state = ice_get_pkg_info(hw);
1366 		if (!state)
1367 			state = ice_get_ddp_pkg_state(hw, already_loaded);
1368 	}
1369 
1370 	if (ice_is_init_pkg_successful(state)) {
1371 		hw->seg = seg;
1372 		/* on successful package download update other required
1373 		 * registers to support the package and fill HW tables
1374 		 * with package content.
1375 		 */
1376 		ice_init_pkg_regs(hw);
1377 		ice_fill_blk_tbls(hw);
1378 		ice_get_prof_index_max(hw);
1379 	} else {
1380 		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1381 			  state);
1382 	}
1383 
1384 	return state;
1385 }
1386 
1387 /**
1388  * ice_copy_and_init_pkg - initialize/download a copy of the package
1389  * @hw: pointer to the hardware structure
1390  * @buf: pointer to the package buffer
1391  * @len: size of the package buffer
1392  *
1393  * This function copies the package buffer, and then calls ice_init_pkg() to
1394  * initialize the copied package contents.
1395  *
1396  * The copying is necessary if the package buffer supplied is constant, or if
1397  * the memory may disappear shortly after calling this function.
1398  *
1399  * If the package buffer resides in the data segment and can be modified, the
1400  * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
1401  *
1402  * However, if the package buffer needs to be copied first, such as when being
1403  * read from a file, the caller should use ice_copy_and_init_pkg().
1404  *
1405  * This function will first copy the package buffer, before calling
1406  * ice_init_pkg(). The caller is free to immediately destroy the original
1407  * package buffer, as the new copy will be managed by this function and
1408  * related routines.
1409  */
1410 enum ice_ddp_state
1411 ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1412 {
1413 	enum ice_ddp_state state;
1414 	u8 *buf_copy;
1415 
1416 	if (!buf || !len)
1417 		return ICE_DDP_PKG_ERR;
1418 
1419 	buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
1420 
1421 	state = ice_init_pkg(hw, buf_copy, len);
1422 	if (!ice_is_init_pkg_successful(state)) {
1423 		/* Free the copy, since we failed to initialize the package */
1424 		ice_free(hw, buf_copy);
1425 	} else {
1426 		/* Track the copied pkg so we can free it later */
1427 		hw->pkg_copy = buf_copy;
1428 		hw->pkg_size = len;
1429 	}
1430 
1431 	return state;
1432 }
1433 
1434 /**
1435  * ice_is_init_pkg_successful - check if DDP init was successful
1436  * @state: state of the DDP pkg after download
1437  */
1438 bool ice_is_init_pkg_successful(enum ice_ddp_state state)
1439 {
1440 	switch (state) {
1441 	case ICE_DDP_PKG_SUCCESS:
1442 	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
1443 	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
1444 		return true;
1445 	default:
1446 		return false;
1447 	}
1448 }
1449 
1450 /**
1451  * ice_pkg_buf_alloc
1452  * @hw: pointer to the HW structure
1453  *
1454  * Allocates a package buffer and returns a pointer to the buffer header.
1455  * Note: all package contents must be in Little Endian form.
1456  */
1457 struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1458 {
1459 	struct ice_buf_build *bld;
1460 	struct ice_buf_hdr *buf;
1461 
1462 	bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
1463 	if (!bld)
1464 		return NULL;
1465 
1466 	buf = (struct ice_buf_hdr *)bld;
1467 	buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
1468 					     section_entry));
1469 	return bld;
1470 }
1471 
1472 static bool ice_is_gtp_u_profile(u32 prof_idx)
1473 {
1474 	return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
1475 		prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP) ||
1476 	       prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
1477 }
1478 
1479 static bool ice_is_gtp_c_profile(u32 prof_idx)
1480 {
1481 	switch (prof_idx) {
1482 	case ICE_PROFID_IPV4_GTPC_TEID:
1483 	case ICE_PROFID_IPV4_GTPC_NO_TEID:
1484 	case ICE_PROFID_IPV6_GTPC_TEID:
1485 	case ICE_PROFID_IPV6_GTPC_NO_TEID:
1486 		return true;
1487 	default:
1488 		return false;
1489 	}
1490 }
1491 
1492 /**
1493  * ice_get_sw_prof_type - determine switch profile type
1494  * @hw: pointer to the HW structure
1495  * @fv: pointer to the switch field vector
1496  * @prof_idx: profile index to check
1497  */
1498 static enum ice_prof_type
1499 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
1500 {
1501 	bool valid_prof = false;
1502 	u16 i;
1503 
1504 	if (ice_is_gtp_c_profile(prof_idx))
1505 		return ICE_PROF_TUN_GTPC;
1506 
1507 	if (ice_is_gtp_u_profile(prof_idx))
1508 		return ICE_PROF_TUN_GTPU;
1509 
1510 	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1511 		if (fv->ew[i].off != ICE_NAN_OFFSET)
1512 			valid_prof = true;
1513 
1514 		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
1515 		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1516 		    fv->ew[i].off == ICE_VNI_OFFSET)
1517 			return ICE_PROF_TUN_UDP;
1518 
1519 		/* GRE tunnel will have GRE protocol */
1520 		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1521 			return ICE_PROF_TUN_GRE;
1522 	}
1523 
1524 	return valid_prof ? ICE_PROF_NON_TUN : ICE_PROF_INVALID;
1525 }
1526 
1527 /**
1528  * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
1529  * @hw: pointer to hardware structure
1530  * @req_profs: type of profiles requested
1531  * @bm: pointer to memory for returning the bitmap of field vectors
1532  */
1533 void
1534 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1535 		     ice_bitmap_t *bm)
1536 {
1537 	struct ice_pkg_enum state;
1538 	struct ice_seg *ice_seg;
1539 	struct ice_fv *fv;
1540 
1541 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1542 	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
1543 	ice_seg = hw->seg;
1544 	do {
1545 		enum ice_prof_type prof_type;
1546 		u32 offset;
1547 
1548 		fv = (struct ice_fv *)
1549 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1550 					   &offset, ice_sw_fv_handler);
1551 		ice_seg = NULL;
1552 
1553 		if (fv) {
1554 			/* Determine field vector type */
1555 			prof_type = ice_get_sw_prof_type(hw, fv, offset);
1556 
1557 			if (req_profs & prof_type)
1558 				ice_set_bit((u16)offset, bm);
1559 		}
1560 	} while (fv);
1561 }
1562 
1563 /**
1564  * ice_get_sw_fv_list
1565  * @hw: pointer to the HW structure
1566  * @lkups: lookup elements or match criteria for the advanced recipe, one
1567  *	   structure per protocol header
1568  * @bm: bitmap of field vectors to consider
1569  * @fv_list: Head of a list
1570  *
1571  * Finds all the field vector entries from switch block that contain
1572  * a given protocol ID and offset and returns a list of structures of type
1573  * "ice_sw_fv_list_entry". Every structure in the list has a field vector
1574  * definition and profile ID information
1575  * NOTE: The caller of the function is responsible for freeing the memory
1576  * allocated for every list entry.
1577  */
1578 enum ice_status
1579 ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
1580 		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
1581 {
1582 	struct ice_sw_fv_list_entry *fvl;
1583 	struct ice_sw_fv_list_entry *tmp;
1584 	struct ice_pkg_enum state;
1585 	struct ice_seg *ice_seg;
1586 	struct ice_fv *fv;
1587 	u32 offset;
1588 
1589 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1590 
1591 	if (!lkups->n_val_words || !hw->seg)
1592 		return ICE_ERR_PARAM;
1593 
1594 	ice_seg = hw->seg;
1595 	do {
1596 		u16 i;
1597 
1598 		fv = (struct ice_fv *)
1599 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1600 					   &offset, ice_sw_fv_handler);
1601 		if (!fv)
1602 			break;
1603 		ice_seg = NULL;
1604 
1605 		/* If field vector is not in the bitmap list, then skip this
1606 		 * profile.
1607 		 */
1608 		if (!ice_is_bit_set(bm, (u16)offset))
1609 			continue;
1610 
1611 		for (i = 0; i < lkups->n_val_words; i++) {
1612 			int j;
1613 
1614 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1615 				if (fv->ew[j].prot_id ==
1616 				    lkups->fv_words[i].prot_id &&
1617 				    fv->ew[j].off == lkups->fv_words[i].off)
1618 					break;
1619 			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1620 				break;
1621 			if (i + 1 == lkups->n_val_words) {
1622 				fvl = (struct ice_sw_fv_list_entry *)
1623 					ice_malloc(hw, sizeof(*fvl));
1624 				if (!fvl)
1625 					goto err;
1626 				fvl->fv_ptr = fv;
1627 				fvl->profile_id = offset;
1628 				LIST_ADD(&fvl->list_entry, fv_list);
1629 				break;
1630 			}
1631 		}
1632 	} while (fv);
1633 	if (LIST_EMPTY(fv_list)) {
1634 		ice_warn(hw, "Required profiles not found in currently loaded DDP package");
1635 		return ICE_ERR_CFG;
1636 	}
1637 	return ICE_SUCCESS;
1638 
1639 err:
1640 	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
1641 				 list_entry) {
1642 		LIST_DEL(&fvl->list_entry);
1643 		ice_free(hw, fvl);
1644 	}
1645 
1646 	return ICE_ERR_NO_MEMORY;
1647 }
1648 
1649 /**
1650  * ice_init_prof_result_bm - Initialize the profile result index bitmap
1651  * @hw: pointer to hardware structure
1652  */
1653 void ice_init_prof_result_bm(struct ice_hw *hw)
1654 {
1655 	struct ice_pkg_enum state;
1656 	struct ice_seg *ice_seg;
1657 	struct ice_fv *fv;
1658 
1659 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
1660 
1661 	if (!hw->seg)
1662 		return;
1663 
1664 	ice_seg = hw->seg;
1665 	do {
1666 		u32 off;
1667 		u16 i;
1668 
1669 		fv = (struct ice_fv *)
1670 			ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1671 					   &off, ice_sw_fv_handler);
1672 		ice_seg = NULL;
1673 		if (!fv)
1674 			break;
1675 
1676 		ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
1677 				ICE_MAX_FV_WORDS);
1678 
1679 		/* Determine empty field vector indices, these can be
1680 		 * used for recipe results. Skip index 0, since it is
1681 		 * always used for Switch ID.
1682 		 */
1683 		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
1684 			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
1685 			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
1686 				ice_set_bit(i,
1687 					    hw->switch_info->prof_res_bm[off]);
1688 	} while (fv);
1689 }
1690 
1691 /**
1692  * ice_pkg_buf_free
1693  * @hw: pointer to the HW structure
1694  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1695  *
1696  * Frees a package buffer
1697  */
1698 void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
1699 {
1700 	ice_free(hw, bld);
1701 }
1702 
1703 /**
1704  * ice_pkg_buf_reserve_section
1705  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1706  * @count: the number of sections to reserve
1707  *
1708  * Reserves one or more section table entries in a package buffer. This routine
1709  * can be called multiple times as long as they are made before calling
1710  * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
1711  * is called once, the number of sections that can be allocated will not be able
1712  * to be increased; not using all reserved sections is fine, but this will
1713  * result in some wasted space in the buffer.
1714  * Note: all package contents must be in Little Endian form.
1715  */
1716 enum ice_status
1717 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
1718 {
1719 	struct ice_buf_hdr *buf;
1720 	u16 section_count;
1721 	u16 data_end;
1722 
1723 	if (!bld)
1724 		return ICE_ERR_PARAM;
1725 
1726 	buf = (struct ice_buf_hdr *)&bld->buf;
1727 
1728 	/* already an active section, can't increase table size */
1729 	section_count = LE16_TO_CPU(buf->section_count);
1730 	if (section_count > 0)
1731 		return ICE_ERR_CFG;
1732 
1733 	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
1734 		return ICE_ERR_CFG;
1735 	bld->reserved_section_table_entries += count;
1736 
1737 	data_end = LE16_TO_CPU(buf->data_end) +
1738 		FLEX_ARRAY_SIZE(buf, section_entry, count);
1739 	buf->data_end = CPU_TO_LE16(data_end);
1740 
1741 	return ICE_SUCCESS;
1742 }
1743 
1744 /**
1745  * ice_pkg_buf_alloc_section
1746  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1747  * @type: the section type value
1748  * @size: the size of the section to reserve (in bytes)
1749  *
1750  * Reserves memory in the buffer for a section's content and updates the
1751  * buffers' status accordingly. This routine returns a pointer to the first
1752  * byte of the section start within the buffer, which is used to fill in the
1753  * section contents.
1754  * Note: all package contents must be in Little Endian form.
1755  */
1756 void *
1757 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
1758 {
1759 	struct ice_buf_hdr *buf;
1760 	u16 sect_count;
1761 	u16 data_end;
1762 
1763 	if (!bld || !type || !size)
1764 		return NULL;
1765 
1766 	buf = (struct ice_buf_hdr *)&bld->buf;
1767 
1768 	/* check for enough space left in buffer */
1769 	data_end = LE16_TO_CPU(buf->data_end);
1770 
1771 	/* section start must align on 4 byte boundary */
1772 	data_end = ICE_ALIGN(data_end, 4);
1773 
1774 	if ((data_end + size) > ICE_MAX_S_DATA_END)
1775 		return NULL;
1776 
1777 	/* check for more available section table entries */
1778 	sect_count = LE16_TO_CPU(buf->section_count);
1779 	if (sect_count < bld->reserved_section_table_entries) {
1780 		void *section_ptr = ((u8 *)buf) + data_end;
1781 
1782 		buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
1783 		buf->section_entry[sect_count].size = CPU_TO_LE16(size);
1784 		buf->section_entry[sect_count].type = CPU_TO_LE32(type);
1785 
1786 		data_end += size;
1787 		buf->data_end = CPU_TO_LE16(data_end);
1788 
1789 		buf->section_count = CPU_TO_LE16(sect_count + 1);
1790 		return section_ptr;
1791 	}
1792 
1793 	/* no free section table entries */
1794 	return NULL;
1795 }
1796 
1797 /**
1798  * ice_pkg_buf_alloc_single_section
1799  * @hw: pointer to the HW structure
1800  * @type: the section type value
1801  * @size: the size of the section to reserve (in bytes)
1802  * @section: returns pointer to the section
1803  *
1804  * Allocates a package buffer with a single section.
1805  * Note: all package contents must be in Little Endian form.
1806  */
1807 struct ice_buf_build *
1808 ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
1809 				 void **section)
1810 {
1811 	struct ice_buf_build *buf;
1812 
1813 	if (!section)
1814 		return NULL;
1815 
1816 	buf = ice_pkg_buf_alloc(hw);
1817 	if (!buf)
1818 		return NULL;
1819 
1820 	if (ice_pkg_buf_reserve_section(buf, 1))
1821 		goto ice_pkg_buf_alloc_single_section_err;
1822 
1823 	*section = ice_pkg_buf_alloc_section(buf, type, size);
1824 	if (!*section)
1825 		goto ice_pkg_buf_alloc_single_section_err;
1826 
1827 	return buf;
1828 
1829 ice_pkg_buf_alloc_single_section_err:
1830 	ice_pkg_buf_free(hw, buf);
1831 	return NULL;
1832 }
1833 
1834 /**
1835  * ice_pkg_buf_unreserve_section
1836  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1837  * @count: the number of sections to unreserve
1838  *
1839  * Unreserves one or more section table entries in a package buffer, releasing
1840  * space that can be used for section data. This routine can be called
1841  * multiple times as long as they are made before calling
1842  * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
1843  * is called once, the number of sections that can be allocated will not be able
1844  * to be increased; not using all reserved sections is fine, but this will
1845  * result in some wasted space in the buffer.
1846  * Note: all package contents must be in Little Endian form.
1847  */
1848 enum ice_status
1849 ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
1850 {
1851 	struct ice_buf_hdr *buf;
1852 	u16 section_count;
1853 	u16 data_end;
1854 
1855 	if (!bld)
1856 		return ICE_ERR_PARAM;
1857 
1858 	buf = (struct ice_buf_hdr *)&bld->buf;
1859 
1860 	/* already an active section, can't decrease table size */
1861 	section_count = LE16_TO_CPU(buf->section_count);
1862 	if (section_count > 0)
1863 		return ICE_ERR_CFG;
1864 
1865 	if (count > bld->reserved_section_table_entries)
1866 		return ICE_ERR_CFG;
1867 	bld->reserved_section_table_entries -= count;
1868 
1869 	data_end = LE16_TO_CPU(buf->data_end) -
1870 		FLEX_ARRAY_SIZE(buf, section_entry, count);
1871 	buf->data_end = CPU_TO_LE16(data_end);
1872 
1873 	return ICE_SUCCESS;
1874 }
1875 
1876 /**
1877  * ice_pkg_buf_get_free_space
1878  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1879  *
1880  * Returns the number of free bytes remaining in the buffer.
1881  * Note: all package contents must be in Little Endian form.
1882  */
1883 u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
1884 {
1885 	struct ice_buf_hdr *buf;
1886 
1887 	if (!bld)
1888 		return 0;
1889 
1890 	buf = (struct ice_buf_hdr *)&bld->buf;
1891 	return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
1892 }
1893 
1894 /**
1895  * ice_pkg_buf_get_active_sections
1896  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1897  *
1898  * Returns the number of active sections. Before using the package buffer
1899  * in an update package command, the caller should make sure that there is at
1900  * least one active section - otherwise, the buffer is not legal and should
1901  * not be used.
1902  * Note: all package contents must be in Little Endian form.
1903  */
1904 u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
1905 {
1906 	struct ice_buf_hdr *buf;
1907 
1908 	if (!bld)
1909 		return 0;
1910 
1911 	buf = (struct ice_buf_hdr *)&bld->buf;
1912 	return LE16_TO_CPU(buf->section_count);
1913 }
1914 
1915 /**
1916  * ice_pkg_buf
1917  * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
1918  *
1919  * Return a pointer to the buffer's header
1920  */
1921 struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
1922 {
1923 	if (bld)
1924 		return &bld->buf;
1925 
1926 	return NULL;
1927 }
1928 
1929 /**
1930  * ice_find_buf_table
1931  * @ice_seg: pointer to the ice segment
1932  *
1933  * Returns the address of the buffer table within the ice segment.
1934  */
1935 struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
1936 {
1937 	struct ice_nvm_table *nvms;
1938 
1939 	nvms = (struct ice_nvm_table *)
1940 		(ice_seg->device_table +
1941 		 LE32_TO_CPU(ice_seg->device_table_count));
1942 
1943 	return (_FORCE_ struct ice_buf_table *)
1944 		(nvms->vers + LE32_TO_CPU(nvms->table_count));
1945 }
1946 
1947 /**
1948  * ice_pkg_val_buf
1949  * @buf: pointer to the ice buffer
1950  *
1951  * This helper function validates a buffer's header.
1952  */
1953 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
1954 {
1955 	struct ice_buf_hdr *hdr;
1956 	u16 section_count;
1957 	u16 data_end;
1958 
1959 	hdr = (struct ice_buf_hdr *)buf->buf;
1960 	/* verify data */
1961 	section_count = LE16_TO_CPU(hdr->section_count);
1962 	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
1963 		return NULL;
1964 
1965 	data_end = LE16_TO_CPU(hdr->data_end);
1966 	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
1967 		return NULL;
1968 
1969 	return hdr;
1970 }
1971 
1972 /**
1973  * ice_pkg_enum_buf
1974  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
1975  * @state: pointer to the enum state
1976  *
1977  * This function will enumerate all the buffers in the ice segment. The first
1978  * call is made with the ice_seg parameter non-NULL; on subsequent calls,
1979  * ice_seg is set to NULL which continues the enumeration. When the function
1980  * returns a NULL pointer, then the end of the buffers has been reached, or an
1981  * unexpected value has been detected (for example an invalid section count or
1982  * an invalid buffer end value).
1983  */
1984 struct ice_buf_hdr *
1985 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
1986 {
1987 	if (ice_seg) {
1988 		state->buf_table = ice_find_buf_table(ice_seg);
1989 		if (!state->buf_table)
1990 			return NULL;
1991 
1992 		state->buf_idx = 0;
1993 		return ice_pkg_val_buf(state->buf_table->buf_array);
1994 	}
1995 
1996 	if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
1997 		return ice_pkg_val_buf(state->buf_table->buf_array +
1998 				       state->buf_idx);
1999 	else
2000 		return NULL;
2001 }
2002 
2003 /**
2004  * ice_pkg_advance_sect
2005  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
2006  * @state: pointer to the enum state
2007  *
2008  * This helper function will advance the section within the ice segment,
2009  * also advancing the buffer if needed.
2010  */
2011 bool
2012 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
2013 {
2014 	if (!ice_seg && !state->buf)
2015 		return false;
2016 
2017 	if (!ice_seg && state->buf)
2018 		if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
2019 			return true;
2020 
2021 	state->buf = ice_pkg_enum_buf(ice_seg, state);
2022 	if (!state->buf)
2023 		return false;
2024 
2025 	/* start of new buffer, reset section index */
2026 	state->sect_idx = 0;
2027 	return true;
2028 }
2029 
2030 /**
2031  * ice_pkg_enum_section
2032  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
2033  * @state: pointer to the enum state
2034  * @sect_type: section type to enumerate
2035  *
2036  * This function will enumerate all the sections of a particular type in the
2037  * ice segment. The first call is made with the ice_seg parameter non-NULL;
2038  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
2039  * When the function returns a NULL pointer, then the end of the matching
2040  * sections has been reached.
2041  */
void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		     u32 sect_type)
{
	u16 offset, size;

	/* first call (ice_seg non-NULL): latch the section type to match */
	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       CPU_TO_LE32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	/* record the matched type in host byte order; handlers passed to
	 * ice_pkg_enum_entry() receive this value
	 */
	state->sect_type =
		LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect = ((u8 *)state->buf) +
		LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}
2082 
2083 /**
2084  * ice_pkg_enum_entry
2085  * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
2086  * @state: pointer to the enum state
2087  * @sect_type: section type to enumerate
2088  * @offset: pointer to variable that receives the offset in the table (optional)
2089  * @handler: function that handles access to the entries into the section type
2090  *
2091  * This function will enumerate all the entries in particular section type in
2092  * the ice segment. The first call is made with the ice_seg parameter non-NULL;
2093  * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
2094  * When the function returns a NULL pointer, then the end of the entries has
2095  * been reached.
2096  *
2097  * Since each section may have a different header and entry size, the handler
2098  * function is needed to determine the number and location entries in each
2099  * section.
2100  *
2101  * The offset parameter is optional, but should be used for sections that
2102  * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolute
2104  * offset for each entry. For example, if the base for a section's header
2105  * indicates a base offset of 10, and the index for the entry is 2, then
2106  * section handler function should set the offset to 10 + 2 = 12.
2107  */
void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		   u32 sect_type, u32 *offset,
		   void *(*handler)(u32 sect_type, void *section,
				    u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		/* first call: a handler is mandatory */
		if (!handler)
			return NULL;

		/* locate the first section of the requested type */
		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		/* continuation call: step to the next entry in the section */
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}
2147 
2148 /**
2149  * ice_boost_tcam_handler
2150  * @sect_type: section type
2151  * @section: pointer to section
2152  * @index: index of the boost TCAM entry to be returned
2153  * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
2154  *
2155  * This is a callback function that can be passed to ice_pkg_enum_entry.
2156  * Handles enumeration of individual boost TCAM entries.
2157  */
2158 static void *
2159 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
2160 {
2161 	struct ice_boost_tcam_section *boost;
2162 
2163 	if (!section)
2164 		return NULL;
2165 
2166 	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
2167 		return NULL;
2168 
2169 	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
2170 		return NULL;
2171 
2172 	if (offset)
2173 		*offset = 0;
2174 
2175 	boost = (struct ice_boost_tcam_section *)section;
2176 	if (index >= LE16_TO_CPU(boost->count))
2177 		return NULL;
2178 
2179 	return boost->tcam + index;
2180 }
2181 
2182 /**
2183  * ice_find_boost_entry
2184  * @ice_seg: pointer to the ice segment (non-NULL)
2185  * @addr: Boost TCAM address of entry to search for
2186  * @entry: returns pointer to the entry
2187  *
2188  * Finds a particular Boost TCAM entry and returns a pointer to that entry
2189  * if it is found. The ice_seg parameter must not be NULL since the first call
2190  * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
2191  */
2192 static enum ice_status
2193 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
2194 		     struct ice_boost_tcam_entry **entry)
2195 {
2196 	struct ice_boost_tcam_entry *tcam;
2197 	struct ice_pkg_enum state;
2198 
2199 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
2200 
2201 	if (!ice_seg)
2202 		return ICE_ERR_PARAM;
2203 
2204 	do {
2205 		tcam = (struct ice_boost_tcam_entry *)
2206 		       ice_pkg_enum_entry(ice_seg, &state,
2207 					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
2208 					  ice_boost_tcam_handler);
2209 		if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
2210 			*entry = tcam;
2211 			return ICE_SUCCESS;
2212 		}
2213 
2214 		ice_seg = NULL;
2215 	} while (tcam);
2216 
2217 	*entry = NULL;
2218 	return ICE_ERR_CFG;
2219 }
2220 
2221 /**
2222  * ice_init_pkg_hints
2223  * @hw: pointer to the HW structure
2224  * @ice_seg: pointer to the segment of the package scan (non-NULL)
2225  *
2226  * This function will scan the package and save off relevant information
2227  * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
2228  * since the first call to ice_enum_labels requires a pointer to an actual
2229  * ice_seg structure.
2230  */
void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;	/* label value returned by ice_enum_labels() */
	int i;

	/* start from a clean tunnel table and enumeration state */
	ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	if (!ice_seg)
		return;

	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
				     &val);

	while (label_name) {
/* TODO: Replace !strncmp() with wrappers like match_some_pre() */
		if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
			/* check for a tunnel entry */
			ice_add_tunnel_hint(hw, label_name, val);

		/* continue enumeration; type argument is unused after the
		 * first call
		 */
		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers for tunnels */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry)
			hw->tnl.tbl[i].valid = true;
	}
}
2264 
2265 /**
2266  * ice_acquire_global_cfg_lock
2267  * @hw: pointer to the HW structure
2268  * @access: access type (read or write)
2269  *
2270  * This function will request ownership of the global config lock for reading
2271  * or writing of the package. When attempting to obtain write access, the
2272  * caller must check for the following two return values:
2273  *
2274  * ICE_SUCCESS        - Means the caller has acquired the global config lock
2275  *                      and can perform writing of the package.
2276  * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
2277  *                      package or has found that no update was necessary; in
2278  *                      this case, the caller can just skip performing any
2279  *                      update of the package.
2280  */
2281 enum ice_status
2282 ice_acquire_global_cfg_lock(struct ice_hw *hw,
2283 			    enum ice_aq_res_access_type access)
2284 {
2285 	enum ice_status status;
2286 
2287 	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
2288 				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
2289 
2290 	if (status == ICE_ERR_AQ_NO_WORK)
2291 		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
2292 
2293 	return status;
2294 }
2295 
2296 /**
2297  * ice_release_global_cfg_lock
2298  * @hw: pointer to the HW structure
2299  *
2300  * This function will release the global config lock.
2301  */
void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	/* counterpart of ice_acquire_global_cfg_lock() */
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}
2306 
2307 /**
2308  * ice_acquire_change_lock
2309  * @hw: pointer to the HW structure
2310  * @access: access type (read or write)
2311  *
2312  * This function will request ownership of the change lock.
2313  */
2314 enum ice_status
2315 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
2316 {
2317 	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
2318 			       ICE_CHANGE_LOCK_TIMEOUT);
2319 }
2320 
2321 /**
2322  * ice_release_change_lock
2323  * @hw: pointer to the HW structure
2324  *
2325  * This function will release the change lock using the proper Admin Command.
2326  */
void ice_release_change_lock(struct ice_hw *hw)
{
	/* counterpart of ice_acquire_change_lock() */
	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}
2331 
2332 /**
2333  * ice_get_set_tx_topo - get or set tx topology
2334  * @hw: pointer to the HW struct
2335  * @buf: pointer to tx topology buffer
2336  * @buf_size: buffer size
2337  * @cd: pointer to command details structure or NULL
2338  * @flags: pointer to descriptor flags
2339  * @set: 0-get, 1-set topology
2340  *
2341  * The function will get or set tx topology
2342  */
static enum ice_status
ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
		    struct ice_sq_cd *cd, u8 *flags, bool set)
{
	struct ice_aqc_get_set_tx_topo *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_set_tx_topo;
	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
		cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
		/* requested to update a new topology, not a default topology */
		if (buf)
			cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
					  ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
		cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
	}
	/* NOTE(review): RD flag is set for both the get and set opcodes here,
	 * marking the command as carrying a buffer — confirm this is intended
	 * for the get path against the admin queue contract
	 */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;
	/* read the return flag values (first byte) for get operation */
	if (!set && flags)
		*flags = desc.params.get_set_tx_topo.set_flags;

	return ICE_SUCCESS;
}
2373 
2374 /**
2375  * ice_cfg_tx_topo - Initialize new tx topology if available
2376  * @hw: pointer to the HW struct
2377  * @buf: pointer to Tx topology buffer
2378  * @len: buffer size
2379  *
2380  * The function will apply the new Tx topology from the package buffer
2381  * if available.
2382  */
2383 enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
2384 {
2385 	u8 *current_topo, *new_topo = NULL;
2386 	struct ice_run_time_cfg_seg *seg;
2387 	struct ice_buf_hdr *section;
2388 	struct ice_pkg_hdr *pkg_hdr;
2389 	enum ice_ddp_state state;
2390 	u16 i, size = 0, offset;
2391 	enum ice_status status;
2392 	u32 reg = 0;
2393 	u8 flags;
2394 
2395 	if (!buf || !len)
2396 		return ICE_ERR_PARAM;
2397 
2398 	/* Does FW support new Tx topology mode ? */
2399 	if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
2400 		ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
2401 		return ICE_ERR_NOT_SUPPORTED;
2402 	}
2403 
2404 	current_topo = (u8 *)ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2405 	if (!current_topo)
2406 		return ICE_ERR_NO_MEMORY;
2407 
2408 	/* get the current Tx topology */
2409 	status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
2410 				     &flags, false);
2411 	ice_free(hw, current_topo);
2412 
2413 	if (status) {
2414 		ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
2415 		return status;
2416 	}
2417 
2418 	/* Is default topology already applied ? */
2419 	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
2420 	    hw->num_tx_sched_layers == 9) {
2421 		ice_debug(hw, ICE_DBG_INIT, "Loaded default topology\n");
2422 		/* Already default topology is loaded */
2423 		return ICE_ERR_ALREADY_EXISTS;
2424 	}
2425 
2426 	/* Is new topology already applied ? */
2427 	if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
2428 	    hw->num_tx_sched_layers == 5) {
2429 		ice_debug(hw, ICE_DBG_INIT, "Loaded new topology\n");
2430 		/* Already new topology is loaded */
2431 		return ICE_ERR_ALREADY_EXISTS;
2432 	}
2433 
2434 	/* Is set topology issued already ? */
2435 	if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
2436 		ice_debug(hw, ICE_DBG_INIT, "Update tx topology was done by another PF\n");
2437 		/* add a small delay before exiting */
2438 		for (i = 0; i < 20; i++)
2439 			ice_msec_delay(100, true);
2440 		return ICE_ERR_ALREADY_EXISTS;
2441 	}
2442 
2443 	/* Change the topology from new to default (5 to 9) */
2444 	if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
2445 	    hw->num_tx_sched_layers == 5) {
2446 		ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
2447 		goto update_topo;
2448 	}
2449 
2450 	pkg_hdr = (struct ice_pkg_hdr *)buf;
2451 	state = ice_verify_pkg(pkg_hdr, len);
2452 	if (state) {
2453 		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
2454 			  state);
2455 		return ICE_ERR_CFG;
2456 	}
2457 
2458 	/* find run time configuration segment */
2459 	seg = (struct ice_run_time_cfg_seg *)
2460 		ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
2461 	if (!seg) {
2462 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
2463 		return ICE_ERR_CFG;
2464 	}
2465 
2466 	if (LE32_TO_CPU(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
2467 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
2468 			  seg->buf_table.buf_count);
2469 		return ICE_ERR_CFG;
2470 	}
2471 
2472 	section = ice_pkg_val_buf(seg->buf_table.buf_array);
2473 
2474 	if (!section || LE32_TO_CPU(section->section_entry[0].type) !=
2475 		ICE_SID_TX_5_LAYER_TOPO) {
2476 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
2477 		return ICE_ERR_CFG;
2478 	}
2479 
2480 	size = LE16_TO_CPU(section->section_entry[0].size);
2481 	offset = LE16_TO_CPU(section->section_entry[0].offset);
2482 	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
2483 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
2484 		return ICE_ERR_CFG;
2485 	}
2486 
2487 	/* make sure the section fits in the buffer */
2488 	if (offset + size > ICE_PKG_BUF_SIZE) {
2489 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
2490 		return ICE_ERR_CFG;
2491 	}
2492 
2493 	/* Get the new topology buffer */
2494 	new_topo = ((u8 *)section) + offset;
2495 
2496 update_topo:
2497 	/* acquire global lock to make sure that set topology issued
2498 	 * by one PF
2499 	 */
2500 	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
2501 				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
2502 	if (status) {
2503 		ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
2504 		return status;
2505 	}
2506 
2507 	/* check reset was triggered already or not */
2508 	reg = rd32(hw, GLGEN_RSTAT);
2509 	if (reg & GLGEN_RSTAT_DEVSTATE_M) {
2510 		/* Reset is in progress, re-init the hw again */
2511 		ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. layer topology might be applied already\n");
2512 		ice_check_reset(hw);
2513 		return ICE_SUCCESS;
2514 	}
2515 
2516 	/* set new topology */
2517 	status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
2518 	if (status) {
2519 		ice_debug(hw, ICE_DBG_INIT, "Set tx topology is failed\n");
2520 		return status;
2521 	}
2522 
2523 	/* new topology is updated, delay 1 second before issuing the CORRER */
2524 	for (i = 0; i < 10; i++)
2525 		ice_msec_delay(100, true);
2526 	ice_reset(hw, ICE_RESET_CORER);
2527 	/* CORER will clear the global lock, so no explicit call
2528 	 * required for release
2529 	 */
2530 	return ICE_SUCCESS;
2531 }
2532