1 /******************************************************************************
2
3 Copyright (c) 2013-2018, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35 #include "i40e_prototype.h"
36
37 /**
38 * i40e_init_nvm_ops - Initialize NVM function pointers
39 * @hw: pointer to the HW structure
40 *
41 * Setup the function pointers and the NVM info structure. Should be called
42 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
43 * Please notice that the NVM term is used here (& in all methods covered
44 * in this file) as an equivalent of the FLASH part mapped into the SR.
45 * We are accessing FLASH always through the Shadow RAM.
46 **/
i40e_init_nvm(struct i40e_hw * hw)47 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
48 {
49 struct i40e_nvm_info *nvm = &hw->nvm;
50 enum i40e_status_code ret_code = I40E_SUCCESS;
51 u32 fla, gens;
52 u8 sr_size;
53
54 DEBUGFUNC("i40e_init_nvm");
55
56 /* The SR size is stored regardless of the nvm programming mode
57 * as the blank mode may be used in the factory line.
58 */
59 gens = rd32(hw, I40E_GLNVM_GENS);
60 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
61 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
62 /* Switching to words (sr_size contains power of 2KB) */
63 nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
64
65 /* Check if we are in the normal or blank NVM programming mode */
66 fla = rd32(hw, I40E_GLNVM_FLA);
67 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
68 /* Max NVM timeout */
69 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
70 nvm->blank_nvm_mode = FALSE;
71 } else { /* Blank programming mode */
72 nvm->blank_nvm_mode = TRUE;
73 ret_code = I40E_ERR_NVM_BLANK_MODE;
74 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
75 }
76
77 return ret_code;
78 }
79
80 /**
81 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
82 * @hw: pointer to the HW structure
83 * @access: NVM access type (read or write)
84 *
85 * This function will request NVM ownership for reading
86 * via the proper Admin Command.
87 **/
i40e_acquire_nvm(struct i40e_hw * hw,enum i40e_aq_resource_access_type access)88 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
89 enum i40e_aq_resource_access_type access)
90 {
91 enum i40e_status_code ret_code = I40E_SUCCESS;
92 u64 gtime, timeout;
93 u64 time_left = 0;
94
95 DEBUGFUNC("i40e_acquire_nvm");
96
97 if (hw->nvm.blank_nvm_mode)
98 goto i40e_i40e_acquire_nvm_exit;
99
100 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
101 0, &time_left, NULL);
102 /* Reading the Global Device Timer */
103 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
104
105 /* Store the timeout */
106 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
107
108 if (ret_code)
109 i40e_debug(hw, I40E_DEBUG_NVM,
110 "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
111 access, time_left, ret_code, hw->aq.asq_last_status);
112
113 if (ret_code && time_left) {
114 /* Poll until the current NVM owner timeouts */
115 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
116 while ((gtime < timeout) && time_left) {
117 i40e_msec_delay(10);
118 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
119 ret_code = i40e_aq_request_resource(hw,
120 I40E_NVM_RESOURCE_ID,
121 access, 0, &time_left,
122 NULL);
123 if (ret_code == I40E_SUCCESS) {
124 hw->nvm.hw_semaphore_timeout =
125 I40E_MS_TO_GTIME(time_left) + gtime;
126 break;
127 }
128 }
129 if (ret_code != I40E_SUCCESS) {
130 hw->nvm.hw_semaphore_timeout = 0;
131 i40e_debug(hw, I40E_DEBUG_NVM,
132 "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
133 time_left, ret_code, hw->aq.asq_last_status);
134 }
135 }
136
137 i40e_i40e_acquire_nvm_exit:
138 return ret_code;
139 }
140
141 /**
142 * i40e_release_nvm - Generic request for releasing the NVM ownership
143 * @hw: pointer to the HW structure
144 *
145 * This function will release NVM resource via the proper Admin Command.
146 **/
i40e_release_nvm(struct i40e_hw * hw)147 void i40e_release_nvm(struct i40e_hw *hw)
148 {
149 enum i40e_status_code ret_code = I40E_SUCCESS;
150 u32 total_delay = 0;
151
152 DEBUGFUNC("i40e_release_nvm");
153
154 if (hw->nvm.blank_nvm_mode)
155 return;
156
157 ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
158
159 /* there are some rare cases when trying to release the resource
160 * results in an admin Q timeout, so handle them correctly
161 */
162 while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
163 (total_delay < hw->aq.asq_cmd_timeout)) {
164 i40e_msec_delay(1);
165 ret_code = i40e_aq_release_resource(hw,
166 I40E_NVM_RESOURCE_ID, 0, NULL);
167 total_delay++;
168 }
169 }
170
171 /**
172 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
173 * @hw: pointer to the HW structure
174 *
175 * Polls the SRCTL Shadow RAM register done bit.
176 **/
i40e_poll_sr_srctl_done_bit(struct i40e_hw * hw)177 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
178 {
179 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
180 u32 srctl, wait_cnt;
181
182 DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
183
184 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
185 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
186 srctl = rd32(hw, I40E_GLNVM_SRCTL);
187 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
188 ret_code = I40E_SUCCESS;
189 break;
190 }
191 i40e_usec_delay(5);
192 }
193 if (ret_code == I40E_ERR_TIMEOUT)
194 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
195 return ret_code;
196 }
197
/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 *
 * Returns I40E_ERR_PARAM if @offset is beyond the Shadow RAM size, or a
 * timeout error if the SRCTL done bit never asserts; *data is only
 * written on success.
 **/
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					       u16 *data)
{
	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	DEBUGFUNC("i40e_read_nvm_word_srctl");

	/* Reject offsets outside the device's Shadow RAM */
	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first (a previous access may be in flight) */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (ret_code == I40E_SUCCESS) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (ret_code == I40E_SUCCESS) {
			/* Extract the 16-bit data field from SRDATA */
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code != I40E_SUCCESS)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}
247
248 /**
249 * i40e_read_nvm_aq - Read Shadow RAM.
250 * @hw: pointer to the HW structure.
251 * @module_pointer: module pointer location in words from the NVM beginning
252 * @offset: offset in words from module start
253 * @words: number of words to write
254 * @data: buffer with words to write to the Shadow RAM
255 * @last_command: tells the AdminQ that this is the last command
256 *
257 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
258 **/
i40e_read_nvm_aq(struct i40e_hw * hw,u8 module_pointer,u32 offset,u16 words,void * data,bool last_command)259 static enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
260 u8 module_pointer, u32 offset,
261 u16 words, void *data,
262 bool last_command)
263 {
264 enum i40e_status_code ret_code = I40E_ERR_NVM;
265 struct i40e_asq_cmd_details cmd_details;
266
267 DEBUGFUNC("i40e_read_nvm_aq");
268
269 memset(&cmd_details, 0, sizeof(cmd_details));
270 cmd_details.wb_desc = &hw->nvm_wb_desc;
271
272 /* Here we are checking the SR limit only for the flat memory model.
273 * We cannot do it for the module-based model, as we did not acquire
274 * the NVM resource yet (we cannot get the module pointer value).
275 * Firmware will check the module-based model.
276 */
277 if ((offset + words) > hw->nvm.sr_size)
278 i40e_debug(hw, I40E_DEBUG_NVM,
279 "NVM write error: offset %d beyond Shadow RAM limit %d\n",
280 (offset + words), hw->nvm.sr_size);
281 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
282 /* We can write only up to 4KB (one sector), in one AQ write */
283 i40e_debug(hw, I40E_DEBUG_NVM,
284 "NVM write fail error: tried to write %d words, limit is %d.\n",
285 words, I40E_SR_SECTOR_SIZE_IN_WORDS);
286 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
287 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
288 /* A single write cannot spread over two sectors */
289 i40e_debug(hw, I40E_DEBUG_NVM,
290 "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
291 offset, words);
292 else
293 ret_code = i40e_aq_read_nvm(hw, module_pointer,
294 2 * offset, /*bytes*/
295 2 * words, /*bytes*/
296 data, last_command, &cmd_details);
297
298 return ret_code;
299 }
300
301 /**
302 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
303 * @hw: pointer to the HW structure
304 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
305 * @data: word read from the Shadow RAM
306 *
307 * Reads one 16 bit word from the Shadow RAM using the AdminQ
308 **/
i40e_read_nvm_word_aq(struct i40e_hw * hw,u16 offset,u16 * data)309 static enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
310 u16 *data)
311 {
312 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
313
314 DEBUGFUNC("i40e_read_nvm_word_aq");
315
316 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
317 *data = LE16_TO_CPU(*(__le16 *)data);
318
319 return ret_code;
320 }
321
322 /**
323 * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
324 * @hw: pointer to the HW structure
325 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
326 * @data: word read from the Shadow RAM
327 *
328 * Reads one 16 bit word from the Shadow RAM.
329 *
330 * Do not use this function except in cases where the nvm lock is already
331 * taken via i40e_acquire_nvm().
332 **/
__i40e_read_nvm_word(struct i40e_hw * hw,u16 offset,u16 * data)333 enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
334 u16 offset,
335 u16 *data)
336 {
337
338 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
339 return i40e_read_nvm_word_aq(hw, offset, data);
340
341 return i40e_read_nvm_word_srctl(hw, offset, data);
342 }
343
344 /**
345 * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
346 * @hw: pointer to the HW structure
347 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
348 * @data: word read from the Shadow RAM
349 *
350 * Reads one 16 bit word from the Shadow RAM.
351 **/
i40e_read_nvm_word(struct i40e_hw * hw,u16 offset,u16 * data)352 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
353 u16 *data)
354 {
355 enum i40e_status_code ret_code = I40E_SUCCESS;
356
357 if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
358 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
359
360 if (ret_code)
361 return ret_code;
362 ret_code = __i40e_read_nvm_word(hw, offset, data);
363
364 if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
365 i40e_release_nvm(hw);
366 return ret_code;
367 }
368
369 /**
370 * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
371 * @hw: Pointer to the HW structure
372 * @module_ptr: Pointer to module in words with respect to NVM beginning
373 * @module_offset: Offset in words from module start
374 * @data_offset: Offset in words from reading data area start
375 * @words_data_size: Words to read from NVM
376 * @data_ptr: Pointer to memory location where resulting buffer will be stored
377 **/
378 enum i40e_status_code
i40e_read_nvm_module_data(struct i40e_hw * hw,u8 module_ptr,u16 module_offset,u16 data_offset,u16 words_data_size,u16 * data_ptr)379 i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
380 u16 data_offset, u16 words_data_size, u16 *data_ptr)
381 {
382 enum i40e_status_code status;
383 u16 specific_ptr = 0;
384 u16 ptr_value = 0;
385 u16 offset = 0;
386
387 if (module_ptr != 0) {
388 status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
389 if (status != I40E_SUCCESS) {
390 i40e_debug(hw, I40E_DEBUG_ALL,
391 "Reading nvm word failed.Error code: %d.\n",
392 status);
393 return I40E_ERR_NVM;
394 }
395 }
396 #define I40E_NVM_INVALID_PTR_VAL 0x7FFF
397 #define I40E_NVM_INVALID_VAL 0xFFFF
398
399 /* Pointer not initialized */
400 if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
401 ptr_value == I40E_NVM_INVALID_VAL) {
402 i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
403 return I40E_ERR_BAD_PTR;
404 }
405
406 /* Check whether the module is in SR mapped area or outside */
407 if (ptr_value & I40E_PTR_TYPE) {
408 /* Pointer points outside of the Shared RAM mapped area */
409 i40e_debug(hw, I40E_DEBUG_ALL,
410 "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
411
412 return I40E_ERR_PARAM;
413 } else {
414 /* Read from the Shadow RAM */
415
416 status = i40e_read_nvm_word(hw, ptr_value + module_offset,
417 &specific_ptr);
418 if (status != I40E_SUCCESS) {
419 i40e_debug(hw, I40E_DEBUG_ALL,
420 "Reading nvm word failed.Error code: %d.\n",
421 status);
422 return I40E_ERR_NVM;
423 }
424
425 offset = ptr_value + module_offset + specific_ptr +
426 data_offset;
427
428 status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
429 data_ptr);
430 if (status != I40E_SUCCESS) {
431 i40e_debug(hw, I40E_DEBUG_ALL,
432 "Reading nvm buffer failed.Error code: %d.\n",
433 status);
434 }
435 }
436
437 return status;
438 }
439
440 /**
441 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
442 * @hw: pointer to the HW structure
443 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
444 * @words: (in) number of words to read; (out) number of words actually read
445 * @data: words read from the Shadow RAM
446 *
447 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
448 * method. The buffer read is preceded by the NVM ownership take
449 * and followed by the release.
450 **/
i40e_read_nvm_buffer_srctl(struct i40e_hw * hw,u16 offset,u16 * words,u16 * data)451 static enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
452 u16 *words, u16 *data)
453 {
454 enum i40e_status_code ret_code = I40E_SUCCESS;
455 u16 index, word;
456
457 DEBUGFUNC("i40e_read_nvm_buffer_srctl");
458
459 /* Loop through the selected region */
460 for (word = 0; word < *words; word++) {
461 index = offset + word;
462 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
463 if (ret_code != I40E_SUCCESS)
464 break;
465 }
466
467 /* Update the number of words read from the Shadow RAM */
468 *words = word;
469
470 return ret_code;
471 }
472
/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using i40e_read_nvm_aq(),
 * splitting the request so no single AQ read exceeds one 4KB sector or
 * crosses a sector boundary. NVM ownership is the caller's
 * responsibility (see i40e_read_nvm_buffer()).
 **/
static enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
						     u16 *words, u16 *data)
{
	enum i40e_status_code ret_code;
	u16 read_size = *words;
	bool last_cmd = FALSE;
	u16 words_read = 0;
	u16 i = 0;

	DEBUGFUNC("i40e_read_nvm_buffer_aq");

	do {
		/* Calculate number of bytes we should read in this step.
		 * FVL AQ do not allow to read more than one page at a time or
		 * to cross page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			/* Unaligned start: read only up to the sector end */
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			/* Sector-aligned: read up to one whole sector */
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Check if this is last command, if so set proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = TRUE;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code != I40E_SUCCESS)
			goto read_nvm_buffer_aq_exit;

		/* Increment counter for words already read and move offset to
		 * new read location
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* AQ data is little-endian; convert the buffer to CPU order */
	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	/* Report the number of words actually read (may be short on error) */
	*words = words_read;
	return ret_code;
}
531
532 /**
533 * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
534 * @hw: pointer to the HW structure
535 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
536 * @words: (in) number of words to read; (out) number of words actually read
537 * @data: words read from the Shadow RAM
538 *
539 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
540 * method.
541 **/
__i40e_read_nvm_buffer(struct i40e_hw * hw,u16 offset,u16 * words,u16 * data)542 enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
543 u16 offset,
544 u16 *words, u16 *data)
545 {
546 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
547 return i40e_read_nvm_buffer_aq(hw, offset, words, data);
548
549 return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
550 }
551
552 /**
553 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
554 * @hw: pointer to the HW structure
555 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
556 * @words: (in) number of words to read; (out) number of words actually read
557 * @data: words read from the Shadow RAM
558 *
559 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
560 * method. The buffer read is preceded by the NVM ownership take
561 * and followed by the release.
562 **/
i40e_read_nvm_buffer(struct i40e_hw * hw,u16 offset,u16 * words,u16 * data)563 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
564 u16 *words, u16 *data)
565 {
566 enum i40e_status_code ret_code = I40E_SUCCESS;
567
568 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
569 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
570 if (!ret_code) {
571 ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
572 data);
573 i40e_release_nvm(hw);
574 }
575 } else {
576 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
577 }
578
579 return ret_code;
580 }
581
/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 * @data is expected to already be in little-endian byte order (the
 * __i40e_write_nvm_word()/__i40e_write_nvm_buffer() wrappers convert it
 * before calling here). A single write must fit in one 4KB sector and
 * must not cross a sector boundary.
 **/
enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
					u32 offset, u16 words, void *data,
					bool last_command)
{
	enum i40e_status_code ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_write_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	/* Use the dedicated NVM writeback descriptor for this command */
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector), in one AQ write */
		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
	else
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset,  /*bytes*/
					      2 * words,   /*bytes*/
					      data, last_command, 0,
					      &cmd_details);

	return ret_code;
}
628
/**
 * __i40e_write_nvm_word - Writes Shadow RAM word
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to write
 * @data: word to write to the Shadow RAM
 *
 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
 * NVM ownership have to be acquired and released (on ARQ completion event
 * reception) by caller. To commit SR to NVM update checksum function
 * should be called.
 *
 * Note: the word in @data is converted to little-endian IN PLACE before
 * the AQ write, so the caller's buffer is modified.
 **/
enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
					    void *data)
{
	DEBUGFUNC("i40e_write_nvm_word");

	/* Convert to little-endian in place; the AQ expects LE data */
	*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));

	/* Value 0x00 below means that we treat SR as a flat mem */
	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
}
650
651 /**
652 * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
653 * @hw: pointer to the HW structure
654 * @module_pointer: module pointer location in words from the NVM beginning
655 * @offset: offset of the Shadow RAM buffer to write
656 * @words: number of words to write
657 * @data: words to write to the Shadow RAM
658 *
659 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
660 * NVM ownership must be acquired before calling this function and released
661 * on ARQ completion event reception by caller. To commit SR to NVM update
662 * checksum function should be called.
663 **/
__i40e_write_nvm_buffer(struct i40e_hw * hw,u8 module_pointer,u32 offset,u16 words,void * data)664 enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
665 u8 module_pointer, u32 offset,
666 u16 words, void *data)
667 {
668 __le16 *le_word_ptr = (__le16 *)data;
669 u16 *word_ptr = (u16 *)data;
670 u32 i = 0;
671
672 DEBUGFUNC("i40e_write_nvm_buffer");
673
674 for (i = 0; i < words; i++)
675 le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
676
677 /* Here we will only write one buffer as the size of the modules
678 * mirrored in the Shadow RAM is always less than 4K.
679 */
680 return i40e_write_nvm_aq(hw, module_pointer, offset, words,
681 data, FALSE);
682 }
683
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 *
 * Caller must hold NVM ownership: the reads use __i40e_read_nvm_word()/
 * __i40e_read_nvm_buffer(), which do no locking of their own.
 **/
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	DEBUGFUNC("i40e_calc_nvm_checksum");

	/* Scratch buffer holding one SR sector at a time */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Refill the scratch buffer at each sector boundary */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code != I40E_SUCCESS) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* Chosen so the covered words plus the checksum word sum to
	 * I40E_SR_SW_CHECKSUM_BASE
	 */
	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
767
768 /**
769 * i40e_update_nvm_checksum - Updates the NVM checksum
770 * @hw: pointer to hardware structure
771 *
772 * NVM ownership must be acquired before calling this function and released
773 * on ARQ completion event reception by caller.
774 * This function will commit SR to NVM.
775 **/
i40e_update_nvm_checksum(struct i40e_hw * hw)776 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
777 {
778 enum i40e_status_code ret_code = I40E_SUCCESS;
779 u16 checksum;
780 __le16 le_sum;
781
782 DEBUGFUNC("i40e_update_nvm_checksum");
783
784 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
785 le_sum = CPU_TO_LE16(checksum);
786 if (ret_code == I40E_SUCCESS)
787 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
788 1, &le_sum, TRUE);
789
790 return ret_code;
791 }
792
793 /**
794 * i40e_validate_nvm_checksum - Validate EEPROM checksum
795 * @hw: pointer to hardware structure
796 * @checksum: calculated checksum
797 *
798 * Performs checksum calculation and validates the NVM SW checksum. If the
799 * caller does not need checksum, the value can be NULL.
800 **/
i40e_validate_nvm_checksum(struct i40e_hw * hw,u16 * checksum)801 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
802 u16 *checksum)
803 {
804 enum i40e_status_code ret_code = I40E_SUCCESS;
805 u16 checksum_sr = 0;
806 u16 checksum_local = 0;
807
808 DEBUGFUNC("i40e_validate_nvm_checksum");
809
810 /* We must acquire the NVM lock in order to correctly synchronize the
811 * NVM accesses across multiple PFs. Without doing so it is possible
812 * for one of the PFs to read invalid data potentially indicating that
813 * the checksum is invalid.
814 */
815 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
816 if (ret_code)
817 return ret_code;
818 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
819 __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
820 i40e_release_nvm(hw);
821 if (ret_code)
822 return ret_code;
823
824 /* Verify read checksum from EEPROM is the same as
825 * calculated checksum
826 */
827 if (checksum_local != checksum_sr)
828 ret_code = I40E_ERR_NVM_CHECKSUM;
829
830 /* If the user cares, return the calculated checksum */
831 if (checksum)
832 *checksum = checksum_local;
833
834 return ret_code;
835 }
836
837 static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
838 struct i40e_nvm_access *cmd,
839 u8 *bytes, int *perrno);
840 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
841 struct i40e_nvm_access *cmd,
842 u8 *bytes, int *perrno);
843 static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
844 struct i40e_nvm_access *cmd,
845 u8 *bytes, int *perrno);
846 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
847 struct i40e_nvm_access *cmd,
848 int *perrno);
849 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
850 struct i40e_nvm_access *cmd,
851 int *perrno);
852 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
853 struct i40e_nvm_access *cmd,
854 u8 *bytes, int *perrno);
855 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
856 struct i40e_nvm_access *cmd,
857 u8 *bytes, int *perrno);
858 static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
859 struct i40e_nvm_access *cmd,
860 u8 *bytes, int *perrno);
861 static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
862 struct i40e_nvm_access *cmd,
863 u8 *bytes, int *perrno);
864 static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
865 struct i40e_nvm_access *cmd,
866 u8 *bytes, int *perrno);
/* Extract the module pointer field from an nvmupd config word */
static INLINE u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
/* Extract the transaction type field from an nvmupd config word */
static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
875
/* Extract the preservation flags field from an nvmupd config word */
static INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
{
	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
		    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
}
881
/* Human-readable names for the NVM update commands, indexed by
 * enum i40e_nvmupd_cmd value — the order here must stay in sync with
 * that enum's definition.  Used only for debug logging.
 */
static const char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
	"I40E_NVMUPD_GET_FEATURES",
};
902
903 /**
904 * i40e_nvmupd_command - Process an NVM update command
905 * @hw: pointer to hardware structure
906 * @cmd: pointer to nvm update command
907 * @bytes: pointer to the data buffer
908 * @perrno: pointer to return error code
909 *
910 * Dispatches command depending on what update state is current
911 **/
i40e_nvmupd_command(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)912 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
913 struct i40e_nvm_access *cmd,
914 u8 *bytes, int *perrno)
915 {
916 enum i40e_status_code status;
917 enum i40e_nvmupd_cmd upd_cmd;
918
919 DEBUGFUNC("i40e_nvmupd_command");
920
921 /* assume success */
922 *perrno = 0;
923
924 /* early check for status command and debug msgs */
925 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
926
927 i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
928 i40e_nvm_update_state_str[upd_cmd],
929 hw->nvmupd_state,
930 hw->nvm_release_on_done, hw->nvm_wait_opcode,
931 cmd->command, cmd->config, cmd->offset, cmd->data_size);
932
933 if (upd_cmd == I40E_NVMUPD_INVALID) {
934 *perrno = -EFAULT;
935 i40e_debug(hw, I40E_DEBUG_NVM,
936 "i40e_nvmupd_validate_command returns %d errno %d\n",
937 upd_cmd, *perrno);
938 }
939
940 /* a status request returns immediately rather than
941 * going into the state machine
942 */
943 if (upd_cmd == I40E_NVMUPD_STATUS) {
944 if (!cmd->data_size) {
945 *perrno = -EFAULT;
946 return I40E_ERR_BUF_TOO_SHORT;
947 }
948
949 bytes[0] = hw->nvmupd_state;
950
951 if (cmd->data_size >= 4) {
952 bytes[1] = 0;
953 *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
954 }
955
956 /* Clear error status on read */
957 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
958 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
959
960 return I40E_SUCCESS;
961 }
962
963 /*
964 * A supported features request returns immediately
965 * rather than going into state machine
966 */
967 if (upd_cmd == I40E_NVMUPD_FEATURES) {
968 if (cmd->data_size < hw->nvmupd_features.size) {
969 *perrno = -EFAULT;
970 return I40E_ERR_BUF_TOO_SHORT;
971 }
972
973 /*
974 * If buffer is bigger than i40e_nvmupd_features structure,
975 * make sure the trailing bytes are set to 0x0.
976 */
977 if (cmd->data_size > hw->nvmupd_features.size)
978 i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
979 cmd->data_size - hw->nvmupd_features.size,
980 I40E_NONDMA_MEM);
981
982 i40e_memcpy(bytes, &hw->nvmupd_features,
983 hw->nvmupd_features.size, I40E_NONDMA_MEM);
984
985 return I40E_SUCCESS;
986 }
987
988 /* Clear status even it is not read and log */
989 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
990 i40e_debug(hw, I40E_DEBUG_NVM,
991 "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
992 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
993 }
994
995 /* Acquire lock to prevent race condition where adminq_task
996 * can execute after i40e_nvmupd_nvm_read/write but before state
997 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
998 *
999 * During NVMUpdate, it is observed that lock could be held for
1000 * ~5ms for most commands. However lock is held for ~60ms for
1001 * NVMUPD_CSUM_LCB command.
1002 */
1003 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
1004 switch (hw->nvmupd_state) {
1005 case I40E_NVMUPD_STATE_INIT:
1006 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
1007 break;
1008
1009 case I40E_NVMUPD_STATE_READING:
1010 status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
1011 break;
1012
1013 case I40E_NVMUPD_STATE_WRITING:
1014 status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
1015 break;
1016
1017 case I40E_NVMUPD_STATE_INIT_WAIT:
1018 case I40E_NVMUPD_STATE_WRITE_WAIT:
1019 /* if we need to stop waiting for an event, clear
1020 * the wait info and return before doing anything else
1021 */
1022 if (cmd->offset == 0xffff) {
1023 i40e_nvmupd_clear_wait_state(hw);
1024 status = I40E_SUCCESS;
1025 break;
1026 }
1027
1028 status = I40E_ERR_NOT_READY;
1029 *perrno = -EBUSY;
1030 break;
1031
1032 default:
1033 /* invalid state, should never happen */
1034 i40e_debug(hw, I40E_DEBUG_NVM,
1035 "NVMUPD: no such state %d\n", hw->nvmupd_state);
1036 status = I40E_NOT_SUPPORTED;
1037 *perrno = -ESRCH;
1038 break;
1039 }
1040
1041 i40e_release_spinlock(&hw->aq.arq_spinlock);
1042 return status;
1043 }
1044
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 **/
static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_state_init");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* Standalone read: acquire, read, and release in one call */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* Start of a multi-part read: keep the NVM resource held and
		 * move to READING state; released later by READ_LCB.
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* Erase; on success the resource is released from the AQ
		 * completion path (nvm_release_on_done) once the erase
		 * event arrives.
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = TRUE;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* Standalone write; resource released on AQ completion */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = TRUE;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* Start of a multi-part write: hold the resource and wait
		 * for the update event before accepting the next chunk.
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* Standalone checksum update; a failed update may leave
		 * asq_last_status clear, hence the -EIO fallback.
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = TRUE;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_EVENT:
		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
1185
1186 /**
1187 * i40e_nvmupd_state_reading - Handle NVM update state Reading
1188 * @hw: pointer to hardware structure
1189 * @cmd: pointer to nvm update command buffer
1190 * @bytes: pointer to the data buffer
1191 * @perrno: pointer to return error code
1192 *
1193 * NVM ownership is already held. Process legitimate commands and set any
1194 * change in state; reject all other commands.
1195 **/
i40e_nvmupd_state_reading(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1196 static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1197 struct i40e_nvm_access *cmd,
1198 u8 *bytes, int *perrno)
1199 {
1200 enum i40e_status_code status = I40E_SUCCESS;
1201 enum i40e_nvmupd_cmd upd_cmd;
1202
1203 DEBUGFUNC("i40e_nvmupd_state_reading");
1204
1205 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1206
1207 switch (upd_cmd) {
1208 case I40E_NVMUPD_READ_SA:
1209 case I40E_NVMUPD_READ_CON:
1210 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1211 break;
1212
1213 case I40E_NVMUPD_READ_LCB:
1214 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1215 i40e_release_nvm(hw);
1216 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1217 break;
1218
1219 default:
1220 i40e_debug(hw, I40E_DEBUG_NVM,
1221 "NVMUPD: bad cmd %s in reading state.\n",
1222 i40e_nvm_update_state_str[upd_cmd]);
1223 status = I40E_NOT_SUPPORTED;
1224 *perrno = -ESRCH;
1225 break;
1226 }
1227 return status;
1228 }
1229
/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands
 **/
static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
						       struct i40e_nvm_access *cmd,
						       u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = FALSE;

	DEBUGFUNC("i40e_nvmupd_state_writing");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		/* continuation chunk: stay in the write flow, wait for the
		 * AQ update event before the next chunk
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		/* last command in buffer: on success, release happens from
		 * the AQ completion path (nvm_release_on_done); on failure
		 * fall back to Init.  asq_last_status may be clear on some
		 * failures, hence the -EIO fallback.
		 */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = TRUE;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = TRUE;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		enum i40e_status_code old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				/* reacquire failed: restore the original
				 * error so the caller sees the write failure,
				 * not the acquire failure
				 */
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = TRUE;
				goto retry;
			}
		}
	}

	return status;
}
1351
1352 /**
1353 * i40e_nvmupd_clear_wait_state - clear wait state on hw
1354 * @hw: pointer to the hardware structure
1355 **/
i40e_nvmupd_clear_wait_state(struct i40e_hw * hw)1356 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1357 {
1358 i40e_debug(hw, I40E_DEBUG_NVM,
1359 "NVMUPD: clearing wait on opcode 0x%04x\n",
1360 hw->nvm_wait_opcode);
1361
1362 if (hw->nvm_release_on_done) {
1363 i40e_release_nvm(hw);
1364 hw->nvm_release_on_done = FALSE;
1365 }
1366 hw->nvm_wait_opcode = 0;
1367
1368 if (hw->aq.arq_last_status) {
1369 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1370 return;
1371 }
1372
1373 switch (hw->nvmupd_state) {
1374 case I40E_NVMUPD_STATE_INIT_WAIT:
1375 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1376 break;
1377
1378 case I40E_NVMUPD_STATE_WRITE_WAIT:
1379 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1380 break;
1381
1382 default:
1383 break;
1384 }
1385 }
1386
1387 /**
1388 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1389 * @hw: pointer to the hardware structure
1390 * @opcode: the event that just happened
1391 * @desc: AdminQ descriptor
1392 **/
i40e_nvmupd_check_wait_event(struct i40e_hw * hw,u16 opcode,struct i40e_aq_desc * desc)1393 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1394 struct i40e_aq_desc *desc)
1395 {
1396 u32 aq_desc_len = sizeof(struct i40e_aq_desc);
1397
1398 if (opcode == hw->nvm_wait_opcode) {
1399 i40e_memcpy(&hw->nvm_aq_event_desc, desc,
1400 aq_desc_len, I40E_NONDMA_TO_NONDMA);
1401 i40e_nvmupd_clear_wait_state(hw);
1402 }
1403 }
1404
/**
 * i40e_nvmupd_validate_command - Validate given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * Decodes cmd->command plus the transaction/module fields of cmd->config
 * into one enum i40e_nvmupd_cmd value, after range-checking data_size.
 * Return one of the valid command types or I40E_NVMUPD_INVALID
 **/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 module, transaction;

	DEBUGFUNC("i40e_nvmupd_validate_command\n");

	/* anything that doesn't match a recognized case is an error */
	upd_cmd = I40E_NVMUPD_INVALID;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);

	/* limits on data size */
	if ((cmd->data_size < 1) ||
	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command data_size %d\n",
			   cmd->data_size);
		*perrno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		case I40E_NVM_EXEC:
			/* EXEC reads are sub-dispatched on the module field */
			switch (module) {
			case I40E_NVM_EXEC_GET_AQ_RESULT:
				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
				break;
			case I40E_NVM_EXEC_FEATURES:
				upd_cmd = I40E_NVMUPD_FEATURES;
				break;
			case I40E_NVM_EXEC_STATUS:
				upd_cmd = I40E_NVMUPD_STATUS;
				break;
			default:
				*perrno = -EFAULT;
				return I40E_NVMUPD_INVALID;
			}
			break;
		case I40E_NVM_AQE:
			upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		/* checksum commands can combine with the SA/LCB bits */
		case (I40E_NVM_CSUM|I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		case I40E_NVM_EXEC:
			/* only module 0 is a valid EXEC write */
			if (module == 0)
				upd_cmd = I40E_NVMUPD_EXEC_AQ;
			break;
		}
		break;
	}

	return upd_cmd;
}
1511
/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer.  The caller-supplied
 * bytes buffer holds an AQ descriptor followed by optional indirect data;
 * a non-zero cmd->offset is treated as an AQ opcode whose completion event
 * should be waited for afterwards.
 **/
static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	enum i40e_status_code status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	/* offset 0xffff is the "cancel wait" sentinel - nothing to do here */
	if (cmd->offset == 0xffff)
		return I40E_SUCCESS;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
	if (buff_size) {
		/* lazily allocate the reusable exec buffer on first use */
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			/* copy the indirect data that follows the descriptor */
			i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
				I40E_NONDMA_TO_NONDMA);
		}
	}

	if (cmd->offset)
		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
		return status;
	}

	/* should we wait for a followup event? */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}
1596
/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer.  The result is
 * logically the writeback descriptor (hw->nvm_wb_desc) followed by the
 * indirect data buffer (hw->nvm_buff); cmd->offset selects where in that
 * combined region to start copying from.
 **/
static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		/* the copy starts inside the writeback descriptor; take what
		 * is needed from there first
		 */
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		/* copy starts past the descriptor, i.e. inside nvm_buff */
		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
	}

	return I40E_SUCCESS;
}
1665
1666 /**
1667 * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
1668 * @hw: pointer to hardware structure
1669 * @cmd: pointer to nvm update command buffer
1670 * @bytes: pointer to the data buffer
1671 * @perrno: pointer to return error code
1672 *
1673 * cmd structure contains identifiers and data buffer
1674 **/
i40e_nvmupd_get_aq_event(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1675 static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1676 struct i40e_nvm_access *cmd,
1677 u8 *bytes, int *perrno)
1678 {
1679 u32 aq_total_len;
1680 u32 aq_desc_len;
1681
1682 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1683
1684 aq_desc_len = sizeof(struct i40e_aq_desc);
1685 aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
1686
1687 /* check copylength range */
1688 if (cmd->data_size > aq_total_len) {
1689 i40e_debug(hw, I40E_DEBUG_NVM,
1690 "%s: copy length %d too big, trimming to %d\n",
1691 __func__, cmd->data_size, aq_total_len);
1692 cmd->data_size = aq_total_len;
1693 }
1694
1695 i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
1696 I40E_NONDMA_TO_NONDMA);
1697
1698 return I40E_SUCCESS;
1699 }
1700
1701 /**
1702 * i40e_nvmupd_nvm_read - Read NVM
1703 * @hw: pointer to hardware structure
1704 * @cmd: pointer to nvm update command buffer
1705 * @bytes: pointer to the data buffer
1706 * @perrno: pointer to return error code
1707 *
1708 * cmd structure contains identifiers and data buffer
1709 **/
i40e_nvmupd_nvm_read(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1710 static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1711 struct i40e_nvm_access *cmd,
1712 u8 *bytes, int *perrno)
1713 {
1714 struct i40e_asq_cmd_details cmd_details;
1715 enum i40e_status_code status;
1716 u8 module, transaction;
1717 bool last;
1718
1719 transaction = i40e_nvmupd_get_transaction(cmd->config);
1720 module = i40e_nvmupd_get_module(cmd->config);
1721 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1722
1723 memset(&cmd_details, 0, sizeof(cmd_details));
1724 cmd_details.wb_desc = &hw->nvm_wb_desc;
1725
1726 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1727 bytes, last, &cmd_details);
1728 if (status) {
1729 i40e_debug(hw, I40E_DEBUG_NVM,
1730 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1731 module, cmd->offset, cmd->data_size);
1732 i40e_debug(hw, I40E_DEBUG_NVM,
1733 "i40e_nvmupd_nvm_read status %d aq %d\n",
1734 status, hw->aq.asq_last_status);
1735 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1736 }
1737
1738 return status;
1739 }
1740
1741 /**
1742 * i40e_nvmupd_nvm_erase - Erase an NVM module
1743 * @hw: pointer to hardware structure
1744 * @cmd: pointer to nvm update command buffer
1745 * @perrno: pointer to return error code
1746 *
1747 * module, offset, data_size and data are in cmd structure
1748 **/
i40e_nvmupd_nvm_erase(struct i40e_hw * hw,struct i40e_nvm_access * cmd,int * perrno)1749 static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1750 struct i40e_nvm_access *cmd,
1751 int *perrno)
1752 {
1753 enum i40e_status_code status = I40E_SUCCESS;
1754 struct i40e_asq_cmd_details cmd_details;
1755 u8 module, transaction;
1756 bool last;
1757
1758 transaction = i40e_nvmupd_get_transaction(cmd->config);
1759 module = i40e_nvmupd_get_module(cmd->config);
1760 last = (transaction & I40E_NVM_LCB);
1761
1762 memset(&cmd_details, 0, sizeof(cmd_details));
1763 cmd_details.wb_desc = &hw->nvm_wb_desc;
1764
1765 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1766 last, &cmd_details);
1767 if (status) {
1768 i40e_debug(hw, I40E_DEBUG_NVM,
1769 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1770 module, cmd->offset, cmd->data_size);
1771 i40e_debug(hw, I40E_DEBUG_NVM,
1772 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1773 status, hw->aq.asq_last_status);
1774 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1775 }
1776
1777 return status;
1778 }
1779
1780 /**
1781 * i40e_nvmupd_nvm_write - Write NVM
1782 * @hw: pointer to hardware structure
1783 * @cmd: pointer to nvm update command buffer
1784 * @bytes: pointer to the data buffer
1785 * @perrno: pointer to return error code
1786 *
1787 * module, offset, data_size and data are in cmd structure
1788 **/
i40e_nvmupd_nvm_write(struct i40e_hw * hw,struct i40e_nvm_access * cmd,u8 * bytes,int * perrno)1789 static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1790 struct i40e_nvm_access *cmd,
1791 u8 *bytes, int *perrno)
1792 {
1793 enum i40e_status_code status = I40E_SUCCESS;
1794 struct i40e_asq_cmd_details cmd_details;
1795 u8 module, transaction;
1796 u8 preservation_flags;
1797 bool last;
1798
1799 transaction = i40e_nvmupd_get_transaction(cmd->config);
1800 module = i40e_nvmupd_get_module(cmd->config);
1801 last = (transaction & I40E_NVM_LCB);
1802 preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1803
1804 memset(&cmd_details, 0, sizeof(cmd_details));
1805 cmd_details.wb_desc = &hw->nvm_wb_desc;
1806
1807 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1808 (u16)cmd->data_size, bytes, last,
1809 preservation_flags, &cmd_details);
1810 if (status) {
1811 i40e_debug(hw, I40E_DEBUG_NVM,
1812 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1813 module, cmd->offset, cmd->data_size);
1814 i40e_debug(hw, I40E_DEBUG_NVM,
1815 "i40e_nvmupd_nvm_write status %d aq %d\n",
1816 status, hw->aq.asq_last_status);
1817 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1818 }
1819
1820 return status;
1821 }
1822