xref: /dragonfly/sys/dev/netif/ig_hal/e1000_i210.c (revision 25a2db75)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2011, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "e1000_api.h"
36 
37 
/* Forward declarations for the file-local (static) helpers defined below. */
static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
static void e1000_release_nvm_i210(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_i210(struct e1000_hw *hw);
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data);
static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
static s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
			       u16 *data);
48 
49 /**
50  *  e1000_acquire_nvm_i210 - Request for access to EEPROM
51  *  @hw: pointer to the HW structure
52  *
53  *  Acquire the necessary semaphores for exclusive access to the EEPROM.
54  *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
55  *  Return successful if access grant bit set, else clear the request for
56  *  EEPROM access and return -E1000_ERR_NVM (-1).
57  **/
58 static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
59 {
60 	s32 ret_val;
61 
62 	DEBUGFUNC("e1000_acquire_nvm_i210");
63 
64 	ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
65 
66 	return ret_val;
67 }
68 
69 /**
70  *  e1000_release_nvm_i210 - Release exclusive access to EEPROM
71  *  @hw: pointer to the HW structure
72  *
73  *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
74  *  then release the semaphores acquired.
75  **/
76 static void e1000_release_nvm_i210(struct e1000_hw *hw)
77 {
78 	DEBUGFUNC("e1000_release_nvm_i210");
79 
80 	e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
81 }
82 
/**
 *  e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;	/* firmware's bits sit in the upper half */
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_i210");

	while (i < timeout) {
		/* The SW_FW_SYNC register is itself protected by the HW
		 * (SWSM) semaphore; take that before reading/writing it.
		 */
		if (e1000_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(swfw_sync & fwmask))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 */
		/* Drop the HW semaphore while waiting so firmware can
		 * make progress, then retry after a short delay.
		 */
		e1000_put_hw_semaphore_i210(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Mark the resource as owned by software; from here on the
	 * SW_FW_SYNC bit (not the HW semaphore) guards the resource.
	 */
	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_i210(hw);

out:
	return ret_val;
}
133 
134 /**
135  *  e1000_release_swfw_sync_i210 - Release SW/FW semaphore
136  *  @hw: pointer to the HW structure
137  *  @mask: specifies which semaphore to acquire
138  *
139  *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
140  *  will also specify which port we're releasing the lock for.
141  **/
142 void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
143 {
144 	u32 swfw_sync;
145 
146 	DEBUGFUNC("e1000_release_swfw_sync_i210");
147 
148 	while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
149 		; /* Empty */
150 
151 	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
152 	swfw_sync &= ~mask;
153 	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
154 
155 	e1000_put_hw_semaphore_i210(hw);
156 }
157 
158 /**
159  *  e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
160  *  @hw: pointer to the HW structure
161  *
162  *  Acquire the HW semaphore to access the PHY or NVM
163  **/
164 static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
165 {
166 	u32 swsm;
167 	s32 ret_val = E1000_SUCCESS;
168 	s32 timeout = hw->nvm.word_size + 1;
169 	s32 i = 0;
170 
171 	DEBUGFUNC("e1000_get_hw_semaphore_i210");
172 
173 	/* Get the FW semaphore. */
174 	for (i = 0; i < timeout; i++) {
175 		swsm = E1000_READ_REG(hw, E1000_SWSM);
176 		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
177 
178 		/* Semaphore acquired if bit latched */
179 		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
180 			break;
181 
182 		usec_delay(50);
183 	}
184 
185 	if (i == timeout) {
186 		/* Release semaphores */
187 		e1000_put_hw_semaphore_generic(hw);
188 		DEBUGOUT("Driver can't access the NVM\n");
189 		ret_val = -E1000_ERR_NVM;
190 		goto out;
191 	}
192 
193 out:
194 	return ret_val;
195 }
196 
197 /**
198  *  e1000_put_hw_semaphore_i210 - Release hardware semaphore
199  *  @hw: pointer to the HW structure
200  *
201  *  Release hardware semaphore used to access the PHY or NVM
202  **/
203 static void e1000_put_hw_semaphore_i210(struct e1000_hw *hw)
204 {
205 	u32 swsm;
206 
207 	DEBUGFUNC("e1000_put_hw_semaphore_i210");
208 
209 	swsm = E1000_READ_REG(hw, E1000_SWSM);
210 
211 	swsm &= ~E1000_SWSM_SWESMBI;
212 
213 	E1000_WRITE_REG(hw, E1000_SWSM, swsm);
214 }
215 
216 /**
217  *  e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
218  *  @hw: pointer to the HW structure
219  *  @offset: offset of word in the Shadow Ram to read
220  *  @words: number of words to read
221  *  @data: word read from the Shadow Ram
222  *
223  *  Reads a 16 bit word from the Shadow Ram using the EERD register.
224  *  Uses necessary synchronization semaphores.
225  **/
226 s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
227 			     u16 *data)
228 {
229 	s32 status = E1000_SUCCESS;
230 	u16 i, count;
231 
232 	DEBUGFUNC("e1000_read_nvm_srrd_i210");
233 
234 	/* We cannot hold synchronization semaphores for too long,
235 	 * because of forceful takeover procedure. However it is more efficient
236 	 * to read in bursts than synchronizing access for each word. */
237 	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
238 		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
239 			E1000_EERD_EEWR_MAX_COUNT : (words - i);
240 		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
241 			status = e1000_read_nvm_eerd(hw, offset, count,
242 						     data + i);
243 			hw->nvm.ops.release(hw);
244 		} else {
245 			status = E1000_ERR_SWFW_SYNC;
246 		}
247 
248 		if (status != E1000_SUCCESS)
249 			break;
250 	}
251 
252 	return status;
253 }
254 
255 /**
256  *  e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
257  *  @hw: pointer to the HW structure
258  *  @offset: offset within the Shadow RAM to be written to
259  *  @words: number of words to write
260  *  @data: 16 bit word(s) to be written to the Shadow RAM
261  *
262  *  Writes data to Shadow RAM at offset using EEWR register.
263  *
264  *  If e1000_update_nvm_checksum is not called after this function , the
265  *  data will not be committed to FLASH and also Shadow RAM will most likely
266  *  contain an invalid checksum.
267  *
268  *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
269  *  partially written.
270  **/
271 s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
272 			      u16 *data)
273 {
274 	s32 status = E1000_SUCCESS;
275 	u16 i, count;
276 
277 	DEBUGFUNC("e1000_write_nvm_srwr_i210");
278 
279 	/* We cannot hold synchronization semaphores for too long,
280 	 * because of forceful takeover procedure. However it is more efficient
281 	 * to write in bursts than synchronizing access for each word. */
282 	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
283 		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
284 			E1000_EERD_EEWR_MAX_COUNT : (words - i);
285 		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
286 			status = e1000_write_nvm_srwr(hw, offset, count,
287 						      data + i);
288 			hw->nvm.ops.release(hw);
289 		} else {
290 			status = E1000_ERR_SWFW_SYNC;
291 		}
292 
293 		if (status != E1000_SUCCESS)
294 			break;
295 	}
296 
297 	return status;
298 }
299 
300 /**
301  *  e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
302  *  @hw: pointer to the HW structure
303  *  @offset: offset within the Shadow Ram to be written to
304  *  @words: number of words to write
305  *  @data: 16 bit word(s) to be written to the Shadow Ram
306  *
307  *  Writes data to Shadow Ram at offset using EEWR register.
308  *
309  *  If e1000_update_nvm_checksum is not called after this function , the
310  *  Shadow Ram will most likely contain an invalid checksum.
311  **/
312 static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
313 				u16 *data)
314 {
315 	struct e1000_nvm_info *nvm = &hw->nvm;
316 	u32 i, k, eewr = 0;
317 	u32 attempts = 100000;
318 	s32 ret_val = E1000_SUCCESS;
319 
320 	DEBUGFUNC("e1000_write_nvm_srwr");
321 
322 	/*
323 	 * A check for invalid values:  offset too large, too many words,
324 	 * too many words for the offset, and not enough words.
325 	 */
326 	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
327 	    (words == 0)) {
328 		DEBUGOUT("nvm parameter(s) out of bounds\n");
329 		ret_val = -E1000_ERR_NVM;
330 		goto out;
331 	}
332 
333 	for (i = 0; i < words; i++) {
334 		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
335 			(data[i] << E1000_NVM_RW_REG_DATA) |
336 			E1000_NVM_RW_REG_START;
337 
338 		E1000_WRITE_REG(hw, E1000_SRWR, eewr);
339 
340 		for (k = 0; k < attempts; k++) {
341 			if (E1000_NVM_RW_REG_DONE &
342 			    E1000_READ_REG(hw, E1000_SRWR)) {
343 				ret_val = E1000_SUCCESS;
344 				break;
345 			}
346 			usec_delay(5);
347 		}
348 
349 		if (ret_val != E1000_SUCCESS) {
350 			DEBUGOUT("Shadow RAM write EEWR timed out\n");
351 			break;
352 		}
353 	}
354 
355 out:
356 	return ret_val;
357 }
358 
/**
 *  e1000_read_nvm_i211 - Read NVM wrapper function for I211
 *  @hw: pointer to the HW structure
 *  @offset: the word offset (aka eeprom offset) to read
 *  @words: number of words to read (unused; each supported offset maps to a
 *          fixed-size item served from iNVM or from cached PCI IDs)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 *  The i211 has no external NVM; only a handful of words are backed by the
 *  on-die iNVM (OTP), and several fall back to hard-coded defaults when not
 *  programmed there.
 **/
static s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
			       u16 *data)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_read_nvm_i211");

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		/* MAC address spans three consecutive words; OR the error
		 * codes so any single failure is reported.
		 */
		ret_val = e1000_read_invm_i211(hw, (u8)offset, &data[0]);
		ret_val |= e1000_read_invm_i211(hw, (u8)offset+1, &data[1]);
		ret_val |= e1000_read_invm_i211(hw, (u8)offset+2, &data[2]);
		if (ret_val != E1000_SUCCESS)
			DEBUGOUT("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		/* Optional word: fall back to the default when absent. */
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = E1000_SUCCESS;
		}
		break;
	/* PCI identifiers are served from the cached hw fields. */
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}
437 
/**
 *  e1000_read_invm_i211 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Return error when the word is not
 *  stored in OTP.
 **/
s32 e1000_read_invm_i211(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	DEBUGFUNC("e1000_read_invm_i211");

	/* Walk the iNVM dword by dword; each record starts with a type
	 * field, and multi-dword records are skipped by bumping i past
	 * their payload.
	 */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		/* Uninitialized space marks the end of programmed records. */
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		/* Skip over the payload of records we don't consume. */
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				DEBUGOUT2("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
481 
/**
 *  e1000_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.
 *  The version and image-type fields are searched backwards from the end of
 *  the usable iNVM blocks; the last programmed location wins.
 **/
s32 e1000_read_invm_version(struct e1000_hw *hw,
			    struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	/* Usable blocks exclude the reserved trailing ULT area. */
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	DEBUGFUNC("e1000_read_invm_version");

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number */
	/* Scan from the last usable block backwards; 'record' walks down
	 * while 'next_record' trails one block above it.
	 */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
	}

	if (status == E1000_SUCCESS) {
		/* Split the packed version word into major/minor. */
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have image type in first location used */
		else if ((((*record & 0x3) == 0) &&
			 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = E1000_SUCCESS;
			break;
		}
	}
	return status;
}
580 
581 /**
582  *  e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
583  *  @hw: pointer to the HW structure
584  *
585  *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
586  *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
587  **/
588 s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
589 {
590 	s32 status = E1000_SUCCESS;
591 	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
592 
593 	DEBUGFUNC("e1000_validate_nvm_checksum_i210");
594 
595 	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
596 
597 		/*
598 		 * Replace the read function with semaphore grabbing with
599 		 * the one that skips this for a while.
600 		 * We have semaphore taken already here.
601 		 */
602 		read_op_ptr = hw->nvm.ops.read;
603 		hw->nvm.ops.read = e1000_read_nvm_eerd;
604 
605 		status = e1000_validate_nvm_checksum_generic(hw);
606 
607 		/* Revert original read operation. */
608 		hw->nvm.ops.read = read_op_ptr;
609 
610 		hw->nvm.ops.release(hw);
611 	} else {
612 		status = E1000_ERR_SWFW_SYNC;
613 	}
614 
615 	return status;
616 }
617 
618 
/**
 *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_i210");

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/*
		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		/* Sum every word up to (excluding) the checksum word. */
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				/* Release before bailing out. */
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		/* Checksum word makes the total sum equal NVM_SUM (0xBABA). */
		checksum = (u16) NVM_SUM - checksum;
		ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
						&checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		/* Commit the updated Shadow RAM contents to flash. */
		ret_val = e1000_update_flash_i210(hw);
	} else {
		ret_val = E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
680 
681 #if defined(QV_RELEASE) && defined(SPRINGVILLE_FLASHLESS_HW)
682 /**
683  *  e1000_get_flash_presence_i210 - Check if flash device is detected.
684  *  @hw: pointer to the HW structure
685  *
686  **/
687 static bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
688 {
689 	u32 eec = 0;
690 	bool ret_val = FALSE;
691 
692 	DEBUGFUNC("e1000_get_flash_presence_i210");
693 
694 	eec = E1000_READ_REG(hw, E1000_EECD);
695 
696 	if (eec & E1000_EECD_FLASH_DETECTED_I210)
697 		ret_val = TRUE;
698 
699 	return ret_val;
700 }
701 
702 #endif /* QV_RELEASE && SPRINGVILLE_FLASHLESS_HW */
703 /**
704  *  e1000_update_flash_i210 - Commit EEPROM to the flash
705  *  @hw: pointer to the HW structure
706  *
707  **/
708 s32 e1000_update_flash_i210(struct e1000_hw *hw)
709 {
710 	s32 ret_val = E1000_SUCCESS;
711 	u32 flup;
712 
713 	DEBUGFUNC("e1000_update_flash_i210");
714 
715 	ret_val = e1000_pool_flash_update_done_i210(hw);
716 	if (ret_val == -E1000_ERR_NVM) {
717 		DEBUGOUT("Flash update time out\n");
718 		goto out;
719 	}
720 
721 	flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
722 	E1000_WRITE_REG(hw, E1000_EECD, flup);
723 
724 	ret_val = e1000_pool_flash_update_done_i210(hw);
725 	if (ret_val == E1000_SUCCESS)
726 		DEBUGOUT("Flash update complete\n");
727 	else
728 		DEBUGOUT("Flash update time out\n");
729 
730 out:
731 	return ret_val;
732 }
733 
734 /**
735  *  e1000_pool_flash_update_done_i210 - Pool FLUDONE status.
736  *  @hw: pointer to the HW structure
737  *
738  **/
739 s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
740 {
741 	s32 ret_val = -E1000_ERR_NVM;
742 	u32 i, reg;
743 
744 	DEBUGFUNC("e1000_pool_flash_update_done_i210");
745 
746 	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
747 		reg = E1000_READ_REG(hw, E1000_EECD);
748 		if (reg & E1000_EECD_FLUDONE_I210) {
749 			ret_val = E1000_SUCCESS;
750 			break;
751 		}
752 		usec_delay(5);
753 	}
754 
755 	return ret_val;
756 }
757 
758 /**
759  *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
760  *  @hw: pointer to the HW structure
761  *
762  *  Initialize the i210 NVM parameters and function pointers.
763  **/
764 static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
765 {
766 	s32 ret_val = E1000_SUCCESS;
767 	struct e1000_nvm_info *nvm = &hw->nvm;
768 
769 	DEBUGFUNC("e1000_init_nvm_params_i210");
770 
771 	ret_val = e1000_init_nvm_params_82575(hw);
772 
773 	nvm->ops.acquire = e1000_acquire_nvm_i210;
774 	nvm->ops.release = e1000_release_nvm_i210;
775 	nvm->ops.read    = e1000_read_nvm_srrd_i210;
776 	nvm->ops.write   = e1000_write_nvm_srwr_i210;
777 	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
778 	nvm->ops.validate = e1000_validate_nvm_checksum_i210;
779 	nvm->ops.update   = e1000_update_nvm_checksum_i210;
780 
781 	return ret_val;
782 }
783 
784 /**
785  *  e1000_init_nvm_params_i211 - Initialize i211 NVM function pointers
786  *  @hw: pointer to the HW structure
787  *
788  *  Initialize the NVM parameters and function pointers for i211.
789  **/
790 static s32 e1000_init_nvm_params_i211(struct e1000_hw *hw)
791 {
792 	struct e1000_nvm_info *nvm = &hw->nvm;
793 
794 	DEBUGFUNC("e1000_init_nvm_params_i211");
795 
796 	nvm->ops.acquire  = e1000_acquire_nvm_i210;
797 	nvm->ops.release  = e1000_release_nvm_i210;
798 	nvm->ops.read     = e1000_read_nvm_i211;
799 	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
800 	nvm->ops.write    = e1000_null_write_nvm;
801 	nvm->ops.validate = e1000_null_ops_generic;
802 	nvm->ops.update   = e1000_null_ops_generic;
803 
804 	return E1000_SUCCESS;
805 }
806 
807 /**
808  *  e1000_init_function_pointers_i210 - Init func ptrs.
809  *  @hw: pointer to the HW structure
810  *
811  *  Called to initialize all function pointers and parameters.
812  **/
813 void e1000_init_function_pointers_i210(struct e1000_hw *hw)
814 {
815 	e1000_init_function_pointers_82575(hw);
816 
817 	switch (hw->mac.type) {
818 	case e1000_i210:
819 #if defined(QV_RELEASE) && defined(SPRINGVILLE_FLASHLESS_HW)
820 		if (e1000_get_flash_presence_i210(hw))
821 			hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
822 		else
823 			hw->nvm.ops.init_params = e1000_init_nvm_params_i211;
824 #else
825 		hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
826 #endif /* QV_RELEASE && SPRINGVILLE_FLASHLESS_HW */
827 		break;
828 	case e1000_i211:
829 		hw->nvm.ops.init_params = e1000_init_nvm_params_i211;
830 		break;
831 	default:
832 		break;
833 	}
834 	return;
835 }
836 
837 /**
838  *  e1000_valid_led_default_i210 - Verify a valid default LED config
839  *  @hw: pointer to the HW structure
840  *  @data: pointer to the NVM (EEPROM)
841  *
842  *  Read the EEPROM for the current default LED configuration.  If the
843  *  LED configuration is not valid, set to a valid LED configuration.
844  **/
845 static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
846 {
847 	s32 ret_val;
848 
849 	DEBUGFUNC("e1000_valid_led_default_i210");
850 
851 	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
852 	if (ret_val) {
853 		DEBUGOUT("NVM Read Error\n");
854 		goto out;
855 	}
856 
857 	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
858 		switch (hw->phy.media_type) {
859 		case e1000_media_type_internal_serdes:
860 			*data = ID_LED_DEFAULT_I210_SERDES;
861 			break;
862 		case e1000_media_type_copper:
863 		default:
864 			*data = ID_LED_DEFAULT_I210;
865 			break;
866 		}
867 	}
868 out:
869 	return ret_val;
870 }
871