xref: /freebsd/sys/dev/cxgb/common/cxgb_t3_hw.c (revision 39beb93c)
1 /**************************************************************************
2 
3 Copyright (c) 2007-2008, Chelsio Inc.
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Neither the name of the Chelsio Corporation nor the names of its
13     contributors may be used to endorse or promote products derived from
14     this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27 
28 ***************************************************************************/
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 
34 #include <cxgb_include.h>
35 
36 #undef msleep
37 #define msleep t3_os_sleep
38 
39 /**
40  *	t3_wait_op_done_val - wait until an operation is completed
41  *	@adapter: the adapter performing the operation
42  *	@reg: the register to check for completion
43  *	@mask: a single-bit field within @reg that indicates completion
44  *	@polarity: the value of the field when the operation is completed
45  *	@attempts: number of check iterations
46  *	@delay: delay in usecs between iterations
47  *	@valp: where to store the value of the register at completion time
48  *
49  *	Wait until an operation is completed by checking a bit in a register
50  *	up to @attempts times.  If @valp is not NULL the value of the register
51  *	at the time it indicated completion is stored there.  Returns 0 if the
52  *	operation completes and	-EAGAIN	otherwise.
53  */
54 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
55 			int attempts, int delay, u32 *valp)
56 {
57 	while (1) {
58 		u32 val = t3_read_reg(adapter, reg);
59 
60 		if (!!(val & mask) == polarity) {
61 			if (valp)
62 				*valp = val;
63 			return 0;
64 		}
65 		if (--attempts == 0)
66 			return -EAGAIN;
67 		if (delay)
68 			udelay(delay);
69 	}
70 }
71 
72 /**
73  *	t3_write_regs - write a bunch of registers
74  *	@adapter: the adapter to program
75  *	@p: an array of register address/register value pairs
76  *	@n: the number of address/value pairs
77  *	@offset: register address offset
78  *
79  *	Takes an array of register address/register value pairs and writes each
80  *	value to the corresponding register.  Register addresses are adjusted
81  *	by the supplied offset.
82  */
83 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
84 		   unsigned int offset)
85 {
86 	while (n--) {
87 		t3_write_reg(adapter, p->reg_addr + offset, p->val);
88 		p++;
89 	}
90 }
91 
92 /**
93  *	t3_set_reg_field - set a register field to a value
94  *	@adapter: the adapter to program
95  *	@addr: the register address
96  *	@mask: specifies the portion of the register to modify
97  *	@val: the new value for the register field
98  *
99  *	Sets a register field specified by the supplied mask to the
100  *	given value.
101  */
102 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
103 {
104 	u32 v = t3_read_reg(adapter, addr) & ~mask;
105 
106 	t3_write_reg(adapter, addr, v | val);
107 	(void) t3_read_reg(adapter, addr);      /* flush */
108 }
109 
110 /**
111  *	t3_read_indirect - read indirectly addressed registers
112  *	@adap: the adapter
113  *	@addr_reg: register holding the indirect address
114  *	@data_reg: register holding the value of the indirect register
115  *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
118  *
119  *	Reads registers that are accessed indirectly through an address/data
120  *	register pair.
121  */
122 static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
123 		      unsigned int data_reg, u32 *vals, unsigned int nregs,
124 		      unsigned int start_idx)
125 {
126 	while (nregs--) {
127 		t3_write_reg(adap, addr_reg, start_idx);
128 		*vals++ = t3_read_reg(adap, data_reg);
129 		start_idx++;
130 	}
131 }
132 
133 /**
134  *	t3_mc7_bd_read - read from MC7 through backdoor accesses
135  *	@mc7: identifies MC7 to read from
136  *	@start: index of first 64-bit word to read
137  *	@n: number of 64-bit words to read
138  *	@buf: where to store the read result
139  *
140  *	Read n 64-bit words from MC7 starting at word start, using backdoor
141  *	accesses.
142  */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
                   u64 *buf)
{
	/* Per-width tables: mc7->width indexes how a 64-bit word is split
	 * across backdoor reads (width 0 appears to be a full 64-bit bus,
	 * larger widths narrower buses -- confirm against the MC7 docs). */
	static int shift[] = { 0, 0, 16, 24 };
	static int step[]  = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;  /* # of 64-bit words */
	adapter_t *adap = mc7->adapter;

	/* The requested range must lie entirely within the memory. */
	if (start >= size64 || start + n > size64)
		return -EINVAL;

	/* Convert the 64-bit word index to the backdoor byte address. */
	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		/* Narrow buses need several reads per 64-bit word. */
		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			/* Issue the backdoor read and poll for completion. */
			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
				       start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* DATA0 holds the low half, DATA1 the high. */
				val64 = t3_read_reg(adap,
						mc7->offset + A_MC7_BD_DATA0);
				val64 |= (u64)val << 32;
			} else {
				/* Extract the valid slice of DATA1 and merge
				 * it into its position within the word. */
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64)val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
190 
191 /*
192  * Initialize MI1.
193  */
194 static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
195 {
196         u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197         u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
198 		  V_CLKDIV(clkdiv);
199 
200 	if (!(ai->caps & SUPPORTED_10000baseT_Full))
201 		val |= V_ST(1);
202         t3_write_reg(adap, A_MI1_CFG, val);
203 }
204 
205 #define MDIO_ATTEMPTS 20
206 
207 /*
208  * MI1 read/write operations for direct-addressed PHYs.
209  */
static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	/* Direct-addressed PHYs have no MMD; a non-zero MMD is an error. */
	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	/* Latch the PHY/register address, start a read (op 2), then poll
	 * BUSY until the data register is valid. */
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	MDIO_UNLOCK(adapter);
	return ret;
}
228 
static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	/* Direct-addressed PHYs have no MMD; a non-zero MMD is an error. */
	if (mmd_addr)
		return -EINVAL;

	MDIO_LOCK(adapter);
	/* Set address and data, start a write (op 1), poll for completion. */
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	MDIO_UNLOCK(adapter);
	return ret;
}
246 
/* MDIO operations used for directly addressed PHYs. */
static struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};
251 
252 /*
253  * MI1 read/write operations for indirect-addressed PHYs.
254  */
static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	/* Phase 1: write the target register address (op 0). */
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		/* Phase 2: issue the indirect read (op 3) and fetch data. */
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}
276 
static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	MDIO_LOCK(adapter);
	/* Phase 1: write the target register address (op 0). */
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret) {
		/* Phase 2: write the data (op 1) and wait for completion. */
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	MDIO_UNLOCK(adapter);
	return ret;
}
297 
/* MDIO operations used for indirectly addressed PHYs. */
static struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};
302 
303 /**
304  *	t3_mdio_change_bits - modify the value of a PHY register
305  *	@phy: the PHY to operate on
306  *	@mmd: the device address
307  *	@reg: the register address
308  *	@clear: what part of the register value to mask off
309  *	@set: what part of the register value to set
310  *
311  *	Changes the value of a PHY register by applying a mask to its current
312  *	value and ORing the result with a new value.
313  */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int val;
	int ret = mdio_read(phy, mmd, reg, &val);

	/* Only write back if the read succeeded; clear first, then set. */
	if (ret)
		return ret;
	return mdio_write(phy, mmd, reg, (val & ~clear) | set);
}
327 
328 /**
329  *	t3_phy_reset - reset a PHY block
330  *	@phy: the PHY to operate on
331  *	@mmd: the device address of the PHY block to reset
332  *	@wait: how long to wait for the reset to complete in 1ms increments
333  *
334  *	Resets a PHY block and optionally waits for the reset to complete.
335  *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
336  *	for 10G PHYs.
337  */
338 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
339 {
340 	int err;
341 	unsigned int ctl;
342 
343 	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
344 	if (err || !wait)
345 		return err;
346 
347 	do {
348 		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
349 		if (err)
350 			return err;
351 		ctl &= BMCR_RESET;
352 		if (ctl)
353 			msleep(1);
354 	} while (ctl && --wait);
355 
356 	return ctl ? -1 : 0;
357 }
358 
359 /**
360  *	t3_phy_advertise - set the PHY advertisement registers for autoneg
361  *	@phy: the PHY to operate on
362  *	@advert: bitmap of capabilities the PHY should advertise
363  *
364  *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
365  *	requested capabilities.
366  */
367 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
368 {
369 	int err;
370 	unsigned int val = 0;
371 
372 	err = mdio_read(phy, 0, MII_CTRL1000, &val);
373 	if (err)
374 		return err;
375 
376 	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
377 	if (advert & ADVERTISED_1000baseT_Half)
378 		val |= ADVERTISE_1000HALF;
379 	if (advert & ADVERTISED_1000baseT_Full)
380 		val |= ADVERTISE_1000FULL;
381 
382 	err = mdio_write(phy, 0, MII_CTRL1000, val);
383 	if (err)
384 		return err;
385 
386 	val = 1;
387 	if (advert & ADVERTISED_10baseT_Half)
388 		val |= ADVERTISE_10HALF;
389 	if (advert & ADVERTISED_10baseT_Full)
390 		val |= ADVERTISE_10FULL;
391 	if (advert & ADVERTISED_100baseT_Half)
392 		val |= ADVERTISE_100HALF;
393 	if (advert & ADVERTISED_100baseT_Full)
394 		val |= ADVERTISE_100FULL;
395 	if (advert & ADVERTISED_Pause)
396 		val |= ADVERTISE_PAUSE_CAP;
397 	if (advert & ADVERTISED_Asym_Pause)
398 		val |= ADVERTISE_PAUSE_ASYM;
399 	return mdio_write(phy, 0, MII_ADVERTISE, val);
400 }
401 
402 /**
403  *	t3_phy_advertise_fiber - set fiber PHY advertisement register
404  *	@phy: the PHY to operate on
405  *	@advert: bitmap of capabilities the PHY should advertise
406  *
407  *	Sets a fiber PHY's advertisement register to advertise the
408  *	requested capabilities.
409  */
410 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
411 {
412 	unsigned int val = 0;
413 
414 	if (advert & ADVERTISED_1000baseT_Half)
415 		val |= ADVERTISE_1000XHALF;
416 	if (advert & ADVERTISED_1000baseT_Full)
417 		val |= ADVERTISE_1000XFULL;
418 	if (advert & ADVERTISED_Pause)
419 		val |= ADVERTISE_1000XPAUSE;
420 	if (advert & ADVERTISED_Asym_Pause)
421 		val |= ADVERTISE_1000XPSE_ASYM;
422 	return mdio_write(phy, 0, MII_ADVERTISE, val);
423 }
424 
425 /**
426  *	t3_set_phy_speed_duplex - force PHY speed and duplex
427  *	@phy: the PHY to operate on
428  *	@speed: requested PHY speed
429  *	@duplex: requested PHY duplex
430  *
431  *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
432  *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
433  */
434 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
435 {
436 	int err;
437 	unsigned int ctl;
438 
439 	err = mdio_read(phy, 0, MII_BMCR, &ctl);
440 	if (err)
441 		return err;
442 
443 	if (speed >= 0) {
444 		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
445 		if (speed == SPEED_100)
446 			ctl |= BMCR_SPEED100;
447 		else if (speed == SPEED_1000)
448 			ctl |= BMCR_SPEED1000;
449 	}
450 	if (duplex >= 0) {
451 		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
452 		if (duplex == DUPLEX_FULL)
453 			ctl |= BMCR_FULLDPLX;
454 	}
455 	if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for GigE */
456 		ctl |= BMCR_ANENABLE;
457 	return mdio_write(phy, 0, MII_BMCR, ctl);
458 }
459 
/* Enable the PHY's LASI (link alarm status) interrupt. */
int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}
464 
/* Disable the PHY's LASI (link alarm status) interrupt. */
int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}
469 
/* Clear a pending LASI interrupt. */
int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	/* The status value is discarded; presumably LASI_STAT is latched
	 * read-to-clear -- confirm against the PHY datasheet. */
	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}
476 
477 int t3_phy_lasi_intr_handler(struct cphy *phy)
478 {
479 	unsigned int status;
480 	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
481 
482 	if (err)
483 		return err;
484 	return (status & 1) ?  cphy_cause_link_change : 0;
485 }
486 
/*
 * Per-board personality table, indexed by adapter id (see
 * t3_get_adapter_info()).  Each entry supplies GPIO output-enable/value
 * masks, PHY interrupt GPIO selectors, link capability flags, the MDIO
 * ops to use, and the board name.  Exact field meanings follow
 * struct adapter_info (declared elsewhere) -- verify against it.
 */
static struct adapter_info t3_adap_info[] = {
	{ 1, 1, 0, 0, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio PE9000" },
	{ 1, 1, 0, 0, 0,
	  F_GPIO2_OEN | F_GPIO4_OEN |
	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	  &mi1_mdio_ops, "Chelsio T302" },
	{ 1, 0, 0, 0, 0,
	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	  F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T310" },
	{ 1, 1, 0, 0, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	  F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	  F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio T320" },
	{ 4, 0, 0, 0, 0,
	  F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
	  F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
	  { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
	  &mi1_mdio_ops, "Chelsio T304" },
	{ 0 },	/* placeholder: id 5 is unused */
	{ 1, 0, 0, 0, 0,
	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	  F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	  &mi1_mdio_ext_ops, "Chelsio N310" }
};
519 
520 /*
521  * Return the adapter_info structure with a given index.  Out-of-range indices
522  * return NULL.
523  */
524 const struct adapter_info *t3_get_adapter_info(unsigned int id)
525 {
526 	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
527 }
528 
/* Maps a VPD port type to the routine that prepares its PHY. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, adapter_t *adapter, int phy_addr,
			const struct mdio_ops *ops);
};
533 
/*
 * PHY preparation handlers indexed by the port_type values decoded from
 * VPD (see get_vpd_params()); index 0 means "no PHY".
 */
static struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ t3_mv88e1xxx_phy_prep },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ t3_tn1010_phy_prep },
};
545 
/*
 * Expands to the three fields of a VPD keyword entry: the 2-byte keyword,
 * a 1-byte length, and the data bytes themselves.
 */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
548 
549 /*
550  * Partial EEPROM Vital Product Data structure.  Includes only the ID and
551  * VPD-R sections.
552  */
/*
 * In-memory mirror of the EEPROM VPD layout; read 4 bytes at a time by
 * get_vpd_params(), hence the trailing pad to a multiple of 4.
 */
struct t3_vpd {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[16];
	u8  vpdr_tag;
	u8  vpdr_len[2];
	VPD_ENTRY(pn, 16);                     /* part number */
	VPD_ENTRY(ec, ECNUM_LEN);              /* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);             /* serial number */
	VPD_ENTRY(na, 12);                     /* MAC address base */
	VPD_ENTRY(cclk, 6);                    /* core clock */
	VPD_ENTRY(mclk, 6);                    /* mem clock */
	VPD_ENTRY(uclk, 6);                    /* uP clk */
	VPD_ENTRY(mdc, 6);                     /* MDIO clk */
	VPD_ENTRY(mt, 2);                      /* mem timing */
	VPD_ENTRY(xaui0cfg, 6);                /* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);                /* XAUI1 config */
	VPD_ENTRY(port0, 2);                   /* PHY0 complex */
	VPD_ENTRY(port1, 2);                   /* PHY1 complex */
	VPD_ENTRY(port2, 2);                   /* PHY2 complex */
	VPD_ENTRY(port3, 2);                   /* PHY3 complex */
	VPD_ENTRY(rv, 1);                      /* csum */
	u32 pad;                  /* for multiple-of-4 sizing and alignment */
};
577 
#define EEPROM_MAX_POLL   40      /* max poll iterations for a VPD access */
#define EEPROM_STAT_ADDR  0x4000  /* address of the EEPROM status word */
#define VPD_BASE          0xc00   /* usual EEPROM offset of the VPD data */
581 
582 /**
583  *	t3_seeprom_read - read a VPD EEPROM location
584  *	@adapter: adapter to read
585  *	@addr: EEPROM address
586  *	@data: where to store the read data
587  *
588  *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
589  *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
591  *	set the flag to 1 when 4 bytes have been read into the data register.
592  */
int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* The address must be word-aligned and inside the EEPROM; the
	 * status word at EEPROM_STAT_ADDR is allowed as a special case. */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	/* Writing the address with the flag bit clear starts a read; the
	 * device sets PCI_VPD_ADDR_F when the data word is ready. */
	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	/* VPD data arrives little-endian; convert to host order. */
	t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
616 
617 /**
618  *	t3_seeprom_write - write a VPD EEPROM location
619  *	@adapter: adapter to write
620  *	@addr: EEPROM address
621  *	@data: value to write
622  *
623  *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
624  *	VPD ROM capability.
625  */
int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* Same address validation as t3_seeprom_read(). */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	/* Load the (little-endian) data first, then write the address with
	 * the flag bit set to start the write; the device clears
	 * PCI_VPD_ADDR_F when the write has completed. */
	t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}
650 
651 /**
652  *	t3_seeprom_wp - enable/disable EEPROM write protection
653  *	@adapter: the adapter
654  *	@enable: 1 to enable write protection, 0 to disable it
655  *
656  *	Enables or disables write protection on the serial EEPROM.
657  */
int t3_seeprom_wp(adapter_t *adapter, int enable)
{
	/* 0xc presumably sets the write-protect bits in the EEPROM status
	 * word -- confirm against the EEPROM datasheet. */
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
662 
663 /*
664  * Convert a character holding a hex digit to a number.
665  */
/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	return toupper(c) - 'A' + 10;
}
670 
671 /**
672  *	get_vpd_params - read VPD parameters from VPD EEPROM
673  *	@adapter: adapter to read
674  *	@p: where to store the parameters
675  *
676  *	Reads card parameters stored in VPD EEPROM.
677  */
static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	/* 0x82 is the PCI VPD identifier-string tag; if it is not found at
	 * VPD_BASE, fall back to offset 0. */
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	/* Pull in the entire t3_vpd structure, 32 bits at a time. */
	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	/* Clock/timing values are stored as decimal ASCII strings. */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
	memcpy(p->ec, vpd.ec_data, ECNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		/* Default port types for rev-0 boards (indices into
		 * port_types[]). */
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
		p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
		p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
		p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	/* The MAC address base is stored as 12 hex digits. */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
725 
726 /* BIOS boot header */
/* BIOS boot ROM header; the signature bytes must match BOOT_SIGNATURE
 * (0xaa55, defined below). */
typedef struct boot_header_s {
	u8	signature[2];	/* signature */
	u8	length;		/* image length (include header) */
	u8	offset[4];	/* initialization vector */
	u8	reserved[19];	/* reserved */
	u8	exheader[2];	/* offset to expansion header */
} boot_header_t;
734 
735 /* serial flash and firmware constants */
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,           /* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,   /* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,       /* program page */
	SF_WR_DISABLE   = 4,       /* disable writes */
	SF_RD_STATUS    = 5,       /* read status register */
	SF_WR_ENABLE    = 6,       /* enable writes */
	SF_RD_DATA_FAST = 0xb,     /* read flash */
	SF_ERASE_SECTOR = 0xd8,    /* erase sector */

	/* firmware layout within the flash */
	FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
	OLD_FW_VERS_ADDR = 0x77ffc,   /* flash address holding FW version */
	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
	FW_MIN_SIZE = 8,           /* at least version and csum */
	FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,

	/* BIOS boot image layout */
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
	BOOT_MAX_SIZE = 0xff*BOOT_SIZE_INC /* 1 byte * length increment  */
};
761 
762 /**
763  *	sf1_read - read data from the serial flash
764  *	@adapter: the adapter
765  *	@byte_cnt: number of bytes to read
766  *	@cont: whether another operation will be chained
767  *	@valp: where to store the read data
768  *
769  *	Reads up to 4 bytes of data from the serial flash.  The location of
770  *	the read needs to be specified prior to calling this by issuing the
771  *	appropriate commands to the serial flash.
772  */
static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	/* The SF interface moves at most 4 bytes per operation. */
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* Start the read; V_CONT presumably keeps the flash selected so a
	 * follow-up operation can be chained -- confirm with the T3 docs. */
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}
788 
789 /**
790  *	sf1_write - write data to the serial flash
791  *	@adapter: the adapter
792  *	@byte_cnt: number of bytes to write
793  *	@cont: whether another operation will be chained
794  *	@val: value to write
795  *
796  *	Writes up to 4 bytes of data to the serial flash.  The location of
797  *	the write needs to be specified prior to calling this by issuing the
798  *	appropriate commands to the serial flash.
799  */
static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	/* The SF interface moves at most 4 bytes per operation. */
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* Load the data, then start the write (V_OP(1)); V_CONT allows a
	 * follow-up operation to be chained. */
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
812 
813 /**
814  *	flash_wait_op - wait for a flash operation to complete
815  *	@adapter: the adapter
816  *	@attempts: max number of polls of the status register
817  *	@delay: delay between polls in ms
818  *
819  *	Wait for a flash operation to complete by polling the status register.
820  */
821 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
822 {
823 	int ret;
824 	u32 status;
825 
826 	while (1) {
827 		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
828 		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
829 			return ret;
830 		if (!(status & 1))
831 			return 0;
832 		if (--attempts == 0)
833 			return -EAGAIN;
834 		if (delay)
835 			msleep(delay);
836 	}
837 }
838 
839 /**
840  *	t3_read_flash - read words from serial flash
841  *	@adapter: the adapter
842  *	@addr: the start address for the read
843  *	@nwords: how many 32-bit words to read
844  *	@data: where to store the read data
845  *	@byte_oriented: whether to store data as bytes or as words
846  *
847  *	Read the specified number of 32-bit words from the serial flash.
848  *	If @byte_oriented is set the read data is stored as a byte array
849  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
851  */
int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
		  u32 *data, int byte_oriented)
{
	int ret;

	/* Reject reads past the end of flash or from unaligned addresses. */
	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	/* Build the command word: byte-swapped address + fast-read opcode. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* Send the command; the single-byte read that follows presumably
	 * consumes the fast-read dummy cycle before real data arrives --
	 * its result is overwritten by the loop below. */
	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	/* Chain 4-byte reads, dropping CONT on the final word. */
	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
875 
876 /**
877  *	t3_write_flash - write up to a page of data to the serial flash
878  *	@adapter: the adapter
879  *	@addr: the start address to write
880  *	@n: length of data to write
881  *	@data: the data to write
882  *	@byte_oriented: whether to store data as bytes or as words
883  *
884  *	Writes up to a page of data (256 bytes) to the serial flash starting
885  *	at the given address.
886  *	If @byte_oriented is set the write data is stored as a 32-bit
 *	big-endian array, otherwise in the processor's native endianness.
888  *
889  */
static int t3_write_flash(adapter_t *adapter, unsigned int addr,
			  unsigned int n, const u8 *data,
			  int byte_oriented)
{
	int ret;
	u32 buf[64];
	unsigned int c, left, val, offset = addr & 0xff;

	/* The write must fit in flash and within one 256-byte page. */
	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	/* Command word: byte-swapped address + the program-page opcode. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	/* Stream the payload in up-to-4-byte chunks, chaining every
	 * transfer except the last.
	 * NOTE(review): the u32 load below reads 4 bytes even when fewer
	 * remain and assumes @data is 4-byte aligned -- confirm callers
	 * always pass aligned, adequately-sized buffers. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		val = *(const u32*)data;
		data += c;
		if (byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* data has advanced past the payload; back up n bytes to compare. */
	if (memcmp(data - n, (u8 *)buf + offset, n))
		return -EIO;
	return 0;
}
931 
932 /**
933  *	t3_get_tp_version - read the tp sram version
934  *	@adapter: the adapter
935  *	@vers: where to place the version
936  *
937  *	Reads the protocol sram version from sram.
938  */
int t3_get_tp_version(adapter_t *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	/* Wait for bit 0 of FIELD0 to be set, signalling that FIELD1 holds
	 * the version word. */
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}
954 
955 /**
956  *	t3_check_tpsram_version - read the tp sram version
957  *	@adapter: the adapter
958  *
959  */
int t3_check_tpsram_version(adapter_t *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	/* Rev-A parts are exempt from the TP SRAM version check. */
	if (adapter->params.rev == T3_REV_A)
		return 0;

	*must_load = 1;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	/* NOTE(review): vers was already filled in by t3_get_tp_version();
	 * this direct re-read of A_TP_EMBED_OP_FIELD1 looks redundant. */
	vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	/* A major mismatch forces a reload (*must_load stays 1); a minor
	 * mismatch is reported but tolerated (*must_load = 0).  Either way
	 * the version is wrong, so -EINVAL is returned. */
	if (major != TP_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver needs version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	else {
		*must_load = 0;
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}
995 
996 /**
997  *	t3_check_tpsram - check if provided protocol SRAM
998  *			  is compatible with this driver
999  *	@adapter: the adapter
1000  *	@tp_sram: the firmware image to write
1001  *	@size: image size
1002  *
1003  *	Checks if an adapter's tp sram is compatible with the driver.
1004  *	Returns 0 if the versions are compatible, a negative error otherwise.
1005  */
1006 int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
1007 {
1008 	u32 csum;
1009 	unsigned int i;
1010 	const u32 *p = (const u32 *)tp_sram;
1011 
1012 	/* Verify checksum */
1013 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1014 		csum += ntohl(p[i]);
1015 	if (csum != 0xffffffff) {
1016 		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1017 		       csum);
1018 		return -EINVAL;
1019 	}
1020 
1021 	return 0;
1022 }
1023 
/*
 * Firmware flavor as encoded in the FW version word (extracted with
 * G_FW_VERSION_TYPE); t3_check_fw_version() accepts only FW_VERSION_T3.
 */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
1028 
1029 /**
1030  *	t3_get_fw_version - read the firmware version
1031  *	@adapter: the adapter
1032  *	@vers: where to place the version
1033  *
1034  *	Reads the FW version from flash.
1035  */
1036 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
1037 {
1038 	int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1039 
1040 	if (!ret && *vers != 0xffffffff)
1041 		return 0;
1042 	else
1043 		return t3_read_flash(adapter, OLD_FW_VERS_ADDR, 1, vers, 0);
1044 }
1045 
1046 /**
1047  *	t3_check_fw_version - check if the FW is compatible with this driver
1048  *	@adapter: the adapter
1049  *
1050  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
1051  *	if the versions are compatible, a negative error otherwise.
1052  */
1053 int t3_check_fw_version(adapter_t *adapter, int *must_load)
1054 {
1055 	int ret;
1056 	u32 vers;
1057 	unsigned int type, major, minor;
1058 
1059 	*must_load = 1;
1060 	ret = t3_get_fw_version(adapter, &vers);
1061 	if (ret)
1062 		return ret;
1063 
1064 	type = G_FW_VERSION_TYPE(vers);
1065 	major = G_FW_VERSION_MAJOR(vers);
1066 	minor = G_FW_VERSION_MINOR(vers);
1067 
1068 	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1069 	    minor == FW_VERSION_MINOR)
1070 		return 0;
1071 
1072 	if (major != FW_VERSION_MAJOR)
1073 		CH_ERR(adapter, "found wrong FW version(%u.%u), "
1074 		       "driver needs version %u.%u\n", major, minor,
1075 		       FW_VERSION_MAJOR, FW_VERSION_MINOR);
1076 	else if ((int)minor < FW_VERSION_MINOR) {
1077 		*must_load = 0;
1078 		CH_WARN(adapter, "found old FW minor version(%u.%u), "
1079 		        "driver compiled for version %u.%u\n", major, minor,
1080 			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1081 	} else {
1082 		CH_WARN(adapter, "found newer FW version(%u.%u), "
1083 		        "driver compiled for version %u.%u\n", major, minor,
1084 			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1085 			return 0;
1086 	}
1087 	return -EINVAL;
1088 }
1089 
1090 /**
1091  *	t3_flash_erase_sectors - erase a range of flash sectors
1092  *	@adapter: the adapter
1093  *	@start: the first sector to erase
1094  *	@end: the last sector to erase
1095  *
1096  *	Erases the sectors in the given range.
1097  */
1098 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
1099 {
1100 	while (start <= end) {
1101 		int ret;
1102 
1103 		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1104 		    (ret = sf1_write(adapter, 4, 0,
1105 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1106 		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
1107 			return ret;
1108 		start++;
1109 	}
1110 	return 0;
1111 }
1112 
1113 /*
1114  *	t3_load_fw - download firmware
1115  *	@adapter: the adapter
1116  *	@fw_data: the firmware image to write
1117  *	@size: image size
1118  *
1119  *	Write the supplied firmware image to the card's serial flash.
1120  *	The FW image has the following sections: @size - 8 bytes of code and
1121  *	data, followed by 4 bytes of FW version, followed by the 32-bit
1122  *	1's complement checksum of the whole image.
1123  */
1124 int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
1125 {
1126 	u32 csum;
1127 	unsigned int i;
1128 	const u32 *p = (const u32 *)fw_data;
1129 	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1130 
1131 	if ((size & 3) || size < FW_MIN_SIZE)
1132 		return -EINVAL;
1133 	if (size - 8 > FW_MAX_SIZE)
1134 		return -EFBIG;
1135 
1136 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1137 		csum += ntohl(p[i]);
1138 	if (csum != 0xffffffff) {
1139 		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1140 		       csum);
1141 		return -EINVAL;
1142 	}
1143 
1144 	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1145 	if (ret)
1146 		goto out;
1147 
1148 	size -= 8;  /* trim off version and checksum */
1149 	for (addr = FW_FLASH_BOOT_ADDR; size; ) {
1150 		unsigned int chunk_size = min(size, 256U);
1151 
1152 		ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
1153 		if (ret)
1154 			goto out;
1155 
1156 		addr += chunk_size;
1157 		fw_data += chunk_size;
1158 		size -= chunk_size;
1159 	}
1160 
1161 	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data, 1);
1162 out:
1163 	if (ret)
1164 		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1165 	return ret;
1166 }
1167 
1168 /*
1169  *	t3_load_boot - download boot flash
1170  *	@adapter: the adapter
1171  *	@boot_data: the boot image to write
1172  *	@size: image size
1173  *
1174  *	Write the supplied boot image to the card's serial flash.
1175  *	The boot image has the following sections: a 28-byte header and the
1176  *	boot image.
1177  */
1178 int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
1179 {
1180 	boot_header_t *header = (boot_header_t *)boot_data;
1181 	int ret;
1182 	unsigned int addr;
1183 	unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
1184 	unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;
1185 
1186 	/*
1187 	 * Perform some primitive sanity testing to avoid accidentally
1188 	 * writing garbage over the boot sectors.  We ought to check for
1189 	 * more but it's not worth it for now ...
1190 	 */
1191 	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1192 		CH_ERR(adapter, "boot image too small/large\n");
1193 		return -EFBIG;
1194 	}
1195 	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
1196 		CH_ERR(adapter, "boot image missing signature\n");
1197 		return -EINVAL;
1198 	}
1199 	if (header->length * BOOT_SIZE_INC != size) {
1200 		CH_ERR(adapter, "boot image header length != image length\n");
1201 		return -EINVAL;
1202 	}
1203 
1204 	ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
1205 	if (ret)
1206 		goto out;
1207 
1208 	for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
1209 		unsigned int chunk_size = min(size, 256U);
1210 
1211 		ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
1212 		if (ret)
1213 			goto out;
1214 
1215 		addr += chunk_size;
1216 		boot_data += chunk_size;
1217 		size -= chunk_size;
1218 	}
1219 
1220 out:
1221 	if (ret)
1222 		CH_ERR(adapter, "boot image download failed, error %d\n", ret);
1223 	return ret;
1224 }
1225 
1226 #define CIM_CTL_BASE 0x2000
1227 
1228 /**
1229  *	t3_cim_ctl_blk_read - read a block from CIM control region
1230  *	@adap: the adapter
1231  *	@addr: the start address within the CIM control region
1232  *	@n: number of words to read
1233  *	@valp: where to store the result
1234  *
1235  *	Reads a block of 4-byte words from the CIM control region.
1236  */
1237 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
1238 			unsigned int *valp)
1239 {
1240 	int ret = 0;
1241 
1242 	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1243 		return -EBUSY;
1244 
1245 	for ( ; !ret && n--; addr += 4) {
1246 		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1247 		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1248 				      0, 5, 2);
1249 		if (!ret)
1250 			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1251 	}
1252 	return ret;
1253 }
1254 
1255 /**
1256  *	t3_link_changed - handle interface link changes
1257  *	@adapter: the adapter
1258  *	@port_id: the port index that changed link state
1259  *
1260  *	Called when a port's link settings change to propagate the new values
1261  *	to the associated PHY and MAC.  After performing the common tasks it
1262  *	invokes an OS-specific handler.
1263  */
1264 void t3_link_changed(adapter_t *adapter, int port_id)
1265 {
1266 	int link_ok, speed, duplex, fc;
1267 	struct port_info *pi = adap2pinfo(adapter, port_id);
1268 	struct cphy *phy = &pi->phy;
1269 	struct cmac *mac = &pi->mac;
1270 	struct link_config *lc = &pi->link_config;
1271 
1272 	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1273 
1274 	if (lc->requested_fc & PAUSE_AUTONEG)
1275 		fc &= lc->requested_fc;
1276 	else
1277 		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1278 
1279 	if (link_ok == lc->link_ok && speed == lc->speed &&
1280 	    duplex == lc->duplex && fc == lc->fc)
1281 		return;                            /* nothing changed */
1282 
1283 	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1284 	    uses_xaui(adapter)) {
1285 		if (link_ok)
1286 			t3b_pcs_reset(mac);
1287 		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1288 			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
1289 	}
1290 	lc->link_ok = (unsigned char)link_ok;
1291 	lc->speed = speed < 0 ? SPEED_INVALID : speed;
1292 	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1293 
1294 	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1295 		/* Set MAC speed, duplex, and flow control to match PHY. */
1296 		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1297 		lc->fc = (unsigned char)fc;
1298 	}
1299 
1300 	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1301 }
1302 
1303 /**
1304  *	t3_link_start - apply link configuration to MAC/PHY
1305  *	@phy: the PHY to setup
1306  *	@mac: the MAC to setup
1307  *	@lc: the requested link configuration
1308  *
1309  *	Set up a port's MAC and PHY according to a desired link configuration.
1310  *	- If the PHY can auto-negotiate first decide what to advertise, then
1311  *	  enable/disable auto-negotiation as desired, and reset.
1312  *	- If the PHY does not auto-negotiate just reset it.
1313  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1314  *	  otherwise do it later based on the outcome of auto-negotiation.
1315  */
1316 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1317 {
1318 	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1319 
1320 	lc->link_ok = 0;
1321 	if (lc->supported & SUPPORTED_Autoneg) {
1322 		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1323 		if (fc) {
1324 			lc->advertising |= ADVERTISED_Asym_Pause;
1325 			if (fc & PAUSE_RX)
1326 				lc->advertising |= ADVERTISED_Pause;
1327 		}
1328 		phy->ops->advertise(phy, lc->advertising);
1329 
1330 		if (lc->autoneg == AUTONEG_DISABLE) {
1331 			lc->speed = lc->requested_speed;
1332 			lc->duplex = lc->requested_duplex;
1333 			lc->fc = (unsigned char)fc;
1334 			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1335 						   fc);
1336 			/* Also disables autoneg */
1337 			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1338 		} else
1339 			phy->ops->autoneg_enable(phy);
1340 	} else {
1341 		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1342 		lc->fc = (unsigned char)fc;
1343 		phy->ops->reset(phy, 0);
1344 	}
1345 	return 0;
1346 }
1347 
1348 /**
1349  *	t3_set_vlan_accel - control HW VLAN extraction
1350  *	@adapter: the adapter
1351  *	@ports: bitmap of adapter ports to operate on
1352  *	@on: enable (1) or disable (0) HW VLAN extraction
1353  *
1354  *	Enables or disables HW extraction of VLAN tags for the given port.
1355  */
1356 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
1357 {
1358 	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1359 			 ports << S_VLANEXTRACTIONENABLE,
1360 			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1361 }
1362 
/*
 * One entry of the interrupt-status tables walked by
 * t3_handle_intr_status(); each table is terminated by an entry with
 * mask == 0.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};
1369 
1370 /**
1371  *	t3_handle_intr_status - table driven interrupt handler
1372  *	@adapter: the adapter that generated the interrupt
1373  *	@reg: the interrupt status register to process
1374  *	@mask: a mask to apply to the interrupt status
1375  *	@acts: table of interrupt actions
1376  *	@stats: statistics counters tracking interrupt occurences
1377  *
1378  *	A table driven interrupt handler that applies a set of masks to an
1379  *	interrupt status word and performs the corresponding actions if the
1380  *	interrupts described by the mask have occured.  The actions include
1381  *	optionally printing a warning or alert message, and optionally
1382  *	incrementing a stat counter.  The table is terminated by an entry
1383  *	specifying mask 0.  Returns the number of fatal interrupt conditions.
1384  */
1385 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1386 				 unsigned int mask,
1387 				 const struct intr_info *acts,
1388 				 unsigned long *stats)
1389 {
1390 	int fatal = 0;
1391 	unsigned int status = t3_read_reg(adapter, reg) & mask;
1392 
1393 	for ( ; acts->mask; ++acts) {
1394 		if (!(status & acts->mask)) continue;
1395 		if (acts->fatal) {
1396 			fatal++;
1397 			CH_ALERT(adapter, "%s (0x%x)\n",
1398 				 acts->msg, status & acts->mask);
1399 		} else if (acts->msg)
1400 			CH_WARN(adapter, "%s (0x%x)\n",
1401 				acts->msg, status & acts->mask);
1402 		if (acts->stat_idx >= 0)
1403 			stats[acts->stat_idx]++;
1404 	}
1405 	if (status)                           /* clear processed interrupts */
1406 		t3_write_reg(adapter, reg, status);
1407 	return fatal;
1408 }
1409 
/*
 * Per-module interrupt-cause masks: the bits of each module's INT_CAUSE
 * register that the slow-path handlers below examine.
 */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
/* MSI-X table/PBA parity bits are deliberately left out of the PCI masks. */
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
	 	       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
/* The *SPI_FRM_ERR macros below are defined further down; that is fine
 * because macro expansion happens at the point of use. */
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
/* Top-level (PL) cause mask: one bit per module handled by the slow path. */
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
1464 
1465 /*
1466  * Interrupt handler for the PCIX1 module.
1467  */
1468 static void pci_intr_handler(adapter_t *adapter)
1469 {
1470 	static struct intr_info pcix1_intr_info[] = {
1471 		{ F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1472 		{ F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1473 		{ F_RCVTARABT, "PCI received target abort", -1, 1 },
1474 		{ F_RCVMSTABT, "PCI received master abort", -1, 1 },
1475 		{ F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1476 		{ F_DETPARERR, "PCI detected parity error", -1, 1 },
1477 		{ F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1478 		{ F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1479 		{ F_RCVSPLCMPERR, "PCI received split completion error", -1,
1480 		  1 },
1481 		{ F_DETCORECCERR, "PCI correctable ECC error",
1482 		  STAT_PCI_CORR_ECC, 0 },
1483 		{ F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1484 		{ F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1485 		{ V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1486 		  1 },
1487 		{ V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1488 		  1 },
1489 		{ V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1490 		  1 },
1491 		{ V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1492 		  "error", -1, 1 },
1493 		{ 0 }
1494 	};
1495 
1496 	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1497 				  pcix1_intr_info, adapter->irq_stats))
1498 		t3_fatal_err(adapter);
1499 }
1500 
1501 /*
1502  * Interrupt handler for the PCIE module.
1503  */
1504 static void pcie_intr_handler(adapter_t *adapter)
1505 {
1506 	static struct intr_info pcie_intr_info[] = {
1507 		{ F_PEXERR, "PCI PEX error", -1, 1 },
1508 		{ F_UNXSPLCPLERRR,
1509 		  "PCI unexpected split completion DMA read error", -1, 1 },
1510 		{ F_UNXSPLCPLERRC,
1511 		  "PCI unexpected split completion DMA command error", -1, 1 },
1512 		{ F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1513 		{ F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1514 		{ F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1515 		{ F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1516 		{ V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1517 		  "PCI MSI-X table/PBA parity error", -1, 1 },
1518 		{ F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1519 		{ F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1520 		{ F_RXPARERR, "PCI Rx parity error", -1, 1 },
1521 		{ F_TXPARERR, "PCI Tx parity error", -1, 1 },
1522 		{ V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1523 		{ 0 }
1524 	};
1525 
1526 	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1527 		CH_ALERT(adapter, "PEX error code 0x%x\n",
1528 			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1529 
1530 	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1531 				  pcie_intr_info, adapter->irq_stats))
1532 		t3_fatal_err(adapter);
1533 }
1534 
1535 /*
1536  * TP interrupt handler.
1537  */
1538 static void tp_intr_handler(adapter_t *adapter)
1539 {
1540 	static struct intr_info tp_intr_info[] = {
1541 		{ 0xffffff,  "TP parity error", -1, 1 },
1542 		{ 0x1000000, "TP out of Rx pages", -1, 1 },
1543 		{ 0x2000000, "TP out of Tx pages", -1, 1 },
1544 		{ 0 }
1545 	};
1546 	static struct intr_info tp_intr_info_t3c[] = {
1547 		{ 0x1fffffff,  "TP parity error", -1, 1 },
1548 		{ F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1549 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1550 		{ 0 }
1551 	};
1552 
1553 	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1554 				  adapter->params.rev < T3_REV_C ?
1555 					tp_intr_info : tp_intr_info_t3c, NULL))
1556 		t3_fatal_err(adapter);
1557 }
1558 
1559 /*
1560  * CIM interrupt handler.
1561  */
1562 static void cim_intr_handler(adapter_t *adapter)
1563 {
1564 	static struct intr_info cim_intr_info[] = {
1565 		{ F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1566 		{ F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1567 		{ F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1568 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1569 		{ F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1570 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1571 		{ F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1572 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1573 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1574 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1575 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1576 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1577 		{ F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1578 		{ F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1579 		{ F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1580 		{ F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1581 		{ F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1582 		{ F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1583 		{ F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1584 		{ F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1585 		{ F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1586 		{ F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1587 		{ F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1588 		{ F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1589 		{ 0 }
1590         };
1591 
1592 	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1593 				  cim_intr_info, NULL))
1594 		t3_fatal_err(adapter);
1595 }
1596 
1597 /*
1598  * ULP RX interrupt handler.
1599  */
1600 static void ulprx_intr_handler(adapter_t *adapter)
1601 {
1602 	static struct intr_info ulprx_intr_info[] = {
1603 		{ F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1604 		{ F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1605 		{ F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1606 		{ F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1607 		{ F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1608 		{ F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1609 		{ F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1610 		{ F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1611 		{ 0 }
1612         };
1613 
1614 	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1615 				  ulprx_intr_info, NULL))
1616 		t3_fatal_err(adapter);
1617 }
1618 
1619 /*
1620  * ULP TX interrupt handler.
1621  */
1622 static void ulptx_intr_handler(adapter_t *adapter)
1623 {
1624 	static struct intr_info ulptx_intr_info[] = {
1625 		{ F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1626 		  STAT_ULP_CH0_PBL_OOB, 0 },
1627 		{ F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1628 		  STAT_ULP_CH1_PBL_OOB, 0 },
1629 		{ 0xfc, "ULP TX parity error", -1, 1 },
1630 		{ 0 }
1631         };
1632 
1633 	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1634 				  ulptx_intr_info, adapter->irq_stats))
1635 		t3_fatal_err(adapter);
1636 }
1637 
/* Aggregate PM TX ispi/ospi framing-error bits (used by PMTX_INTR_MASK and
 * the pmtx handler table). */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1646 
1647 /*
1648  * PM TX interrupt handler.
1649  */
1650 static void pmtx_intr_handler(adapter_t *adapter)
1651 {
1652 	static struct intr_info pmtx_intr_info[] = {
1653 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1654 		{ ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
1655 		{ OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
1656 		{ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1657 		  "PMTX ispi parity error", -1, 1 },
1658 		{ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1659 		  "PMTX ospi parity error", -1, 1 },
1660 		{ 0 }
1661         };
1662 
1663 	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1664 				  pmtx_intr_info, NULL))
1665 		t3_fatal_err(adapter);
1666 }
1667 
/* Aggregate PM RX ispi/ospi framing-error bits (used by PMRX_INTR_MASK and
 * the pmrx handler table). */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1676 
1677 /*
1678  * PM RX interrupt handler.
1679  */
1680 static void pmrx_intr_handler(adapter_t *adapter)
1681 {
1682 	static struct intr_info pmrx_intr_info[] = {
1683 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1684 		{ IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
1685 		{ OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
1686 		{ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1687 		  "PMRX ispi parity error", -1, 1 },
1688 		{ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1689 		  "PMRX ospi parity error", -1, 1 },
1690 		{ 0 }
1691         };
1692 
1693 	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1694 				  pmrx_intr_info, NULL))
1695 		t3_fatal_err(adapter);
1696 }
1697 
1698 /*
1699  * CPL switch interrupt handler.
1700  */
1701 static void cplsw_intr_handler(adapter_t *adapter)
1702 {
1703 	static struct intr_info cplsw_intr_info[] = {
1704 		{ F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
1705 		{ F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
1706 		{ F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
1707 		{ F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
1708 		{ F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
1709 		{ F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
1710 		{ 0 }
1711         };
1712 
1713 	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1714 				  cplsw_intr_info, NULL))
1715 		t3_fatal_err(adapter);
1716 }
1717 
1718 /*
1719  * MPS interrupt handler.
1720  */
1721 static void mps_intr_handler(adapter_t *adapter)
1722 {
1723 	static struct intr_info mps_intr_info[] = {
1724 		{ 0x1ff, "MPS parity error", -1, 1 },
1725 		{ 0 }
1726 	};
1727 
1728 	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1729 				  mps_intr_info, NULL))
1730 		t3_fatal_err(adapter);
1731 }
1732 
/* Uncorrectable, parity, and address errors take the adapter down. */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.  Counts and logs correctable (CE), uncorrectable
 * (UE), parity (PE), and address (AE) errors for the given memory
 * controller, calls t3_fatal_err() on any fatal condition, and clears the
 * cause register.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	adapter_t *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* A_MC7_ERR_ADDR is only read on rev > 0 parts; rev 0
		 * reports address 0 — presumably the register is not
		 * usable there (TODO confirm against hardware docs). */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	/* Write back the cause to clear the processed interrupts. */
	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1785 
/* TX/RX FIFO parity errors are the fatal XGMAC conditions. */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.  Updates the per-MAC error statistics, clears
 * the cause register, and calls t3_fatal_err() on FIFO parity errors.
 * Returns nonzero iff any cause bit was set.
 */
static int mac_intr_handler(adapter_t *adap, unsigned int idx)
{
	u32 cause;
	struct cmac *mac;

	idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
	mac = &adap2pinfo(adap, idx)->mac;
	cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	/* The remaining conditions are only counted, not logged. */
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	/* Clear the processed interrupts before (possibly) going fatal. */
	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}
1824 
1825 /*
1826  * Interrupt handler for PHY events.
1827  */
1828 int t3_phy_intr_handler(adapter_t *adapter)
1829 {
1830 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1831 
1832 	for_each_port(adapter, i) {
1833 		struct port_info *p = adap2pinfo(adapter, i);
1834 
1835 		if (!(p->phy.caps & SUPPORTED_IRQ))
1836 			continue;
1837 
1838 		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1839 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
1840 
1841 			if (phy_cause & cphy_cause_link_change)
1842 				t3_link_changed(adapter, i);
1843 			if (phy_cause & cphy_cause_fifo_error)
1844 				p->phy.fifo_errors++;
1845 			if (phy_cause & cphy_cause_module_change)
1846 				t3_os_phymod_changed(adapter, i);
1847 		}
1848 	}
1849 
1850 	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1851 	return 0;
1852 }
1853 
1854 /**
1855  *	t3_slow_intr_handler - control path interrupt handler
1856  *	@adapter: the adapter
1857  *
1858  *	T3 interrupt handler for non-data interrupt events, e.g., errors.
1859  *	The designation 'slow' is because it involves register reads, while
1860  *	data interrupts typically don't involve any MMIOs.
1861  */
int t3_slow_intr_handler(adapter_t *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	/* Only service causes that are currently enabled. */
	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	/* Dispatch each pending cause bit to its module handler. */
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
	return 1;
}
1913 
1914 static unsigned int calc_gpio_intr(adapter_t *adap)
1915 {
1916 	unsigned int i, gpi_intr = 0;
1917 
1918 	for_each_port(adap, i)
1919 		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1920 		    adapter_info(adap)->gpio_intr[i])
1921 			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1922 	return gpi_intr;
1923 }
1924 
1925 /**
1926  *	t3_intr_enable - enable interrupts
1927  *	@adapter: the adapter whose interrupts should be enabled
1928  *
1929  *	Enable interrupts by setting the interrupt enable registers of the
1930  *	various HW modules and then enabling the top-level interrupt
1931  *	concentrator.
1932  */
void t3_intr_enable(adapter_t *adapter)
{
	/* Per-module interrupt-enable registers, written in one pass by
	 * t3_write_regs() below.  The PMTX and CM MC7 instances sit at
	 * fixed offsets from the PMRX instance, hence the address
	 * arithmetic. */
	static struct addr_val_pair intr_en_avp[] = {
		{ A_MC7_INT_ENABLE, MC7_INTR_MASK },
		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
			MC7_INTR_MASK },
		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
			MC7_INTR_MASK },
		{ A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
		{ A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
		{ A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
		{ A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
		{ A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
		{ A_MPS_INT_ENABLE, MPS_INTR_MASK },
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	/* TP and SGE masks differ between chip revisions. */
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
	t3_write_reg(adapter, A_SG_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ?
		     SGE_INTR_MASK | F_FLEMPTY : SGE_INTR_MASK);

	/* Rev > 0 parts support additional CPL/ULPTX error conditions. */
	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	/* Enable the GPIO lines used for PHY interrupts. */
	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	/* Finally enable the top-level concentrator and flush the write. */
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);          /* flush */
}
1978 
1979 /**
1980  *	t3_intr_disable - disable a card's interrupts
1981  *	@adapter: the adapter whose interrupts should be disabled
1982  *
1983  *	Disable interrupts.  We only disable the top-level interrupt
1984  *	concentrator and the SGE data interrupts.
1985  */
void t3_intr_disable(adapter_t *adapter)
{
	/* Mask the top-level concentrator and flush the posted write
	 * before clearing the software mask. */
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);  /* flush */
	adapter->slow_intr_mask = 0;
}
1992 
1993 /**
1994  *	t3_intr_clear - clear all interrupts
1995  *	@adapter: the adapter whose interrupts should be cleared
1996  *
1997  *	Clears all interrupts.
1998  */
void t3_intr_clear(adapter_t *adapter)
{
	/* Interrupt-cause registers cleared by writing all 1s.  The two
	 * non-PMRX MC7 instances are addressed relative to the PMRX one. */
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	/* Clear the top-level cause last and flush the posted write. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0);          /* flush */
}
2033 
2034 /**
2035  *	t3_port_intr_enable - enable port-specific interrupts
2036  *	@adapter: associated adapter
2037  *	@idx: index of port whose interrupts should be enabled
2038  *
2039  *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2040  *	adapter port.
2041  */
void t3_port_intr_enable(adapter_t *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	/* Enable the port's MAC interrupts, then its PHY interrupts. */
	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
	pi->phy.ops->intr_enable(&pi->phy);
}
2049 
2050 /**
2051  *	t3_port_intr_disable - disable port-specific interrupts
2052  *	@adapter: associated adapter
2053  *	@idx: index of port whose interrupts should be disabled
2054  *
2055  *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2056  *	adapter port.
2057  */
void t3_port_intr_disable(adapter_t *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	/* Mask the port's MAC interrupts, then its PHY interrupts. */
	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
	pi->phy.ops->intr_disable(&pi->phy);
}
2065 
2066 /**
2067  *	t3_port_intr_clear - clear port-specific interrupts
2068  *	@adapter: associated adapter
2069  *	@idx: index of port whose interrupts to clear
2070  *
2071  *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2072  *	adapter port.
2073  */
void t3_port_intr_clear(adapter_t *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	/* Writing all 1s clears the MAC interrupt cause bits. */
	t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
	pi->phy.ops->intr_clear(&pi->phy);
}
2081 
2082 #define SG_CONTEXT_CMD_ATTEMPTS 100
2083 
2084 /**
2085  * 	t3_sge_write_context - write an SGE context
2086  * 	@adapter: the adapter
2087  * 	@id: the context id
2088  * 	@type: the context type
2089  *
2090  * 	Program an SGE context with the values already loaded in the
2091  * 	CONTEXT_DATA? registers.
2092  */
static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
				unsigned int type)
{
	/* Write-enable all 128 context bits, issue the write command
	 * (opcode 1), and wait for BUSY to clear. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2105 
/*
 * Zero all four CONTEXT_DATA registers and write them to the SGE context
 * @id of the given @type, effectively clearing that context.
 */
static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
{
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	return t3_sge_write_context(adap, id, type);
}
2114 
2115 /**
2116  *	t3_sge_init_ecntxt - initialize an SGE egress context
2117  *	@adapter: the adapter to configure
2118  *	@id: the context id
2119  *	@gts_enable: whether to enable GTS for the context
2120  *	@type: the egress context type
2121  *	@respq: associated response queue
2122  *	@base_addr: base address of queue
2123  *	@size: number of queue entries
2124  *	@token: uP token
2125  *	@gen: initial generation value for the context
2126  *	@cidx: consumer pointer
2127  *
2128  *	Initialize an SGE egress context and make it ready for use.  If the
2129  *	platform allows concurrent context operations, the caller is
2130  *	responsible for appropriate locking.
2131  */
int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	/* Offload queues start with no credits; others get FW_WR_NUM. */
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)     /* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* The 4K-aligned base address is split across DATA1 (low 16 bits),
	 * DATA2 (middle 32 bits), and DATA3 (top 4 bits). */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO((u32)base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
2158 
2159 /**
2160  *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2161  *	@adapter: the adapter to configure
2162  *	@id: the context id
2163  *	@gts_enable: whether to enable GTS for the context
2164  *	@base_addr: base address of queue
2165  *	@size: number of queue entries
2166  *	@bsize: size of each buffer for this queue
2167  *	@cong_thres: threshold to signal congestion to upstream producers
2168  *	@gen: initial generation value for the context
2169  *	@cidx: consumer pointer
2170  *
2171  *	Initialize an SGE free list context and make it ready for use.  The
2172  *	caller is responsible for ensuring only one context operation occurs
2173  *	at a time.
2174  */
int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
			u64 base_addr, unsigned int size, unsigned int bsize,
			unsigned int cong_thres, int gen, unsigned int cidx)
{
	if (base_addr & 0xfff)     /* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Base address spans DATA0 (low 32 bits of the 4K-page number) and
	 * the high bits in DATA1; cidx and bsize are likewise split across
	 * register boundaries into LO/HI fields. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32)base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}
2198 
2199 /**
2200  *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2201  *	@adapter: the adapter to configure
2202  *	@id: the context id
2203  *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2204  *	@base_addr: base address of queue
2205  *	@size: number of queue entries
2206  *	@fl_thres: threshold for selecting the normal or jumbo free list
2207  *	@gen: initial generation value for the context
2208  *	@cidx: consumer pointer
2209  *
2210  *	Initialize an SGE response queue context and make it ready for use.
2211  *	The caller is responsible for ensuring only one context operation
2212  *	occurs at a time.
2213  */
int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
			 u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)     /* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
	base_addr >>= 32;
	/* irq_vec_idx < 0 means no interrupt for this queue. */
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
2237 
2238 /**
2239  *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2240  *	@adapter: the adapter to configure
2241  *	@id: the context id
2242  *	@base_addr: base address of queue
2243  *	@size: number of queue entries
2244  *	@rspq: response queue for async notifications
2245  *	@ovfl_mode: CQ overflow mode
2246  *	@credits: completion queue credits
2247  *	@credit_thres: the credit threshold
2248  *
2249  *	Initialize an SGE completion queue context and make it ready for use.
2250  *	The caller is responsible for ensuring only one context operation
2251  *	occurs at a time.
2252  */
int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)     /* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
	base_addr >>= 32;
	/* ovfl_mode is written to both the OVERFLOW_MODE and ERR fields. */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}
2274 
2275 /**
2276  *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2277  *	@adapter: the adapter
2278  *	@id: the egress context id
2279  *	@enable: enable (1) or disable (0) the context
2280  *
2281  *	Enable or disable an SGE egress context.  The caller is responsible for
2282  *	ensuring only one context operation occurs at a time.
2283  */
int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Write-enable only the EC_VALID bit so the rest of the context
	 * is left untouched, then update just that bit. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2299 
2300 /**
2301  *	t3_sge_disable_fl - disable an SGE free-buffer list
2302  *	@adapter: the adapter
2303  *	@id: the free list context id
2304  *
2305  *	Disable an SGE free-buffer list.  The caller is responsible for
2306  *	ensuring only one context operation occurs at a time.
2307  */
int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Write-enable only the FL_SIZE field and set it to 0, which
	 * disables the free list without disturbing other fields. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2323 
2324 /**
2325  *	t3_sge_disable_rspcntxt - disable an SGE response queue
2326  *	@adapter: the adapter
2327  *	@id: the response queue context id
2328  *
2329  *	Disable an SGE response queue.  The caller is responsible for
2330  *	ensuring only one context operation occurs at a time.
2331  */
int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Write-enable only the CQ_SIZE field and set it to 0, disabling
	 * the response queue without disturbing other fields. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2347 
2348 /**
2349  *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2350  *	@adapter: the adapter
2351  *	@id: the completion queue context id
2352  *
2353  *	Disable an SGE completion queue.  The caller is responsible for
2354  *	ensuring only one context operation occurs at a time.
2355  */
int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Write-enable only the CQ_SIZE field and set it to 0, disabling
	 * the completion queue without disturbing other fields. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2371 
2372 /**
2373  *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2374  *	@adapter: the adapter
2375  *	@id: the context id
2376  *	@op: the operation to perform
2377  *	@credits: credits to return to the CQ
2378  *
2379  *	Perform the selected operation on an SGE completion queue context.
2380  *	The caller is responsible for ensuring only one context operation
2381  *	occurs at a time.
2382  *
2383  *	For most operations the function returns the current HW position in
2384  *	the completion queue.
2385  */
int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	/* Opcodes 2..6 return the current CQ index. */
	if (op >= 2 && op < 7) {
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		/* Rev-0 parts don't return the index in the command
		 * register; issue an explicit context read (opcode 0)
		 * and extract the index from DATA0. */
		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}
2415 
2416 /**
2417  * 	t3_sge_read_context - read an SGE context
2418  * 	@type: the context type
2419  * 	@adapter: the adapter
2420  * 	@id: the context id
2421  * 	@data: holds the retrieved context
2422  *
2423  * 	Read an SGE egress context.  The caller is responsible for ensuring
2424  * 	only one context operation occurs at a time.
2425  */
static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
			       unsigned int id, u32 data[4])
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Issue a read command (opcode 0), wait for completion, then
	 * collect the four CONTEXT_DATA registers. */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
			    SG_CONTEXT_CMD_ATTEMPTS, 1))
		return -EIO;
	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
	return 0;
}
2443 
2444 /**
2445  * 	t3_sge_read_ecntxt - read an SGE egress context
2446  * 	@adapter: the adapter
2447  * 	@id: the context id
2448  * 	@data: holds the retrieved context
2449  *
2450  * 	Read an SGE egress context.  The caller is responsible for ensuring
2451  * 	only one context operation occurs at a time.
2452  */
2453 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2454 {
2455 	if (id >= 65536)
2456 		return -EINVAL;
2457 	return t3_sge_read_context(F_EGRESS, adapter, id, data);
2458 }
2459 
2460 /**
2461  * 	t3_sge_read_cq - read an SGE CQ context
2462  * 	@adapter: the adapter
2463  * 	@id: the context id
2464  * 	@data: holds the retrieved context
2465  *
2466  * 	Read an SGE CQ context.  The caller is responsible for ensuring
2467  * 	only one context operation occurs at a time.
2468  */
2469 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2470 {
2471 	if (id >= 65536)
2472 		return -EINVAL;
2473 	return t3_sge_read_context(F_CQ, adapter, id, data);
2474 }
2475 
2476 /**
2477  * 	t3_sge_read_fl - read an SGE free-list context
2478  * 	@adapter: the adapter
2479  * 	@id: the context id
2480  * 	@data: holds the retrieved context
2481  *
2482  * 	Read an SGE free-list context.  The caller is responsible for ensuring
2483  * 	only one context operation occurs at a time.
2484  */
2485 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2486 {
2487 	if (id >= SGE_QSETS * 2)
2488 		return -EINVAL;
2489 	return t3_sge_read_context(F_FREELIST, adapter, id, data);
2490 }
2491 
2492 /**
2493  * 	t3_sge_read_rspq - read an SGE response queue context
2494  * 	@adapter: the adapter
2495  * 	@id: the context id
2496  * 	@data: holds the retrieved context
2497  *
2498  * 	Read an SGE response queue context.  The caller is responsible for
2499  * 	ensuring only one context operation occurs at a time.
2500  */
2501 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2502 {
2503 	if (id >= SGE_QSETS)
2504 		return -EINVAL;
2505 	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2506 }
2507 
2508 /**
2509  *	t3_config_rss - configure Rx packet steering
2510  *	@adapter: the adapter
2511  *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2512  *	@cpus: values for the CPU lookup table (0xff terminated)
2513  *	@rspq: values for the response queue lookup table (0xffff terminated)
2514  *
2515  *	Programs the receive packet steering logic.  @cpus and @rspq provide
2516  *	the values for the CPU and response queue lookup tables.  If they
2517  *	provide fewer values than the size of the tables the supplied values
2518  *	are used repeatedly until the tables are fully populated.
2519  */
void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
		   const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	/* Fill the CPU lookup table two entries per write; the index goes
	 * in bits 16+.  The supplied values are consumed cyclically: when
	 * the next value is the 0xff terminator we restart from the
	 * beginning of @cpus. */
	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;

			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	/* Same cyclic scheme for the response queue map table, with a
	 * 0xffff terminator. */
	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			if (rspq[q_idx] == 0xffff)
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
2547 
2548 /**
2549  *	t3_read_rss - read the contents of the RSS tables
2550  *	@adapter: the adapter
2551  *	@lkup: holds the contents of the RSS lookup table
2552  *	@map: holds the contents of the RSS map table
2553  *
2554  *	Reads the contents of the receive packet steering tables.
2555  */
int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
{
	int i;
	u32 val;

	/* Reading is done by writing the entry index (with 0xffff0000 in
	 * the upper half) and reading the register back; bit 31 of the
	 * readback indicates a valid result. */
	if (lkup)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			/* Each entry holds two 8-bit CPU values. */
			*lkup++ = (u8)val;
			*lkup++ = (u8)(val >> 8);
		}

	if (map)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*map++ = (u16)val;
		}
	return 0;
}
2583 
2584 /**
2585  *	t3_tp_set_offload_mode - put TP in NIC/offload mode
2586  *	@adap: the adapter
2587  *	@enable: 1 to select offload mode, 0 for regular NIC
2588  *
2589  *	Switches TP to NIC/offload mode.
2590  */
void t3_tp_set_offload_mode(adapter_t *adap, int enable)
{
	/* NICMODE is the inverse of offload mode.  Only switch into
	 * offload mode on offload-capable adapters; switching back to
	 * NIC mode is always allowed. */
	if (is_offload(adap) || !enable)
		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
				 V_NICMODE(!enable));
}
2597 
2598 /**
2599  *	tp_wr_bits_indirect - set/clear bits in an indirect TP register
2600  *	@adap: the adapter
2601  *	@addr: the indirect TP register address
2602  *	@mask: specifies the field within the register to modify
2603  *	@val: new value for the field
2604  *
2605  *	Sets a field of an indirect TP register to the given value.
2606  */
static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
				unsigned int mask, unsigned int val)
{
	/* Read-modify-write through the TP PIO window: select the
	 * indirect register, merge @val into the field selected by
	 * @mask, and write the result back. */
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2614 
2615 /**
2616  *	t3_enable_filters - enable the HW filters
2617  *	@adap: the adapter
2618  *
2619  *	Enables the HW filters for NIC traffic.
2620  */
void t3_enable_filters(adapter_t *adap)
{
	/* Leave NIC mode, enable the MC5 filter engine, turn on 5-tuple
	 * lookups, and make TP look up every packet. */
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
	t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
	t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
	tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
}
2628 
2629 /**
2630  *	pm_num_pages - calculate the number of pages of the payload memory
2631  *	@mem_size: the size of the payload memory
2632  *	@pg_size: the size of each payload memory page
2633  *
2634  *	Calculate the number of pages, each of the given size, that fit in a
2635  *	memory of the specified size, respecting the HW requirement that the
2636  *	number of pages must be a multiple of 24.
2637  */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int pages = mem_size / pg_size;

	/* Round down to a multiple of 24, as the HW requires. */
	return pages / 24 * 24;
}
2645 
/*
 * Program the base address of TP memory region A_<reg> to the running
 * offset @start, then advance @start by @size.  Wrapped in do { } while (0)
 * so the macro expands to a single statement and is safe inside unbraced
 * if/else bodies; @start is evaluated twice and must be a simple lvalue.
 */
#define mem_region(adap, start, size, reg) do { \
	t3_write_reg((adap), A_ ## reg, (start)); \
	(start) += (size); \
} while (0)
2649 
2650 /**
2651  *	partition_mem - partition memory and configure TP memory settings
2652  *	@adap: the adapter
2653  *	@p: the TP parameters
2654  *
2655  *	Partitions context and payload memory and configures TP's memory
2656  *	registers.
2657  */
static void partition_mem(adapter_t *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	/* Pick the timer configuration based on how many TIDs fit; rev-0
	 * parts always use the defaults (timers = 0, shift = 22). */
	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	/* Lay out context memory: TCBs first, then egress and CQ context
	 * regions, timers, pstructs, and the free-list regions, tracking
	 * the running offset in m. */
	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	/* CIM SDRAM gets the remainder, 4K-aligned. */
	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	/* If the remaining memory supports fewer connections than the MC5
	 * is configured for, absorb the difference into server entries. */
	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}
2715 
/*
 * Write one of TP's indirect registers: the target address is placed in
 * A_TP_PIO_ADDR and the value in A_TP_PIO_DATA.  The ordering of the two
 * writes is required by the indirect-access protocol.
 */
static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2721 
/**
 *	tp_config - program TP's global configuration registers
 *	@adap: the adapter
 *	@p: the TP parameters (not referenced in this routine; kept for
 *	    symmetry with the other TP setup helpers)
 *
 *	Sets up checksum offload, TCP option handling, delayed-ACK behavior,
 *	Tx pacing, and per-revision tuning.  Several of the raw constants
 *	(0x18141814, 0x5050105, 0xfa50, 0xfac688, ...) are vendor tuning
 *	values whose bit meanings are not documented here -- consult the T3
 *	register reference before changing them.
 */
static void tp_config(adapter_t *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	/* T3A (rev 0) uses a different bit position for the ESND enable. */
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 			F_T3A_ENABLEESND);
	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	/*
	 * Written twice with different values on purpose in the original
	 * code -- presumably the first write is a required settling value;
	 * TODO confirm against the hardware errata before "fixing".
	 */
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		/* Rev B+: automatic Tx pacing plus extra indirect tuning. */
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
				 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
		tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
		tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
	} else
		/* Rev 0 (T3A) only supports fixed Tx pacing. */
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);

	/* Adapters with more than two ports need extra port-demux setup. */
	if (adap->params.nports > 2) {
		t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
				 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
				 F_ENABLERXPORTFROMADDR);
		tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
				    V_RXMAPMODE(M_RXMAPMODE), 0);
		tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
			       V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
			       F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
			       F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
		tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
		tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
		tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
	}
}
2787 
2788 /* TCP timer values in ms */
2789 #define TP_DACK_TIMER 50
2790 #define TP_RTO_MIN    250
2791 
2792 /**
2793  *	tp_set_timers - set TP timing parameters
2794  *	@adap: the adapter to set
2795  *	@core_clk: the core clock frequency in Hz
2796  *
2797  *	Set TP's timing parameters, such as the various timer resolutions and
2798  *	the TCP timer values.
2799  */
2800 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
2801 {
2802 	unsigned int tre = adap->params.tp.tre;
2803 	unsigned int dack_re = adap->params.tp.dack_re;
2804 	unsigned int tstamp_re = fls(core_clk / 1000);     /* 1ms, at least */
2805 	unsigned int tps = core_clk >> tre;
2806 
2807 	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2808 		     V_DELAYEDACKRESOLUTION(dack_re) |
2809 		     V_TIMESTAMPRESOLUTION(tstamp_re));
2810 	t3_write_reg(adap, A_TP_DACK_TIMER,
2811 		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2812 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2813 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2814 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2815 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2816 	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2817 		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2818 		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2819 		     V_KEEPALIVEMAX(9));
2820 
2821 #define SECONDS * tps
2822 
2823 	t3_write_reg(adap, A_TP_MSL,
2824 		     adap->params.rev > 0 ? 0 : 2 SECONDS);
2825 	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2826 	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2827 	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2828 	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2829 	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2830 	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2831 	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2832 	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2833 
2834 #undef SECONDS
2835 }
2836 
2837 #ifdef CONFIG_CHELSIO_T3_CORE
2838 /**
2839  *	t3_tp_set_coalescing_size - set receive coalescing size
2840  *	@adap: the adapter
2841  *	@size: the receive coalescing size
2842  *	@psh: whether a set PSH bit should deliver coalesced data
2843  *
2844  *	Set the receive coalescing size and PSH bit handling.
2845  */
2846 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
2847 {
2848 	u32 val;
2849 
2850 	if (size > MAX_RX_COALESCING_LEN)
2851 		return -EINVAL;
2852 
2853 	val = t3_read_reg(adap, A_TP_PARA_REG3);
2854 	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2855 
2856 	if (size) {
2857 		val |= F_RXCOALESCEENABLE;
2858 		if (psh)
2859 			val |= F_RXCOALESCEPSHEN;
2860 		size = min(MAX_RX_COALESCING_LEN, size);
2861 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2862 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2863 	}
2864 	t3_write_reg(adap, A_TP_PARA_REG3, val);
2865 	return 0;
2866 }
2867 
2868 /**
2869  *	t3_tp_set_max_rxsize - set the max receive size
2870  *	@adap: the adapter
2871  *	@size: the max receive size
2872  *
2873  *	Set TP's max receive size.  This is the limit that applies when
2874  *	receive coalescing is disabled.
2875  */
2876 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
2877 {
2878 	t3_write_reg(adap, A_TP_PARA_REG7,
2879 		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2880 }
2881 
2882 static void __devinit init_mtus(unsigned short mtus[])
2883 {
2884 	/*
2885 	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
2886 	 * it can accomodate max size TCP/IP headers when SACK and timestamps
2887 	 * are enabled and still have at least 8 bytes of payload.
2888 	 */
2889 	mtus[0] = 88;
2890 	mtus[1] = 88;
2891 	mtus[2] = 256;
2892 	mtus[3] = 512;
2893 	mtus[4] = 576;
2894 	mtus[5] = 1024;
2895 	mtus[6] = 1280;
2896 	mtus[7] = 1492;
2897 	mtus[8] = 1500;
2898 	mtus[9] = 2002;
2899 	mtus[10] = 2048;
2900 	mtus[11] = 4096;
2901 	mtus[12] = 4352;
2902 	mtus[13] = 8192;
2903 	mtus[14] = 9000;
2904 	mtus[15] = 9600;
2905 }
2906 
2907 /**
2908  *	init_cong_ctrl - initialize congestion control parameters
2909  *	@a: the alpha values for congestion control
2910  *	@b: the beta values for congestion control
2911  *
2912  *	Initialize the congestion control parameters.
2913  */
2914 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2915 {
2916 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2917 	a[9] = 2;
2918 	a[10] = 3;
2919 	a[11] = 4;
2920 	a[12] = 5;
2921 	a[13] = 6;
2922 	a[14] = 7;
2923 	a[15] = 8;
2924 	a[16] = 9;
2925 	a[17] = 10;
2926 	a[18] = 14;
2927 	a[19] = 17;
2928 	a[20] = 21;
2929 	a[21] = 25;
2930 	a[22] = 30;
2931 	a[23] = 35;
2932 	a[24] = 45;
2933 	a[25] = 60;
2934 	a[26] = 80;
2935 	a[27] = 100;
2936 	a[28] = 200;
2937 	a[29] = 300;
2938 	a[30] = 400;
2939 	a[31] = 500;
2940 
2941 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2942 	b[9] = b[10] = 1;
2943 	b[11] = b[12] = 2;
2944 	b[13] = b[14] = b[15] = b[16] = 3;
2945 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2946 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2947 	b[28] = b[29] = 6;
2948 	b[30] = b[31] = 7;
2949 }
2950 
2951 /* The minimum additive increment value for the congestion control table */
2952 #define CC_MIN_INCR 2U
2953 
2954 /**
2955  *	t3_load_mtus - write the MTU and congestion control HW tables
2956  *	@adap: the adapter
2957  *	@mtus: the unrestricted values for the MTU table
2958  *	@alpha: the values for the congestion control alpha parameter
2959  *	@beta: the values for the congestion control beta parameter
2960  *	@mtu_cap: the maximum permitted effective MTU
2961  *
2962  *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2963  *	Update the high-speed congestion control table with the supplied alpha,
2964  * 	beta, and MTUs.
2965  */
2966 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
2967 		  unsigned short alpha[NCCTRL_WIN],
2968 		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2969 {
2970 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2971 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2972 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2973 		28672, 40960, 57344, 81920, 114688, 163840, 229376 };
2974 
2975 	unsigned int i, w;
2976 
2977 	for (i = 0; i < NMTUS; ++i) {
2978 		unsigned int mtu = min(mtus[i], mtu_cap);
2979 		unsigned int log2 = fls(mtu);
2980 
2981 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
2982 			log2--;
2983 		t3_write_reg(adap, A_TP_MTU_TABLE,
2984 			     (i << 24) | (log2 << 16) | mtu);
2985 
2986 		for (w = 0; w < NCCTRL_WIN; ++w) {
2987 			unsigned int inc;
2988 
2989 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2990 				  CC_MIN_INCR);
2991 
2992 			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2993 				     (w << 16) | (beta[w] << 13) | inc);
2994 		}
2995 	}
2996 }
2997 
2998 /**
2999  *	t3_read_hw_mtus - returns the values in the HW MTU table
3000  *	@adap: the adapter
3001  *	@mtus: where to store the HW MTU values
3002  *
3003  *	Reads the HW MTU table.
3004  */
3005 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
3006 {
3007 	int i;
3008 
3009 	for (i = 0; i < NMTUS; ++i) {
3010 		unsigned int val;
3011 
3012 		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3013 		val = t3_read_reg(adap, A_TP_MTU_TABLE);
3014 		mtus[i] = val & 0x3fff;
3015 	}
3016 }
3017 
3018 /**
3019  *	t3_get_cong_cntl_tab - reads the congestion control table
3020  *	@adap: the adapter
3021  *	@incr: where to store the alpha values
3022  *
3023  *	Reads the additive increments programmed into the HW congestion
3024  *	control table.
3025  */
3026 void t3_get_cong_cntl_tab(adapter_t *adap,
3027 			  unsigned short incr[NMTUS][NCCTRL_WIN])
3028 {
3029 	unsigned int mtu, w;
3030 
3031 	for (mtu = 0; mtu < NMTUS; ++mtu)
3032 		for (w = 0; w < NCCTRL_WIN; ++w) {
3033 			t3_write_reg(adap, A_TP_CCTRL_TABLE,
3034 				     0xffff0000 | (mtu << 5) | w);
3035 			incr[mtu][w] = (unsigned short)t3_read_reg(adap,
3036 				        A_TP_CCTRL_TABLE) & 0x1fff;
3037 		}
3038 }
3039 
3040 /**
3041  *	t3_tp_get_mib_stats - read TP's MIB counters
3042  *	@adap: the adapter
3043  *	@tps: holds the returned counter values
3044  *
3045  *	Returns the values of TP's MIB counters.
3046  */
3047 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
3048 {
3049 	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
3050 			 sizeof(*tps) / sizeof(u32), 0);
3051 }
3052 
3053 /**
3054  *	t3_read_pace_tbl - read the pace table
3055  *	@adap: the adapter
3056  *	@pace_vals: holds the returned values
3057  *
3058  *	Returns the values of TP's pace table in nanoseconds.
3059  */
3060 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
3061 {
3062 	unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
3063 
3064 	for (i = 0; i < NTX_SCHED; i++) {
3065 		t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3066 		pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3067 	}
3068 }
3069 
3070 /**
3071  *	t3_set_pace_tbl - set the pace table
3072  *	@adap: the adapter
3073  *	@pace_vals: the pace values in nanoseconds
3074  *	@start: index of the first entry in the HW pace table to set
3075  *	@n: how many entries to set
3076  *
3077  *	Sets (a subset of the) HW pace table.
3078  */
3079 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3080 		     unsigned int start, unsigned int n)
3081 {
3082 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3083 
3084 	for ( ; n; n--, start++, pace_vals++)
3085 		t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3086 			     ((*pace_vals + tick_ns / 2) / tick_ns));
3087 }
3088 
/*
 * Program the LLIMIT/ULIMIT register pair of a ULP RX region and advance
 * @start past the region.  NOTE(review): this is a multi-statement macro
 * without a do { } while (0) wrapper and it modifies @start, so it is
 * only safe when expanded as a complete statement, as ulp_config() does.
 */
#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	start += len

/* Same for a ULP TX region, but does not advance @start. */
#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)
3099 
/*
 * Lay out the ULP memory regions back-to-back starting at the end of the
 * Rx payload channel, each sized as a fraction of the Rx channel.  The
 * ulptx_region() entries share the running offset but do not advance it;
 * the following ulp_region() with the same fraction covers both.
 */
static void ulp_config(adapter_t *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	/* Accept all TDDP tags. */
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
3113 
3114 
3115 /**
3116  *	t3_set_proto_sram - set the contents of the protocol sram
3117  *	@adapter: the adapter
3118  *	@data: the protocol image
3119  *
3120  *	Write the contents of the protocol SRAM.
3121  */
3122 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3123 {
3124 	int i;
3125 	const u32 *buf = (const u32 *)data;
3126 
3127 	for (i = 0; i < PROTO_SRAM_LINES; i++) {
3128 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3129 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3130 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3131 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3132 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
3133 
3134 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3135 		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3136 			return -EIO;
3137 	}
3138 	return 0;
3139 }
3140 #endif
3141 
3142 /**
3143  *	t3_config_trace_filter - configure one of the tracing filters
3144  *	@adapter: the adapter
3145  *	@tp: the desired trace filter parameters
3146  *	@filter_index: which filter to configure
3147  *	@invert: if set non-matching packets are traced instead of matching ones
3148  *	@enable: whether to enable or disable the filter
3149  *
3150  *	Configures one of the tracing filters available in HW.
3151  */
3152 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3153 			    int filter_index, int invert, int enable)
3154 {
3155 	u32 addr, key[4], mask[4];
3156 
3157 	key[0] = tp->sport | (tp->sip << 16);
3158 	key[1] = (tp->sip >> 16) | (tp->dport << 16);
3159 	key[2] = tp->dip;
3160 	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3161 
3162 	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3163 	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3164 	mask[2] = tp->dip_mask;
3165 	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3166 
3167 	if (invert)
3168 		key[3] |= (1 << 29);
3169 	if (enable)
3170 		key[3] |= (1 << 28);
3171 
3172 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3173 	tp_wr_indirect(adapter, addr++, key[0]);
3174 	tp_wr_indirect(adapter, addr++, mask[0]);
3175 	tp_wr_indirect(adapter, addr++, key[1]);
3176 	tp_wr_indirect(adapter, addr++, mask[1]);
3177 	tp_wr_indirect(adapter, addr++, key[2]);
3178 	tp_wr_indirect(adapter, addr++, mask[2]);
3179 	tp_wr_indirect(adapter, addr++, key[3]);
3180 	tp_wr_indirect(adapter, addr,   mask[3]);
3181 	(void) t3_read_reg(adapter, A_TP_PIO_DATA);
3182 }
3183 
3184 /**
3185  *	t3_config_sched - configure a HW traffic scheduler
3186  *	@adap: the adapter
3187  *	@kbps: target rate in Kbps
3188  *	@sched: the scheduler index
3189  *
3190  *	Configure a Tx HW scheduler for the target rate.
3191  */
3192 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3193 {
3194 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3195 	unsigned int clk = adap->params.vpd.cclk * 1000;
3196 	unsigned int selected_cpt = 0, selected_bpt = 0;
3197 
3198 	if (kbps > 0) {
3199 		kbps *= 125;     /* -> bytes */
3200 		for (cpt = 1; cpt <= 255; cpt++) {
3201 			tps = clk / cpt;
3202 			bpt = (kbps + tps / 2) / tps;
3203 			if (bpt > 0 && bpt <= 255) {
3204 				v = bpt * tps;
3205 				delta = v >= kbps ? v - kbps : kbps - v;
3206 				if (delta < mindelta) {
3207 					mindelta = delta;
3208 					selected_cpt = cpt;
3209 					selected_bpt = bpt;
3210 				}
3211 			} else if (selected_cpt)
3212 				break;
3213 		}
3214 		if (!selected_cpt)
3215 			return -EINVAL;
3216 	}
3217 	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3218 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3219 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3220 	if (sched & 1)
3221 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3222 	else
3223 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3224 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3225 	return 0;
3226 }
3227 
3228 /**
3229  *	t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3230  *	@adap: the adapter
3231  *	@sched: the scheduler index
3232  *	@ipg: the interpacket delay in tenths of nanoseconds
3233  *
3234  *	Set the interpacket delay for a HW packet rate scheduler.
3235  */
3236 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3237 {
3238 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3239 
3240 	/* convert ipg to nearest number of core clocks */
3241 	ipg *= core_ticks_per_usec(adap);
3242 	ipg = (ipg + 5000) / 10000;
3243 	if (ipg > 0xffff)
3244 		return -EINVAL;
3245 
3246 	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3247 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3248 	if (sched & 1)
3249 		v = (v & 0xffff) | (ipg << 16);
3250 	else
3251 		v = (v & 0xffff0000) | ipg;
3252 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3253 	t3_read_reg(adap, A_TP_TM_PIO_DATA);
3254 	return 0;
3255 }
3256 
3257 /**
3258  *	t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3259  *	@adap: the adapter
3260  *	@sched: the scheduler index
3261  *	@kbps: the byte rate in Kbps
3262  *	@ipg: the interpacket delay in tenths of nanoseconds
3263  *
3264  *	Return the current configuration of a HW Tx scheduler.
3265  */
3266 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3267 		     unsigned int *ipg)
3268 {
3269 	unsigned int v, addr, bpt, cpt;
3270 
3271 	if (kbps) {
3272 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3273 		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3274 		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3275 		if (sched & 1)
3276 			v >>= 16;
3277 		bpt = (v >> 8) & 0xff;
3278 		cpt = v & 0xff;
3279 		if (!cpt)
3280 			*kbps = 0;        /* scheduler disabled */
3281 		else {
3282 			v = (adap->params.vpd.cclk * 1000) / cpt;
3283 			*kbps = (v * bpt) / 125;
3284 		}
3285 	}
3286 	if (ipg) {
3287 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3288 		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3289 		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3290 		if (sched & 1)
3291 			v >>= 16;
3292 		v &= 0xffff;
3293 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3294 	}
3295 }
3296 
3297 /**
3298  *	tp_init - configure TP
3299  *	@adap: the adapter
3300  *	@p: TP configuration parameters
3301  *
3302  *	Initializes the TP HW module.
3303  */
3304 static int tp_init(adapter_t *adap, const struct tp_params *p)
3305 {
3306 	int busy = 0;
3307 
3308 	tp_config(adap, p);
3309 	t3_set_vlan_accel(adap, 3, 0);
3310 
3311 	if (is_offload(adap)) {
3312 		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3313 		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3314 		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3315 				       0, 1000, 5);
3316 		if (busy)
3317 			CH_ERR(adap, "TP initialization timed out\n");
3318 	}
3319 
3320 	if (!busy)
3321 		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3322 	return busy;
3323 }
3324 
3325 /**
3326  *	t3_mps_set_active_ports - configure port failover
3327  *	@adap: the adapter
3328  *	@port_mask: bitmap of active ports
3329  *
3330  *	Sets the active ports according to the supplied bitmap.
3331  */
3332 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3333 {
3334 	if (port_mask & ~((1 << adap->params.nports) - 1))
3335 		return -EINVAL;
3336 	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3337 			 port_mask << S_PORT0ACTIVE);
3338 	return 0;
3339 }
3340 
3341 /**
3342  * 	chan_init_hw - channel-dependent HW initialization
3343  *	@adap: the adapter
3344  *	@chan_map: bitmap of Tx channels being used
3345  *
3346  *	Perform the bits of HW initialization that are dependent on the Tx
3347  *	channels being used.
3348  */
3349 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3350 {
3351 	int i;
3352 
3353 	if (chan_map != 3) {                                 /* one channel */
3354 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3355 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3356 		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3357 			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3358 					      F_TPTXPORT1EN | F_PORT1ACTIVE));
3359 		t3_write_reg(adap, A_PM1_TX_CFG,
3360 			     chan_map == 1 ? 0xffffffff : 0);
3361 		if (chan_map == 2)
3362 			t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3363 				     V_TX_MOD_QUEUE_REQ_MAP(0xff));
3364 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3365 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3366 	} else {                                             /* two channels */
3367 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3368 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3369 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3370 			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3371 		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3372 			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3373 			     F_ENFORCEPKT);
3374 		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3375 		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3376 		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3377 			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3378 		for (i = 0; i < 16; i++)
3379 			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3380 				     (i << 16) | 0x1010);
3381 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3382 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
3383 	}
3384 }
3385 
/*
 * Calibrate the XGMAC pad impedance.  XAUI adapters trigger the on-chip
 * auto-calibration (up to 5 attempts, 1ms apart) and program the result;
 * RGMII adapters are given fixed pull-up/pull-down values instead.
 * Returns 0 on success, -1 if XAUI calibration never converges.
 */
static int calibrate_xgm(adapter_t *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			/* Read back to flush, then wait for calibration. */
			(void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
3412 
/*
 * T3B-specific RGMII impedance calibration: reset the calibration logic,
 * then pulse the IMPSETUPDATE and CALUPDATE strobes in the sequence the
 * hardware requires.  XAUI adapters need no action here.
 */
static void calibrate_xgm_t3b(adapter_t *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
3427 
/*
 * MC7 (external memory controller) timing parameters, presumably in
 * memory-clock cycles -- TODO confirm units against the data sheet.
 * RefCyc is indexed by the memory density read from the MC7 CFG register.
 */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* activate to precharge delay */
	unsigned char ActToRdWrDly;	/* activate to read/write delay */
	unsigned char PreCyc;		/* precharge cycles */
	unsigned char RefCyc[5];	/* refresh cycles, per density */
	unsigned char BkCyc;		/* bank cycles */
	unsigned char WrToRdDly;	/* write to read delay */
	unsigned char RdToWrDly;	/* read to write delay */
};
3437 
3438 /*
3439  * Write a value to a register and check that the write completed.  These
3440  * writes normally complete in a cycle or two, so one read should suffice.
3441  * The very first read exists to flush the posted write to the device.
3442  */
3443 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3444 {
3445 	t3_write_reg(adapter,	addr, val);
3446 	(void) t3_read_reg(adapter, addr);                   /* flush */
3447 	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3448 		return 0;
3449 	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3450 	return -EIO;
3451 }
3452 
/*
 * Initialize one MC7 external memory instance.
 * @mc7: the MC7 instance (size 0 means not populated -- nothing to do)
 * @mc7_clock: memory clock in kHz
 * @mem_type: index into the mode/timing tables below
 *
 * Brings the memory interface up, calibrates it, programs the timing
 * parameters, runs the DRAM mode-register initialization sequence
 * (presumably the JEDEC-style init -- TODO confirm against the memory
 * data sheet), sets the periodic refresh rate, enables ECC, and runs the
 * built-in self test over the whole part before enabling normal access.
 * Returns 0 on success, -1 on any failure.  The statement order below is
 * mandated by the DRAM init protocol -- do not reorder.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	/* Mode-register values and timings, both indexed by mem_type. */
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{ 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
		{ 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
		{ 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
		{ 9,  3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
		{ 9,  4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	adapter_t *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)
		return 0;

	/* Read the strap/config bits: slow mode, data width, density. */
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	/* Enable the memory interface and let it settle. */
	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);  /* flush */
	msleep(1);

	/* Fast parts need a single-shot I/O calibration first. */
	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		(void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	(void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	/* DRAM mode-register init sequence.  val doubles as the EXT_MODE1
	 * value: 3 for slow parts, 6 otherwise. */
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
				 F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;  /* ns */
	mc7_clock /= 1000000;                          /* KHz->MHz, ns->us */

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	(void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */

	t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
		     F_ECCGENEN | F_ECCCHKEN);
	/* Run BIST over the entire memory: up to 50 x 250ms polls. */
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	(void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */

	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

 out_fail:
	return -1;
}
3567 
/*
 * Configure PCIe ack latency and replay limits from the negotiated link
 * width and max payload size.  The lookup tables are indexed by
 * [log2(link width)][payload-size code]; both limits are padded by four
 * fast-training sequences per the values read from the hardware.
 */
static void config_pcie(adapter_t *adap)
{
	static const u16 ack_lat[4][6] = {
		{ 237, 416, 559, 1071, 2095, 4143 },
		{ 128, 217, 289, 545, 1057, 2081 },
		{ 73, 118, 154, 282, 538, 1050 },
		{ 67, 107, 86, 150, 278, 534 }
	};
	static const u16 rpl_tmr[4][6] = {
		{ 711, 1248, 1677, 3213, 6285, 12429 },
		{ 384, 651, 867, 1635, 3171, 6243 },
		{ 219, 354, 462, 846, 1614, 3150 },
		{ 201, 321, 258, 450, 834, 1602 }
	};

	u16 val;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	/* Max payload size code from the PCIe Device Control register. */
	t3_os_pci_read_config_2(adap,
				adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
				&val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

	t3_os_pci_read_config_2(adap,
				adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			       	&val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	/* Rev 0 has no separate Rx FTS count; reuse the Tx value. */
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
			G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)                            /* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	/* Rev 0 uses a different field layout for the ack latency. */
	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	/* Clear any stale PCIe errors, then enable DMA stop / CLI decode. */
	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
3620 
3621 /**
3622  * 	t3_init_hw - initialize and configure T3 HW modules
3623  * 	@adapter: the adapter
3624  * 	@fw_params: initial parameters to pass to firmware (optional)
3625  *
3626  *	Initialize and configure T3 HW modules.  This performs the
3627  *	initialization steps that need to be done once after a card is reset.
 *	MAC and PHY initialization is handled separately whenever a port is
3629  *	enabled.
3630  *
3631  *	@fw_params are passed to FW and their value is platform dependent.
3632  *	Only the top 8 bits are available for use, the rest must be 0.
3633  */
int t3_init_hw(adapter_t *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	/* XGMAC calibration: rev > 0 uses the T3B path, rev 0 can fail. */
	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	/* Multiport (>2 port) cards reset the single shared MAC here. */
	if (adapter->params.nports > 2)
		t3_mac_reset(&adap2pinfo(adapter, 0)->mac);

	/* External memory clock set => MC7 memories and MC5 TCAM present. */
	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
			        adapter->params.mc5.nfilters,
			       	adapter->params.mc5.nroutes))
			goto out_err;

		/* Clear the first 32 SGE CQ contexts. */
		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

#ifdef CONFIG_CHELSIO_T3_CORE
	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);
#endif
	/* Bus-specific DMA/decode enables. */
	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	chan_init_hw(adapter, adapter->params.chan_map);
	t3_sge_init(adapter, &adapter->params.sge);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	/*
	 * Hand fw_params (plus the uP clock) to the firmware and point it
	 * at the boot image in flash, then wait for it to come up.  The uP
	 * clears A_CIM_HOST_ACC_DATA when initialization completes.
	 */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	(void) t3_read_reg(adapter, A_CIM_BOOT_CFG);    /* flush */

	attempts = 100;
	do {                          /* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
 out_err:
	return err;
}
3710 
3711 /**
3712  *	get_pci_mode - determine a card's PCI mode
3713  *	@adapter: the adapter
3714  *	@p: where to store the PCI settings
3715  *
3716  *	Determines a card's PCI mode and associated parameters, such as speed
3717  *	and width.
3718  */
3719 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
3720 {
3721 	static unsigned short speed_map[] = { 33, 66, 100, 133 };
3722 	u32 pci_mode, pcie_cap;
3723 
3724 	pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
3725 	if (pcie_cap) {
3726 		u16 val;
3727 
3728 		p->variant = PCI_VARIANT_PCIE;
3729 		p->pcie_cap_addr = pcie_cap;
3730 		t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
3731 					&val);
3732 		p->width = (val >> 4) & 0x3f;
3733 		return;
3734 	}
3735 
3736 	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3737 	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3738 	p->width = (pci_mode & F_64BIT) ? 64 : 32;
3739 	pci_mode = G_PCIXINITPAT(pci_mode);
3740 	if (pci_mode == 0)
3741 		p->variant = PCI_VARIANT_PCI;
3742 	else if (pci_mode < 4)
3743 		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3744 	else if (pci_mode < 8)
3745 		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3746 	else
3747 		p->variant = PCI_VARIANT_PCIX_266_MODE2;
3748 }
3749 
3750 /**
3751  *	init_link_config - initialize a link's SW state
3752  *	@lc: structure holding the link state
3753  *	@caps: link capabilities
3754  *
3755  *	Initializes the SW state maintained for each link, including the link's
3756  *	capabilities and default speed/duplex/flow-control/autonegotiation
3757  *	settings.
3758  */
3759 static void __devinit init_link_config(struct link_config *lc,
3760 				       unsigned int caps)
3761 {
3762 	lc->supported = caps;
3763 	lc->requested_speed = lc->speed = SPEED_INVALID;
3764 	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3765 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3766 	if (lc->supported & SUPPORTED_Autoneg) {
3767 		lc->advertising = lc->supported;
3768 		lc->autoneg = AUTONEG_ENABLE;
3769 		lc->requested_fc |= PAUSE_AUTONEG;
3770 	} else {
3771 		lc->advertising = 0;
3772 		lc->autoneg = AUTONEG_DISABLE;
3773 	}
3774 }
3775 
3776 /**
3777  *	mc7_calc_size - calculate MC7 memory size
3778  *	@cfg: the MC7 configuration
3779  *
3780  *	Calculates the size of an MC7 memory in bytes from the value of its
3781  *	configuration register.
3782  */
3783 static unsigned int __devinit mc7_calc_size(u32 cfg)
3784 {
3785 	unsigned int width = G_WIDTH(cfg);
3786 	unsigned int banks = !!(cfg & F_BKS) + 1;
3787 	unsigned int org = !!(cfg & F_ORG) + 1;
3788 	unsigned int density = G_DEN(cfg);
3789 	unsigned int MBs = ((256 << density) * banks) / (org << width);
3790 
3791 	return MBs << 20;
3792 }
3793 
3794 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
3795 			       unsigned int base_addr, const char *name)
3796 {
3797 	u32 cfg;
3798 
3799 	mc7->adapter = adapter;
3800 	mc7->name = name;
3801 	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3802 	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3803 	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3804 	mc7->width = G_WIDTH(cfg);
3805 }
3806 
3807 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
3808 {
3809 	mac->adapter = adapter;
3810 	mac->multiport = adapter->params.nports > 2;
3811 	if (mac->multiport) {
3812 		mac->ext_port = (unsigned char)index;
3813 		mac->nucast = 8;
3814 		index = 0;
3815 	} else
3816 		mac->nucast = 1;
3817 
3818 	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3819 
3820 	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3821 		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3822 			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3823 		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3824 				 F_ENRGMII, 0);
3825 	}
3826 }
3827 
3828 /**
3829  *	early_hw_init - HW initialization done at card detection time
3830  *	@adapter: the adapter
3831  *	@ai: contains information about the adapter type and properties
3832  *
 *	Performs the part of HW initialization that is done early on when the
 *	driver first detects the card.  Most of the HW state is initialized
3835  *	lazily later on when a port or an offload function are first used.
3836  */
void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
{
	/* Port speed code: 3 for 10G or multiport cards, otherwise 2. */
	u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
			      3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,                  /* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	/* RGMII is enabled on rev 0 parts and whenever XAUI is not used. */
	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	(void) t3_read_reg(adapter, A_XGM_PORT_CFG); /* flush */

	/* Set CLKDIVRESET_ on both XGMAC instances, flushing each write. */
	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	(void) t3_read_reg(adapter, A_XGM_PORT_CFG); /* flush */
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	(void) t3_read_reg(adapter, A_XGM_PORT_CFG); /* flush */
}
3863 
3864 /**
3865  *	t3_reset_adapter - reset the adapter
3866  *	@adapter: the adapter
3867  *
3868  * 	Reset the adapter.
3869  */
3870 static int t3_reset_adapter(adapter_t *adapter)
3871 {
3872 	int i, save_and_restore_pcie =
3873 	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3874 	uint16_t devid = 0;
3875 
3876 	if (save_and_restore_pcie)
3877 		t3_os_pci_save_state(adapter);
3878 	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3879 
3880  	/*
3881 	 * Delay. Give Some time to device to reset fully.
3882 	 * XXX The delay time should be modified.
3883 	 */
3884 	for (i = 0; i < 10; i++) {
3885 		msleep(50);
3886 		t3_os_pci_read_config_2(adapter, 0x00, &devid);
3887 		if (devid == 0x1425)
3888 			break;
3889 	}
3890 
3891 	if (devid != 0x1425)
3892 		return -1;
3893 
3894 	if (save_and_restore_pcie)
3895 		t3_os_pci_restore_state(adapter);
3896 	return 0;
3897 }
3898 
/*
 * init_parity - write known data into SGE contexts and CIM IBQ memory
 * @adap: the adapter
 *
 * Clears a set of SGE egress and response-queue contexts and zeroes the
 * CIM IBQ debug memory; per the function name this presumably
 * establishes valid parity/ECC in those memories -- confirm against
 * Chelsio docs.  Returns -EBUSY if the SGE context unit is busy,
 * otherwise the first failing operation's error (0 on success).
 */
static int init_parity(adapter_t *adap)
{
	int i, err, addr;

	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* Egress contexts 0-15 and 0xfff0-0xffff, then all response queues. */
	for (err = i = 0; !err && i < 16; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0xfff0; !err && i <= 0xffff; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0; !err && i < SGE_QSETS; i++)
		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
	if (err)
		return err;

	/* Write zeros to every address of the 4 CIM IBQ debug queues. */
	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
	for (i = 0; i < 4; i++)
		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
				     F_IBQDBGWR | V_IBQDBGQID(i) |
				     V_IBQDBGADDR(addr));
			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
					      F_IBQDBGBUSY, 0, 2, 1);
			if (err)
				return err;
		}
	return 0;
}
3928 
3929 /**
3930  *	t3_prep_adapter - prepare SW and HW for operation
3931  *	@adapter: the adapter
3932  *	@ai: contains information about the adapter type and properties
3933  *
3934  *	Initialize adapter SW state for the various HW modules, set initial
3935  *	values for some adapter tunables, take PHYs out of reset, and
3936  *	initialize the MDIO interface.
3937  */
int __devinit t3_prep_adapter(adapter_t *adapter,
			      const struct adapter_info *ai, int reset)
{
	int ret;
	unsigned int i, j = 0;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports0 + ai->nports1;
	/* One bit per populated channel: bit 0 <- nports0, bit 1 <- nports1. */
	adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	adapter->params.linkpoll_period = 0;
	if (adapter->params.nports > 2)
		adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
	else
		adapter->params.stats_update_period = is_10G(adapter) ?
			MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
		t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	/* External memory clock present => card has MC7 memories to size. */
	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;     /* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		/* 12 timer queues with >= 128MB of CM or on rev > 0, else 6. */
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
			       adapter->params.rev > 0 ? 12 : 6;
		p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
			 1;
		p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
	}

	/* Offload is possible only if all three MC7 memories are present. */
	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		/* rev 0 gets no filters. */
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
					       DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

#ifdef CONFIG_CHELSIO_T3_CORE
		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
#endif
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	/* Multiport (>2 port) cards also need the VSC7323 set up. */
	if (adapter->params.nports > 2 &&
	    (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		/* j indexes VPD port slots; skip the unpopulated ones. */
		while (!adapter->params.vpd.port_type[j])
			++j;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);
		++j;

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		t3_os_set_hw_addr(adapter, i, hw_addr);
		init_link_config(&p->link_config, p->phy.caps);
		/* PHYs stay powered down until a port is brought up. */
		p->phy.ops->power_down(&p->phy, 1);
		/* Enable link polling if any PHY cannot raise an interrupt. */
		if (!(p->phy.caps & SUPPORTED_IRQ))
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
4051 
4052 /**
4053  *	t3_reinit_adapter - prepare HW for operation again
4054  *	@adapter: the adapter
4055  *
4056  *	Put HW in the same state as @t3_prep_adapter without any changes to
4057  *	SW state.  This is a cut down version of @t3_prep_adapter intended
4058  *	to be used after events that wipe out HW state but preserve SW state,
4059  *	e.g., EEH.  The device must be reset before calling this.
4060  */
int t3_reinit_adapter(adapter_t *adap)
{
	unsigned int i;
	int ret, j = -1;

	early_hw_init(adap, adap->params.info);
	ret = init_parity(adap);
	if (ret)
		return ret;

	/* Multiport (>2 port) cards also need the VSC7323 re-initialized. */
	if (adap->params.nports > 2 &&
	    (ret = t3_vsc7323_init(adap, adap->params.nports)))
		return ret;

	for_each_port(adap, i) {
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adap, i);

		/* Advance j to the next populated VPD port slot. */
		while (!adap->params.vpd.port_type[++j])
			;

		pti = &port_types[adap->params.vpd.port_type[j]];
		/*
		 * NULL mdio_ops and the existing p->phy.addr: presumably
		 * reuses the MDIO state established by t3_prep_adapter --
		 * confirm against the phy_prep implementations.
		 */
		ret = pti->phy_prep(&p->phy, adap, p->phy.addr, NULL);
		if (ret)
			return ret;
		p->phy.ops->power_down(&p->phy, 1);
	}
	return 0;
}
4090 
/*
 * t3_led_ready - drive GPIO0's output value high
 * @adapter: the adapter
 *
 * NOTE(review): per the function name GPIO0 presumably controls a
 * "ready" LED -- confirm against the board documentation.
 */
void t3_led_ready(adapter_t *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}
4096 
4097 void t3_port_failover(adapter_t *adapter, int port)
4098 {
4099 	u32 val;
4100 
4101 	val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
4102 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4103 			 val);
4104 }
4105 
/*
 * t3_failover_done - re-activate both ports after a failover
 * @adapter: the adapter
 * @port: the failed-over port (unused here; both bits are set regardless)
 *
 * Sets both port-active bits in the MPS configuration.
 */
void t3_failover_done(adapter_t *adapter, int port)
{
	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
			 F_PORT0ACTIVE | F_PORT1ACTIVE);
}
4111 
/*
 * t3_failover_clear - clear any port failover, activating both ports
 * @adapter: the adapter
 *
 * Sets both port-active bits in the MPS configuration.
 */
void t3_failover_clear(adapter_t *adapter)
{
	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
			 F_PORT0ACTIVE | F_PORT1ACTIVE);
}
4117