xref: /freebsd/sys/dev/cxgb/common/cxgb_t3_hw.c (revision e28a4053)
1 /**************************************************************************
2 
3 Copyright (c) 2007-2009, Chelsio Inc.
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Neither the name of the Chelsio Corporation nor the names of its
13     contributors may be used to endorse or promote products derived from
14     this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27 
28 ***************************************************************************/
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 
34 #include <cxgb_include.h>
35 
36 #undef msleep
37 #define msleep t3_os_sleep
38 
39 /**
40  *	t3_wait_op_done_val - wait until an operation is completed
41  *	@adapter: the adapter performing the operation
42  *	@reg: the register to check for completion
43  *	@mask: a single-bit field within @reg that indicates completion
44  *	@polarity: the value of the field when the operation is completed
45  *	@attempts: number of check iterations
46  *	@delay: delay in usecs between iterations
47  *	@valp: where to store the value of the register at completion time
48  *
49  *	Wait until an operation is completed by checking a bit in a register
50  *	up to @attempts times.  If @valp is not NULL the value of the register
51  *	at the time it indicated completion is stored there.  Returns 0 if the
52  *	operation completes and -EAGAIN otherwise.
53  */
54 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
55 			int attempts, int delay, u32 *valp)
56 {
57 	while (1) {
58 		u32 val = t3_read_reg(adapter, reg);
59 
60 		if (!!(val & mask) == polarity) {
61 			if (valp)
62 				*valp = val;
63 			return 0;
64 		}
65 		if (--attempts == 0)
66 			return -EAGAIN;
67 		if (delay)
68 			udelay(delay);
69 	}
70 }
71 
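/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * polling a BUSY bit with t3_wait_op_done_val().  A_SF_OP/F_BUSY are the
 * serial flash controls used later in this file.
 */
static inline int example_wait_sf_idle(adapter_t *adap, u32 *valp)
{
	/* up to 10 polls, 10us apart, until F_BUSY reads as 0 */
	return t3_wait_op_done_val(adap, A_SF_OP, F_BUSY, 0, 10, 10, valp);
}
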
72 /**
73  *	t3_write_regs - write a bunch of registers
74  *	@adapter: the adapter to program
75  *	@p: an array of register address/register value pairs
76  *	@n: the number of address/value pairs
77  *	@offset: register address offset
78  *
79  *	Takes an array of register address/register value pairs and writes each
80  *	value to the corresponding register.  Register addresses are adjusted
81  *	by the supplied offset.
82  */
83 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
84 		   unsigned int offset)
85 {
86 	while (n--) {
87 		t3_write_reg(adapter, p->reg_addr + offset, p->val);
88 		p++;
89 	}
90 }
91 
92 /**
93  *	t3_set_reg_field - set a register field to a value
94  *	@adapter: the adapter to program
95  *	@addr: the register address
96  *	@mask: specifies the portion of the register to modify
97  *	@val: the new value for the register field
98  *
99  *	Sets a register field specified by the supplied mask to the
100  *	given value.
101  */
102 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
103 {
104 	u32 v = t3_read_reg(adapter, addr) & ~mask;
105 
106 	t3_write_reg(adapter, addr, v | val);
107 	(void) t3_read_reg(adapter, addr);      /* flush */
108 }
109 
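/*
 * Usage note: t3_set_reg_field() is the driver's read-modify-write
 * primitive.  For example, the MI1 code below switches the MDIO ST
 * (start-of-frame) field with
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
 *
 * clearing the whole field via the mask V_ST(M_ST) before ORing in the
 * new value.
 */
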
110 /**
111  *	t3_read_indirect - read indirectly addressed registers
112  *	@adap: the adapter
113  *	@addr_reg: register holding the indirect address
114  *	@data_reg: register holding the value of the indirect register
115  *	@vals: where the read register values are stored
116  *	@nregs: how many indirect registers to read
117  *	@start_idx: index of first indirect register to read
118  *
119  *	Reads registers that are accessed indirectly through an address/data
120  *	register pair.
121  */
122 static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
123 		      unsigned int data_reg, u32 *vals, unsigned int nregs,
124 		      unsigned int start_idx)
125 {
126 	while (nregs--) {
127 		t3_write_reg(adap, addr_reg, start_idx);
128 		*vals++ = t3_read_reg(adap, data_reg);
129 		start_idx++;
130 	}
131 }
132 
133 /**
134  *	t3_mc7_bd_read - read from MC7 through backdoor accesses
135  *	@mc7: identifies MC7 to read from
136  *	@start: index of first 64-bit word to read
137  *	@n: number of 64-bit words to read
138  *	@buf: where to store the read result
139  *
140  *	Reads @n 64-bit words from MC7 starting at word @start, using
141  *	backdoor accesses.
142  */
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
144                    u64 *buf)
145 {
146 	static const int shift[] = { 0, 0, 16, 24 };
147 	static const int step[]  = { 0, 32, 16, 8 };
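	/*
	 * mc7->width encodes the memory interface width; each 64-bit word
	 * takes (1 << width) backdoor reads, which are assembled using the
	 * shift/step tables above.
	 */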
148 
149 	unsigned int size64 = mc7->size / 8;  /* # of 64-bit words */
150 	adapter_t *adap = mc7->adapter;
151 
152 	if (start >= size64 || start + n > size64)
153 		return -EINVAL;
154 
155 	start *= (8 << mc7->width);
156 	while (n--) {
157 		int i;
158 		u64 val64 = 0;
159 
160 		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
161 			int attempts = 10;
162 			u32 val;
163 
164 			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
165 				       start);
166 			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
167 			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
168 			while ((val & F_BUSY) && attempts--)
169 				val = t3_read_reg(adap,
170 						  mc7->offset + A_MC7_BD_OP);
171 			if (val & F_BUSY)
172 				return -EIO;
173 
174 			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
175 			if (mc7->width == 0) {
176 				val64 = t3_read_reg(adap,
177 						mc7->offset + A_MC7_BD_DATA0);
178 				val64 |= (u64)val << 32;
179 			} else {
180 				if (mc7->width > 1)
181 					val >>= shift[mc7->width];
182 				val64 |= (u64)val << (step[mc7->width] * i);
183 			}
184 			start += 8;
185 		}
186 		*buf++ = val64;
187 	}
188 	return 0;
189 }
190 
191 /*
192  * Low-level I2C read and write routines.  These simply read and write a
193  * single byte with the option of indicating a "continue" if another operation
194  * is to be chained.  Most code will use the higher-level routines to
195  * read from and write to I2C slave devices.
196  */
197 #define I2C_ATTEMPTS 100
198 
199 /*
200  * Read an 8-bit value from the I2C bus.  If the "chained" parameter is
201  * non-zero then a STOP bit will not be written after the read command.  On
202  * error (the read timed out, etc.), a negative errno will be returned (e.g.
203  * -EAGAIN, etc.).  On success, the 8-bit value read from the I2C bus is
204  * stored into the buffer *valp and the value of the I2C ACK bit is returned
205  * as a 0/1 value.
206  */
207 int t3_i2c_read8(adapter_t *adapter, int chained, u8 *valp)
208 {
209 	int ret;
210 	u32 opval;
211 	MDIO_LOCK(adapter);
212 	t3_write_reg(adapter, A_I2C_OP,
213 		     F_I2C_READ | (chained ? F_I2C_CONT : 0));
214 	ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
215 				  I2C_ATTEMPTS, 10, &opval);
216 	if (ret >= 0) {
217 		ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
218 		*valp = G_I2C_DATA(t3_read_reg(adapter, A_I2C_DATA));
219 	}
220 	MDIO_UNLOCK(adapter);
221 	return ret;
222 }
223 
224 /*
225  * Write an 8-bit value to the I2C bus.  If the "chained" parameter is
226  * non-zero, then a STOP bit will not be written after the write command.  On
227  * error (the write timed out, etc.), a negative errno will be returned (e.g.
228  * -EAGAIN, etc.).  On success, the value of the I2C ACK bit is returned as a
229  * 0/1 value.
230  */
231 int t3_i2c_write8(adapter_t *adapter, int chained, u8 val)
232 {
233 	int ret;
234 	u32 opval;
235 	MDIO_LOCK(adapter);
236 	t3_write_reg(adapter, A_I2C_DATA, V_I2C_DATA(val));
237 	t3_write_reg(adapter, A_I2C_OP,
238 		     F_I2C_WRITE | (chained ? F_I2C_CONT : 0));
239 	ret = t3_wait_op_done_val(adapter, A_I2C_OP, F_I2C_BUSY, 0,
240 				  I2C_ATTEMPTS, 10, &opval);
241 	if (ret >= 0)
242 		ret = ((opval & F_I2C_ACK) == F_I2C_ACK);
243 	MDIO_UNLOCK(adapter);
244 	return ret;
245 }
246 
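/*
 * Illustrative sketch (hypothetical helper): the "chained" flag lets two
 * byte transfers form one bus transaction, e.g. reading a big-endian
 * 16-bit value with no STOP bit between the bytes.
 */
static inline int example_i2c_read16(adapter_t *adapter, u16 *valp)
{
	u8 hi, lo;
	int ret;

	ret = t3_i2c_read8(adapter, 1, &hi);	/* chained: no STOP yet */
	if (ret < 0)
		return ret;
	ret = t3_i2c_read8(adapter, 0, &lo);	/* final byte: issue STOP */
	if (ret < 0)
		return ret;
	*valp = ((u16)hi << 8) | lo;
	return 0;
}
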
247 /*
248  * Initialize MI1.
249  */
250 static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
251 {
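	/*
	 * The MDIO clock is divided down from the core clock:
	 *	mdc = cclk / (2 * (clkdiv + 1))
	 * hence the divider programmed below.
	 */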
252 	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
253 	u32 val = F_PREEN | V_CLKDIV(clkdiv);
254 
255 	t3_write_reg(adap, A_MI1_CFG, val);
256 }
257 
258 #define MDIO_ATTEMPTS 20
259 
260 /*
261  * MI1 read/write operations for clause 22 PHYs.
262  */
263 int t3_mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
264 		int reg_addr, unsigned int *valp)
265 {
266 	int ret;
267 	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
268 
269 	if (mmd_addr)
270 		return -EINVAL;
271 
272 	MDIO_LOCK(adapter);
273 	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
274 	t3_write_reg(adapter, A_MI1_ADDR, addr);
275 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
276 	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
277 	if (!ret)
278 		*valp = t3_read_reg(adapter, A_MI1_DATA);
279 	MDIO_UNLOCK(adapter);
280 	return ret;
281 }
282 
283 int t3_mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
284 		 int reg_addr, unsigned int val)
285 {
286 	int ret;
287 	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
288 
289 	if (mmd_addr)
290 		return -EINVAL;
291 
292 	MDIO_LOCK(adapter);
293 	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
294 	t3_write_reg(adapter, A_MI1_ADDR, addr);
295 	t3_write_reg(adapter, A_MI1_DATA, val);
296 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
297 	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
298 	MDIO_UNLOCK(adapter);
299 	return ret;
300 }
301 
302 static struct mdio_ops mi1_mdio_ops = {
303 	t3_mi1_read,
304 	t3_mi1_write
305 };
306 
307 /*
308  * MI1 read/write operations for clause 45 PHYs.
309  */
310 static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
311 			int reg_addr, unsigned int *valp)
312 {
313 	int ret;
314 	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
315 
316 	MDIO_LOCK(adapter);
317 	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
318 	t3_write_reg(adapter, A_MI1_ADDR, addr);
319 	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
320 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
321 	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
322 	if (!ret) {
323 		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
324 		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
325 				      MDIO_ATTEMPTS, 10);
326 		if (!ret)
327 			*valp = t3_read_reg(adapter, A_MI1_DATA);
328 	}
329 	MDIO_UNLOCK(adapter);
330 	return ret;
331 }
332 
333 static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
334 			 int reg_addr, unsigned int val)
335 {
336 	int ret;
337 	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
338 
339 	MDIO_LOCK(adapter);
340 	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
341 	t3_write_reg(adapter, A_MI1_ADDR, addr);
342 	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
343 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
344 	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
345 	if (!ret) {
346 		t3_write_reg(adapter, A_MI1_DATA, val);
347 		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
348 		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
349 				      MDIO_ATTEMPTS, 10);
350 	}
351 	MDIO_UNLOCK(adapter);
352 	return ret;
353 }
354 
355 static struct mdio_ops mi1_mdio_ext_ops = {
356 	mi1_ext_read,
357 	mi1_ext_write
358 };
359 
360 /**
361  *	t3_mdio_change_bits - modify the value of a PHY register
362  *	@phy: the PHY to operate on
363  *	@mmd: the device address
364  *	@reg: the register address
365  *	@clear: what part of the register value to mask off
366  *	@set: what part of the register value to set
367  *
368  *	Changes the value of a PHY register by applying a mask to its current
369  *	value and ORing the result with a new value.
370  */
371 int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
372 			unsigned int set)
373 {
374 	int ret;
375 	unsigned int val;
376 
377 	ret = mdio_read(phy, mmd, reg, &val);
378 	if (!ret) {
379 		val &= ~clear;
380 		ret = mdio_write(phy, mmd, reg, val | set);
381 	}
382 	return ret;
383 }
384 
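/*
 * Illustrative sketch (hypothetical helper): set or clear individual BMCR
 * bits without disturbing the rest of the register, e.g. powering a PHY
 * down.
 */
static inline int example_phy_power_down(struct cphy *phy, int mmd)
{
	return t3_mdio_change_bits(phy, mmd, MII_BMCR, 0, BMCR_PDOWN);
}
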
385 /**
386  *	t3_phy_reset - reset a PHY block
387  *	@phy: the PHY to operate on
388  *	@mmd: the device address of the PHY block to reset
389  *	@wait: how long to wait for the reset to complete in 1ms increments
390  *
391  *	Resets a PHY block and optionally waits for the reset to complete.
392  *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
393  *	for 10G PHYs.
394  */
395 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
396 {
397 	int err;
398 	unsigned int ctl;
399 
400 	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
401 	if (err || !wait)
402 		return err;
403 
404 	do {
405 		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
406 		if (err)
407 			return err;
408 		ctl &= BMCR_RESET;
409 		if (ctl)
410 			msleep(1);
411 	} while (ctl && --wait);
412 
413 	return ctl ? -1 : 0;
414 }
415 
416 /**
417  *	t3_phy_advertise - set the PHY advertisement registers for autoneg
418  *	@phy: the PHY to operate on
419  *	@advert: bitmap of capabilities the PHY should advertise
420  *
421  *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
422  *	requested capabilities.
423  */
424 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
425 {
426 	int err;
427 	unsigned int val = 0;
428 
429 	err = mdio_read(phy, 0, MII_CTRL1000, &val);
430 	if (err)
431 		return err;
432 
433 	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
434 	if (advert & ADVERTISED_1000baseT_Half)
435 		val |= ADVERTISE_1000HALF;
436 	if (advert & ADVERTISED_1000baseT_Full)
437 		val |= ADVERTISE_1000FULL;
438 
439 	err = mdio_write(phy, 0, MII_CTRL1000, val);
440 	if (err)
441 		return err;
442 
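	/* bit 0 is the IEEE 802.3 selector field, which must be set */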
443 	val = 1;
444 	if (advert & ADVERTISED_10baseT_Half)
445 		val |= ADVERTISE_10HALF;
446 	if (advert & ADVERTISED_10baseT_Full)
447 		val |= ADVERTISE_10FULL;
448 	if (advert & ADVERTISED_100baseT_Half)
449 		val |= ADVERTISE_100HALF;
450 	if (advert & ADVERTISED_100baseT_Full)
451 		val |= ADVERTISE_100FULL;
452 	if (advert & ADVERTISED_Pause)
453 		val |= ADVERTISE_PAUSE_CAP;
454 	if (advert & ADVERTISED_Asym_Pause)
455 		val |= ADVERTISE_PAUSE_ASYM;
456 	return mdio_write(phy, 0, MII_ADVERTISE, val);
457 }
458 
459 /**
460  *	t3_phy_advertise_fiber - set fiber PHY advertisement register
461  *	@phy: the PHY to operate on
462  *	@advert: bitmap of capabilities the PHY should advertise
463  *
464  *	Sets a fiber PHY's advertisement register to advertise the
465  *	requested capabilities.
466  */
467 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
468 {
469 	unsigned int val = 0;
470 
471 	if (advert & ADVERTISED_1000baseT_Half)
472 		val |= ADVERTISE_1000XHALF;
473 	if (advert & ADVERTISED_1000baseT_Full)
474 		val |= ADVERTISE_1000XFULL;
475 	if (advert & ADVERTISED_Pause)
476 		val |= ADVERTISE_1000XPAUSE;
477 	if (advert & ADVERTISED_Asym_Pause)
478 		val |= ADVERTISE_1000XPSE_ASYM;
479 	return mdio_write(phy, 0, MII_ADVERTISE, val);
480 }
481 
482 /**
483  *	t3_set_phy_speed_duplex - force PHY speed and duplex
484  *	@phy: the PHY to operate on
485  *	@speed: requested PHY speed
486  *	@duplex: requested PHY duplex
487  *
488  *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
489  *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
490  */
491 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
492 {
493 	int err;
494 	unsigned int ctl;
495 
496 	err = mdio_read(phy, 0, MII_BMCR, &ctl);
497 	if (err)
498 		return err;
499 
500 	if (speed >= 0) {
501 		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
502 		if (speed == SPEED_100)
503 			ctl |= BMCR_SPEED100;
504 		else if (speed == SPEED_1000)
505 			ctl |= BMCR_SPEED1000;
506 	}
507 	if (duplex >= 0) {
508 		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
509 		if (duplex == DUPLEX_FULL)
510 			ctl |= BMCR_FULLDPLX;
511 	}
512 	if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for GigE */
513 		ctl |= BMCR_ANENABLE;
514 	return mdio_write(phy, 0, MII_BMCR, ctl);
515 }
516 
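/*
 * Illustrative sketch (hypothetical helper): force a copper PHY to
 * 100 Mb/s full duplex; per the note above, auto-negotiation stays
 * enabled only when GigE is selected.
 */
static inline int example_force_100fdx(struct cphy *phy)
{
	return t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
}
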
517 int t3_phy_lasi_intr_enable(struct cphy *phy)
518 {
519 	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
520 }
521 
522 int t3_phy_lasi_intr_disable(struct cphy *phy)
523 {
524 	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
525 }
526 
527 int t3_phy_lasi_intr_clear(struct cphy *phy)
528 {
529 	u32 val;
530 
531 	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
532 }
533 
534 int t3_phy_lasi_intr_handler(struct cphy *phy)
535 {
536 	unsigned int status;
537 	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
538 
539 	if (err)
540 		return err;
541 	return (status & 1) ?  cphy_cause_link_change : 0;
542 }
543 
544 static struct adapter_info t3_adap_info[] = {
545 	{ 1, 1, 0,
546 	  F_GPIO2_OEN | F_GPIO4_OEN |
547 	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
548 	  &mi1_mdio_ops, "Chelsio PE9000" },
549 	{ 1, 1, 0,
550 	  F_GPIO2_OEN | F_GPIO4_OEN |
551 	  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
552 	  &mi1_mdio_ops, "Chelsio T302" },
553 	{ 1, 0, 0,
554 	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
555 	  F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
556 	  { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
557 	  &mi1_mdio_ext_ops, "Chelsio T310" },
558 	{ 1, 1, 0,
559 	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
560 	  F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
561 	  F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
562 	  { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
563 	  &mi1_mdio_ext_ops, "Chelsio T320" },
564 	{ 4, 0, 0,
565 	  F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
566 	  F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
567 	  { S_GPIO1, S_GPIO2, S_GPIO3, S_GPIO4 }, SUPPORTED_AUI,
568 	  &mi1_mdio_ops, "Chelsio T304" },
569 	{ 0 },
570 	{ 1, 0, 0,
571 	  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
572 	  F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
573 	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
574 	  &mi1_mdio_ext_ops, "Chelsio T310" },
575 	{ 1, 0, 0,
576 	  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
577 	  F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
578 	  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
579 	  &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
580 };
581 
582 /*
583  * Return the adapter_info structure with a given index.  Out-of-range indices
584  * return NULL.
585  */
586 const struct adapter_info *t3_get_adapter_info(unsigned int id)
587 {
588 	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
589 }
590 
591 struct port_type_info {
592 	int (*phy_prep)(pinfo_t *pinfo, int phy_addr,
593 			const struct mdio_ops *ops);
594 };
595 
596 static struct port_type_info port_types[] = {
597 	{ NULL },
598 	{ t3_ael1002_phy_prep },
599 	{ t3_vsc8211_phy_prep },
600 	{ t3_mv88e1xxx_phy_prep },
601 	{ t3_xaui_direct_phy_prep },
602 	{ t3_ael2005_phy_prep },
603 	{ t3_qt2045_phy_prep },
604 	{ t3_ael1006_phy_prep },
605 	{ t3_tn1010_phy_prep },
606 	{ t3_aq100x_phy_prep },
607 	{ t3_ael2020_phy_prep },
608 };
609 
610 #define VPD_ENTRY(name, len) \
611 	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
612 
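/*
 * For reference, VPD_ENTRY(sn, SERNUM_LEN) in the structure below expands
 * to
 *
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[SERNUM_LEN];
 *
 * i.e. the standard PCI VPD keyword layout: two keyword characters, a
 * length byte, then the payload.
 */
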
613 /*
614  * Partial EEPROM Vital Product Data structure.  Includes only the ID and
615  * VPD-R sections.
616  */
617 struct t3_vpd {
618 	u8  id_tag;
619 	u8  id_len[2];
620 	u8  id_data[16];
621 	u8  vpdr_tag;
622 	u8  vpdr_len[2];
623 	VPD_ENTRY(pn, 16);                     /* part number */
624 	VPD_ENTRY(ec, ECNUM_LEN);              /* EC level */
625 	VPD_ENTRY(sn, SERNUM_LEN);             /* serial number */
626 	VPD_ENTRY(na, 12);                     /* MAC address base */
627 	VPD_ENTRY(cclk, 6);                    /* core clock */
628 	VPD_ENTRY(mclk, 6);                    /* mem clock */
629 	VPD_ENTRY(uclk, 6);                    /* uP clk */
630 	VPD_ENTRY(mdc, 6);                     /* MDIO clk */
631 	VPD_ENTRY(mt, 2);                      /* mem timing */
632 	VPD_ENTRY(xaui0cfg, 6);                /* XAUI0 config */
633 	VPD_ENTRY(xaui1cfg, 6);                /* XAUI1 config */
634 	VPD_ENTRY(port0, 2);                   /* PHY0 complex */
635 	VPD_ENTRY(port1, 2);                   /* PHY1 complex */
636 	VPD_ENTRY(port2, 2);                   /* PHY2 complex */
637 	VPD_ENTRY(port3, 2);                   /* PHY3 complex */
638 	VPD_ENTRY(rv, 1);                      /* csum */
639 	u32 pad;                  /* for multiple-of-4 sizing and alignment */
640 };
641 
642 #define EEPROM_MAX_POLL   40
643 #define EEPROM_STAT_ADDR  0x4000
644 #define VPD_BASE          0xc00
645 
646 /**
647  *	t3_seeprom_read - read a VPD EEPROM location
648  *	@adapter: adapter to read
649  *	@addr: EEPROM address
650  *	@data: where to store the read data
651  *
652  *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
653  *	VPD ROM capability.  A zero is written to the flag bit when the
654  *	addres is written to the control register.  The hardware device will
655  *	set the flag to 1 when 4 bytes have been read into the data register.
656  */
657 int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
658 {
659 	u16 val;
660 	int attempts = EEPROM_MAX_POLL;
661 	unsigned int base = adapter->params.pci.vpd_cap_addr;
662 
663 	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
664 		return -EINVAL;
665 
666 	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
667 	do {
668 		udelay(10);
669 		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
670 	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
671 
672 	if (!(val & PCI_VPD_ADDR_F)) {
673 		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
674 		return -EIO;
675 	}
676 	t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
677 	*data = le32_to_cpu(*data);
678 	return 0;
679 }
680 
681 /**
682  *	t3_seeprom_write - write a VPD EEPROM location
683  *	@adapter: adapter to write
684  *	@addr: EEPROM address
685  *	@data: value to write
686  *
687  *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
688  *	VPD ROM capability.
689  */
690 int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
691 {
692 	u16 val;
693 	int attempts = EEPROM_MAX_POLL;
694 	unsigned int base = adapter->params.pci.vpd_cap_addr;
695 
696 	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
697 		return -EINVAL;
698 
699 	t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
700 				 cpu_to_le32(data));
701 	t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
702 				 (u16)addr | PCI_VPD_ADDR_F);
703 	do {
704 		msleep(1);
705 		t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
706 	} while ((val & PCI_VPD_ADDR_F) && --attempts);
707 
708 	if (val & PCI_VPD_ADDR_F) {
709 		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
710 		return -EIO;
711 	}
712 	return 0;
713 }
714 
715 /**
716  *	t3_seeprom_wp - enable/disable EEPROM write protection
717  *	@adapter: the adapter
718  *	@enable: 1 to enable write protection, 0 to disable it
719  *
720  *	Enables or disables write protection on the serial EEPROM.
721  */
722 int t3_seeprom_wp(adapter_t *adapter, int enable)
723 {
724 	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
725 }
726 
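/*
 * Illustrative sketch (hypothetical helper): a typical update sequence
 * drops write protection, writes the word, then restores protection.
 */
static inline int example_seeprom_update(adapter_t *adapter, u32 addr, u32 data)
{
	int ret = t3_seeprom_wp(adapter, 0);

	if (ret)
		return ret;
	ret = t3_seeprom_write(adapter, addr, data);
	t3_seeprom_wp(adapter, 1);	/* best effort; keep the write status */
	return ret;
}
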
727 /*
728  * Convert a character holding a hex digit to a number.
729  */
730 static unsigned int hex2int(unsigned char c)
731 {
732 	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
733 }
734 
735 /**
736  * 	get_desc_len - get the length of a vpd descriptor.
737  *	@adapter: the adapter
738  *	@offset: first byte offset of the vpd descriptor
739  *
740  *	Retrieves the length of the small/large resource
741  *	data type starting at offset.
742  */
743 static int get_desc_len(adapter_t *adapter, u32 offset)
744 {
745 	u32 read_offset, tmp, shift, len = 0;
746 	u8 tag, buf[8];
747 	int ret;
748 
749 	read_offset = offset & 0xfffffffc;
750 	shift = offset & 0x03;
751 
752 	ret = t3_seeprom_read(adapter, read_offset, &tmp);
753 	if (ret < 0)
754 		return ret;
755 
756 	*((u32 *)buf) = cpu_to_le32(tmp);
757 
758 	tag = buf[shift];
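	/*
	 * Bit 7 of the tag byte distinguishes large resources (16-bit
	 * length in the following two bytes, 3 header bytes total) from
	 * small resources (length in the low 3 bits, 1 header byte).
	 */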
759 	if (tag & 0x80) {
760 		ret = t3_seeprom_read(adapter, read_offset + 4, &tmp);
761 		if (ret < 0)
762 			return ret;
763 
764 		*((u32 *)(&buf[4])) = cpu_to_le32(tmp);
765 		len = (buf[shift + 1] & 0xff) +
766 		      ((buf[shift+2] << 8) & 0xff00) + 3;
767 	} else
768 		len = (tag & 0x07) + 1;
769 
770 	return len;
771 }
772 
773 /**
774  *	is_end_tag - Check if a vpd tag is the end tag.
775  *	@adapter: the adapter
776  *	@offset: first byte offset of the tag
777  *
778  *	Checks if the tag located at offset is the end tag.
779  */
780 static int is_end_tag(adapter_t * adapter, u32 offset)
781 {
782 	u32 read_offset, shift, tmp;
783 	u8 buf[4];
	int ret;
784 
785 	read_offset = offset & 0xfffffffc;
786 	shift = offset & 0x03;
787 
788 	ret = t3_seeprom_read(adapter, read_offset, &tmp);
789 	if (ret)
790 		return ret;
791 	*((u32 *)buf) = cpu_to_le32(tmp);
792 
793 	if (buf[shift] == 0x78)
794 		return 1;
795 	else
796 		return 0;
797 }
798 
799 /**
800  *	t3_get_vpd_len - computes the length of a vpd structure
801  *	@adapter: the adapter
802  *	@vpd: contains the offset of first byte of vpd
803  *
804  *	Computes the length of the vpd structure starting at vpd->offset.
805  */
807 int t3_get_vpd_len(adapter_t * adapter, struct generic_vpd *vpd)
808 {
809 	u32 len = 0, offset;
810 	int inc, ret;
811 
812 	offset = vpd->offset;
813 
814 	while (offset < (vpd->offset + MAX_VPD_BYTES)) {
815 		ret = is_end_tag(adapter, offset);
816 		if (ret < 0)
817 			return ret;
818 		else if (ret == 1)
819 			break;
820 
821 		inc = get_desc_len(adapter, offset);
822 		if (inc < 0)
823 			return inc;
824 		len += inc;
825 		offset += inc;
826 	}
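	/* +1 accounts for the end tag byte itself */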
827 	return (len + 1);
828 }
829 
830 /**
831  *	t3_read_vpd - reads the stream of bytes containing a vpd structure
832  *	@adapter: the adapter
833  *	@vpd: contains a buffer that would hold the stream of bytes
834  *
835  *	Reads the vpd structure starting at vpd->offset into vpd->data,
836  *	the length of the byte stream to read is vpd->len.
837  */
839 int t3_read_vpd(adapter_t *adapter, struct generic_vpd *vpd)
840 {
841 	u32 i;
	int ret;
842 
843 	for (i = 0; i < vpd->len; i += 4) {
844 		ret = t3_seeprom_read(adapter, vpd->offset + i,
845 				      (u32 *) &(vpd->data[i]));
846 		if (ret)
847 			return ret;
848 	}
849 
850 	return 0;
851 }
852 
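/*
 * Illustrative sketch (hypothetical helper): size the structure first,
 * then pull the byte stream, assuming vpd->data points at a buffer of at
 * least that many bytes (rounded up to a multiple of 4 by the read loop).
 */
static inline int example_fetch_vpd(adapter_t *adapter, struct generic_vpd *vpd)
{
	int len = t3_get_vpd_len(adapter, vpd);

	if (len < 0)
		return len;
	vpd->len = len;
	return t3_read_vpd(adapter, vpd);
}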
853 
854 /**
855  *	get_vpd_params - read VPD parameters from VPD EEPROM
856  *	@adapter: adapter to read
857  *	@p: where to store the parameters
858  *
859  *	Reads card parameters stored in VPD EEPROM.
860  */
861 static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
862 {
863 	int i, addr, ret;
864 	struct t3_vpd vpd;
865 
866 	/*
867 	 * Card information is normally at VPD_BASE but some early cards had
868 	 * it at 0.
869 	 */
870 	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
871 	if (ret)
872 		return ret;
873 	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
874 
875 	for (i = 0; i < sizeof(vpd); i += 4) {
876 		ret = t3_seeprom_read(adapter, addr + i,
877 				      (u32 *)((u8 *)&vpd + i));
878 		if (ret)
879 			return ret;
880 	}
881 
882 	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
883 	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
884 	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
885 	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
886 	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
887 	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
888 	memcpy(p->ec, vpd.ec_data, ECNUM_LEN);
889 
890 	/* Old eeproms didn't have port information */
891 	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
892 		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
893 		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
894 	} else {
895 		p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
896 		p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
897 		p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
898 		p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
899 		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
900 		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
901 	}
902 
903 	for (i = 0; i < 6; i++)
904 		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
905 				 hex2int(vpd.na_data[2 * i + 1]);
906 	return 0;
907 }
908 
909 /* BIOS boot header */
910 typedef struct boot_header_s {
911 	u8	signature[2];	/* signature */
912 	u8	length;		/* image length (include header) */
913 	u8	offset[4];	/* initialization vector */
914 	u8	reserved[19];	/* reserved */
915 	u8	exheader[2];	/* offset to expansion header */
916 } boot_header_t;
917 
918 /* serial flash and firmware constants */
919 enum {
920 	SF_ATTEMPTS = 5,           /* max retries for SF1 operations */
921 	SF_SEC_SIZE = 64 * 1024,   /* serial flash sector size */
922 	SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
923 
924 	/* flash command opcodes */
925 	SF_PROG_PAGE    = 2,       /* program page */
926 	SF_WR_DISABLE   = 4,       /* disable writes */
927 	SF_RD_STATUS    = 5,       /* read status register */
928 	SF_WR_ENABLE    = 6,       /* enable writes */
929 	SF_RD_DATA_FAST = 0xb,     /* read flash */
930 	SF_ERASE_SECTOR = 0xd8,    /* erase sector */
931 
932 	FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
933 	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
934 	FW_VERS_ADDR_PRE8 = 0x77ffc,/* flash address holding FW version pre8 */
935 	FW_MIN_SIZE = 8,           /* at least version and csum */
936 	FW_MAX_SIZE = FW_VERS_ADDR - FW_FLASH_BOOT_ADDR,
937 	FW_MAX_SIZE_PRE8 = FW_VERS_ADDR_PRE8 - FW_FLASH_BOOT_ADDR,
938 
939 	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
940 	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
941 	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
942 	BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
943 	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC /* 1 byte * length increment  */
944 };
945 
946 /**
947  *	sf1_read - read data from the serial flash
948  *	@adapter: the adapter
949  *	@byte_cnt: number of bytes to read
950  *	@cont: whether another operation will be chained
951  *	@valp: where to store the read data
952  *
953  *	Reads up to 4 bytes of data from the serial flash.  The location of
954  *	the read needs to be specified prior to calling this by issuing the
955  *	appropriate commands to the serial flash.
956  */
957 static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
958 		    u32 *valp)
959 {
960 	int ret;
961 
962 	if (!byte_cnt || byte_cnt > 4)
963 		return -EINVAL;
964 	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
965 		return -EBUSY;
966 	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
967 	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
968 	if (!ret)
969 		*valp = t3_read_reg(adapter, A_SF_DATA);
970 	return ret;
971 }
972 
973 /**
974  *	sf1_write - write data to the serial flash
975  *	@adapter: the adapter
976  *	@byte_cnt: number of bytes to write
977  *	@cont: whether another operation will be chained
978  *	@val: value to write
979  *
980  *	Writes up to 4 bytes of data to the serial flash.  The location of
981  *	the write needs to be specified prior to calling this by issuing the
982  *	appropriate commands to the serial flash.
983  */
984 static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
985 		     u32 val)
986 {
987 	if (!byte_cnt || byte_cnt > 4)
988 		return -EINVAL;
989 	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
990 		return -EBUSY;
991 	t3_write_reg(adapter, A_SF_DATA, val);
992 	t3_write_reg(adapter, A_SF_OP,
993 		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
994 	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
995 }
996 
997 /**
998  *	flash_wait_op - wait for a flash operation to complete
999  *	@adapter: the adapter
1000  *	@attempts: max number of polls of the status register
1001  *	@delay: delay between polls in ms
1002  *
1003  *	Wait for a flash operation to complete by polling the status register.
1004  */
1005 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
1006 {
1007 	int ret;
1008 	u32 status;
1009 
1010 	while (1) {
1011 		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
1012 		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
1013 			return ret;
1014 		if (!(status & 1))
1015 			return 0;
1016 		if (--attempts == 0)
1017 			return -EAGAIN;
1018 		if (delay)
1019 			msleep(delay);
1020 	}
1021 }
1022 
1023 /**
1024  *	t3_read_flash - read words from serial flash
1025  *	@adapter: the adapter
1026  *	@addr: the start address for the read
1027  *	@nwords: how many 32-bit words to read
1028  *	@data: where to store the read data
1029  *	@byte_oriented: whether to store data as bytes or as words
1030  *
1031  *	Read the specified number of 32-bit words from the serial flash.
1032  *	If @byte_oriented is set the read data is stored as a byte array
1033  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
1034  *	natural endianness.
1035  */
1036 int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
1037 		  u32 *data, int byte_oriented)
1038 {
1039 	int ret;
1040 
1041 	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
1042 		return -EINVAL;
1043 
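	/*
	 * The SF interface shifts the least-significant byte of the data
	 * word out first, so swab32() lays the 24-bit flash address out
	 * big-endian behind the SF_RD_DATA_FAST opcode byte.
	 */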
1044 	addr = swab32(addr) | SF_RD_DATA_FAST;
1045 
1046 	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
1047 	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
1048 		return ret;
1049 
1050 	for ( ; nwords; nwords--, data++) {
1051 		ret = sf1_read(adapter, 4, nwords > 1, data);
1052 		if (ret)
1053 			return ret;
1054 		if (byte_oriented)
1055 			*data = htonl(*data);
1056 	}
1057 	return 0;
1058 }
1059 
1060 /**
1061  *	t3_write_flash - write up to a page of data to the serial flash
1062  *	@adapter: the adapter
1063  *	@addr: the start address to write
1064  *	@n: length of data to write
1065  *	@data: the data to write
1066  *	@byte_oriented: whether to store data as bytes or as words
1067  *
1068  *	Writes up to a page of data (256 bytes) to the serial flash starting
1069  *	at the given address.
1070  *	If @byte_oriented is set the write data is stored as a 32-bit
1071  *	big-endian array, otherwise in the processor's native endianness.
1073  */
1074 static int t3_write_flash(adapter_t *adapter, unsigned int addr,
1075 			  unsigned int n, const u8 *data,
1076 			  int byte_oriented)
1077 {
1078 	int ret;
1079 	u32 buf[64];
1080 	unsigned int c, left, val, offset = addr & 0xff;
1081 
1082 	if (addr + n > SF_SIZE || offset + n > 256)
1083 		return -EINVAL;
1084 
1085 	val = swab32(addr) | SF_PROG_PAGE;
1086 
1087 	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1088 	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
1089 		return ret;
1090 
1091 	for (left = n; left; left -= c) {
1092 		c = min(left, 4U);
1093 		val = *(const u32*)data;
1094 		data += c;
1095 		if (byte_oriented)
1096 			val = htonl(val);
1097 
1098 		ret = sf1_write(adapter, c, c != left, val);
1099 		if (ret)
1100 			return ret;
1101 	}
1102 	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
1103 		return ret;
1104 
1105 	/* Read the page to verify the write succeeded */
1106 	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
1107 			    byte_oriented);
1108 	if (ret)
1109 		return ret;
1110 
1111 	if (memcmp(data - n, (u8 *)buf + offset, n))
1112 		return -EIO;
1113 	return 0;
1114 }
1115 
1116 /**
1117  *	t3_get_tp_version - read the tp sram version
1118  *	@adapter: the adapter
1119  *	@vers: where to place the version
1120  *
1121  *	Reads the protocol sram version from sram.
1122  */
1123 int t3_get_tp_version(adapter_t *adapter, u32 *vers)
1124 {
1125 	int ret;
1126 
1127 	/* Get version loaded in SRAM */
1128 	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
1129 	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
1130 			      1, 1, 5, 1);
1131 	if (ret)
1132 		return ret;
1133 
1134 	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
1135 
1136 	return 0;
1137 }
1138 
1139 /**
1140  *	t3_check_tpsram_version - check the TP SRAM version
1141  *	@adapter: the adapter
1142  *
1143  */
1144 int t3_check_tpsram_version(adapter_t *adapter)
1145 {
1146 	int ret;
1147 	u32 vers;
1148 	unsigned int major, minor;
1149 
1150 	if (adapter->params.rev == T3_REV_A)
1151 		return 0;
1152 
1154 	ret = t3_get_tp_version(adapter, &vers);
1155 	if (ret)
1156 		return ret;
1157 
1160 	major = G_TP_VERSION_MAJOR(vers);
1161 	minor = G_TP_VERSION_MINOR(vers);
1162 
1163 	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
1164 		return 0;
1165 	else {
1166 		CH_ERR(adapter, "found wrong TP version (%u.%u), "
1167 		       "driver compiled for version %d.%d\n", major, minor,
1168 		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
1169 	}
1170 	return -EINVAL;
1171 }
1172 
1173 /**
1174  *	t3_check_tpsram - check if provided protocol SRAM
1175  *			  is compatible with this driver
1176  *	@adapter: the adapter
1177  *	@tp_sram: the protocol SRAM image to check
1178  *	@size: image size
1179  *
1180  *	Verifies the checksum of a protocol SRAM image.  Returns 0 if the
1181  *	image is valid, a negative error otherwise.
1182  */
1183 int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
1184 {
1185 	u32 csum;
1186 	unsigned int i;
1187 	const u32 *p = (const u32 *)tp_sram;
1188 
1189 	/* Verify checksum */
1190 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1191 		csum += ntohl(p[i]);
1192 	if (csum != 0xffffffff) {
1193 		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1194 		       csum);
1195 		return -EINVAL;
1196 	}
1197 
1198 	return 0;
1199 }
1200 
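/*
 * Illustrative sketch (hypothetical helper, not part of the driver): an
 * image passes the check above when the 32-bit sum of its big-endian
 * words is 0xffffffff, so a build tool could compute the final fix-up
 * word like this:
 */
static inline u32 example_csum_fixup(const u32 *image, unsigned int nwords)
{
	u32 sum = 0;
	unsigned int i;

	for (i = 0; i < nwords; i++)
		sum += ntohl(image[i]);
	/* appending htonl(0xffffffff - sum) brings the total to 0xffffffff */
	return 0xffffffff - sum;
}
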
1201 enum fw_version_type {
1202 	FW_VERSION_N3,
1203 	FW_VERSION_T3
1204 };
1205 
1206 /**
1207  *	t3_get_fw_version - read the firmware version
1208  *	@adapter: the adapter
1209  *	@vers: where to place the version
1210  *
1211  *	Reads the FW version from flash. Note that we had to move the version
1212  *	due to FW size. If we don't find a valid FW version in the new location
1213  *	we fall back and read the old location.
1214  */
1215 int t3_get_fw_version(adapter_t *adapter, u32 *vers)
1216 {
1217 	int ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1218 	if (!ret && *vers != 0xffffffff)
1219 		return 0;
1220 	else
1221 		return t3_read_flash(adapter, FW_VERS_ADDR_PRE8, 1, vers, 0);
1222 }
1223 
1224 /**
1225  *	t3_check_fw_version - check if the FW is compatible with this driver
1226  *	@adapter: the adapter
1227  *
1228  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
1229  *	if the versions are compatible, a negative error otherwise.
1230  */
1231 int t3_check_fw_version(adapter_t *adapter)
1232 {
1233 	int ret;
1234 	u32 vers;
1235 	unsigned int type, major, minor;
1236 
1237 	ret = t3_get_fw_version(adapter, &vers);
1238 	if (ret)
1239 		return ret;
1240 
1241 	type = G_FW_VERSION_TYPE(vers);
1242 	major = G_FW_VERSION_MAJOR(vers);
1243 	minor = G_FW_VERSION_MINOR(vers);
1244 
1245 	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1246 	    minor == FW_VERSION_MINOR)
1247 		return 0;
1248 
1249 	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1250 		CH_WARN(adapter, "found old FW minor version(%u.%u), "
1251 		        "driver compiled for version %u.%u\n", major, minor,
1252 			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1253 	else {
1254 		CH_WARN(adapter, "found newer FW version(%u.%u), "
1255 		        "driver compiled for version %u.%u\n", major, minor,
1256 			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1257 		return 0;
1258 	}
1259 	return -EINVAL;
1260 }
1261 
1262 /**
1263  *	t3_flash_erase_sectors - erase a range of flash sectors
1264  *	@adapter: the adapter
1265  *	@start: the first sector to erase
1266  *	@end: the last sector to erase
1267  *
1268  *	Erases the sectors in the given range.
1269  */
1270 static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
1271 {
1272 	while (start <= end) {
1273 		int ret;
1274 
1275 		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1276 		    (ret = sf1_write(adapter, 4, 0,
1277 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1278 		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
1279 			return ret;
1280 		start++;
1281 	}
1282 	return 0;
1283 }
1284 
1285 /**
1286  *	t3_load_fw - download firmware
1287  *	@adapter: the adapter
1288  *	@fw_data: the firmware image to write
1289  *	@size: image size
1290  *
1291  *	Write the supplied firmware image to the card's serial flash.
1292  *	The FW image has the following sections: @size - 8 bytes of code and
1293  *	data, followed by 4 bytes of FW version, followed by the 32-bit
1294  *	1's complement checksum of the whole image.
1295  */
1296 int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
1297 {
1298 	u32 version, csum, fw_version_addr;
1299 	unsigned int i;
1300 	const u32 *p = (const u32 *)fw_data;
1301 	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1302 
1303 	if ((size & 3) || size < FW_MIN_SIZE)
1304 		return -EINVAL;
1305 	if (size - 8 > FW_MAX_SIZE)
1306 		return -EFBIG;
1307 
1308 	version = ntohl(*(const u32 *)(fw_data + size - 8));
1309 	if (G_FW_VERSION_MAJOR(version) < 8) {
1310 
1311 		fw_version_addr = FW_VERS_ADDR_PRE8;
1312 
1313 		if (size - 8 > FW_MAX_SIZE_PRE8)
1314 			return -EFBIG;
1315 	} else
1316 		fw_version_addr = FW_VERS_ADDR;
1317 
1318 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1319 		csum += ntohl(p[i]);
1320 	if (csum != 0xffffffff) {
1321 		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1322 		       csum);
1323 		return -EINVAL;
1324 	}
1325 
1326 	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1327 	if (ret)
1328 		goto out;
1329 
1330 	size -= 8;  /* trim off version and checksum */
1331 	for (addr = FW_FLASH_BOOT_ADDR; size; ) {
1332 		unsigned int chunk_size = min(size, 256U);
1333 
1334 		ret = t3_write_flash(adapter, addr, chunk_size, fw_data, 1);
1335 		if (ret)
1336 			goto out;
1337 
1338 		addr += chunk_size;
1339 		fw_data += chunk_size;
1340 		size -= chunk_size;
1341 	}
1342 
1343 	ret = t3_write_flash(adapter, fw_version_addr, 4, fw_data, 1);
1344 out:
1345 	if (ret)
1346 		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1347 	return ret;
1348 }
1349 
1350 /**
1351  *	t3_load_boot - download boot flash
1352  *	@adapter: the adapter
1353  *	@boot_data: the boot image to write
1354  *	@size: image size
1355  *
1356  *	Write the supplied boot image to the card's serial flash.
1357  *	The boot image has the following sections: a 28-byte header and the
1358  *	boot image.
1359  */
1360 int t3_load_boot(adapter_t *adapter, u8 *boot_data, unsigned int size)
1361 {
1362 	boot_header_t *header = (boot_header_t *)boot_data;
1363 	int ret;
1364 	unsigned int addr;
1365 	unsigned int boot_sector = BOOT_FLASH_BOOT_ADDR >> 16;
1366 	unsigned int boot_end = (BOOT_FLASH_BOOT_ADDR + size - 1) >> 16;
1367 
1368 	/*
1369 	 * Perform some primitive sanity testing to avoid accidentally
1370 	 * writing garbage over the boot sectors.  We ought to check for
1371 	 * more but it's not worth it for now ...
1372 	 */
1373 	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1374 		CH_ERR(adapter, "boot image too small/large\n");
1375 		return -EFBIG;
1376 	}
1377 	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
1378 		CH_ERR(adapter, "boot image missing signature\n");
1379 		return -EINVAL;
1380 	}
1381 	if (header->length * BOOT_SIZE_INC != size) {
1382 		CH_ERR(adapter, "boot image header length != image length\n");
1383 		return -EINVAL;
1384 	}
1385 
1386 	ret = t3_flash_erase_sectors(adapter, boot_sector, boot_end);
1387 	if (ret)
1388 		goto out;
1389 
1390 	for (addr = BOOT_FLASH_BOOT_ADDR; size; ) {
1391 		unsigned int chunk_size = min(size, 256U);
1392 
1393 		ret = t3_write_flash(adapter, addr, chunk_size, boot_data, 0);
1394 		if (ret)
1395 			goto out;
1396 
1397 		addr += chunk_size;
1398 		boot_data += chunk_size;
1399 		size -= chunk_size;
1400 	}
1401 
1402 out:
1403 	if (ret)
1404 		CH_ERR(adapter, "boot image download failed, error %d\n", ret);
1405 	return ret;
1406 }
1407 
1408 #define CIM_CTL_BASE 0x2000
1409 
1410 /**
1411  *	t3_cim_ctl_blk_read - read a block from CIM control region
1412  *	@adap: the adapter
1413  *	@addr: the start address within the CIM control region
1414  *	@n: number of words to read
1415  *	@valp: where to store the result
1416  *
1417  *	Reads a block of 4-byte words from the CIM control region.
1418  */
1419 int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
1420 			unsigned int *valp)
1421 {
1422 	int ret = 0;
1423 
1424 	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1425 		return -EBUSY;
1426 
1427 	for ( ; !ret && n--; addr += 4) {
1428 		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1429 		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1430 				      0, 5, 2);
1431 		if (!ret)
1432 			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1433 	}
1434 	return ret;
1435 }
1436 
1437 static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1438 			       u32 *rx_hash_high, u32 *rx_hash_low)
1439 {
1440 	/* stop Rx unicast traffic */
1441 	t3_mac_disable_exact_filters(mac);
1442 
1443 	/* stop broadcast, multicast, promiscuous mode traffic */
1444 	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG + mac->offset);
1445 	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG + mac->offset,
1446 			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1447 			 F_DISBCAST);
1448 
1449 	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH +
1450 	    mac->offset);
1451 	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH + mac->offset, 0);
1452 
1453 	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW +
1454 	    mac->offset);
1455 	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW + mac->offset, 0);
1456 
1457 	/* Leave time to drain max RX fifo */
1458 	msleep(1);
1459 }
1460 
1461 static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1462 			       u32 rx_hash_high, u32 rx_hash_low)
1463 {
1464 	t3_mac_enable_exact_filters(mac);
1465 	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG + mac->offset,
1466 			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1467 			 rx_cfg);
1468 	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH + mac->offset,
1469 	    rx_hash_high);
1470 	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW + mac->offset,
1471 	    rx_hash_low);
1472 }
1473 
1474 static int t3_detect_link_fault(adapter_t *adapter, int port_id)
1475 {
1476 	struct port_info *pi = adap2pinfo(adapter, port_id);
1477 	struct cmac *mac = &pi->mac;
1478 	uint32_t rx_cfg, rx_hash_high, rx_hash_low;
1479 	int link_fault;
1480 
1481 	/* stop rx */
1482 	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1483 	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1484 
1485 	/* clear status and make sure intr is enabled */
1486 	(void) t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1487 	t3_xgm_intr_enable(adapter, port_id);
1488 
1489 	/* restart rx */
1490 	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, F_RXEN);
1491 	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1492 
1493 	link_fault = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1494 	return (link_fault & F_LINKFAULTCHANGE ? 1 : 0);
1495 }
1496 
1497 static void t3_clear_faults(adapter_t *adapter, int port_id)
1498 {
1499 	struct port_info *pi = adap2pinfo(adapter, port_id);
1500 	struct cmac *mac = &pi->mac;
1501 
1502 	if (adapter->params.nports <= 2) {
1503 		t3_xgm_intr_disable(adapter, pi->port_id);
1504 		t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1505 		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, F_XGM_INT);
1506 		t3_set_reg_field(adapter, A_XGM_INT_ENABLE + mac->offset,
1507 				 F_XGM_INT, F_XGM_INT);
1508 		t3_xgm_intr_enable(adapter, pi->port_id);
1509 	}
1510 }
1511 
1512 /**
1513  *	t3_link_changed - handle interface link changes
1514  *	@adapter: the adapter
1515  *	@port_id: the port index that changed link state
1516  *
1517  *	Called when a port's link settings change to propagate the new values
1518  *	to the associated PHY and MAC.  After performing the common tasks it
1519  *	invokes an OS-specific handler.
1520  */
1521 void t3_link_changed(adapter_t *adapter, int port_id)
1522 {
1523 	int link_ok, speed, duplex, fc, link_fault;
1524 	struct port_info *pi = adap2pinfo(adapter, port_id);
1525 	struct cphy *phy = &pi->phy;
1526 	struct cmac *mac = &pi->mac;
1527 	struct link_config *lc = &pi->link_config;
1528 
1529 	link_ok = lc->link_ok;
1530 	speed = lc->speed;
1531 	duplex = lc->duplex;
1532 	fc = lc->fc;
1533 	link_fault = 0;
1534 
1535 	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1536 
1537 	if (link_ok == 0)
1538 		pi->link_fault = LF_NO;
1539 
1540 	if (lc->requested_fc & PAUSE_AUTONEG)
1541 		fc &= lc->requested_fc;
1542 	else
1543 		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1544 
1545 	/* Update mac speed before checking for link fault. */
1546 	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE &&
1547 	    (speed != lc->speed || duplex != lc->duplex || fc != lc->fc))
1548 		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1549 
1550 	/*
1551 	 * Check for link faults if any of these is true:
1552 	 * a) A link fault is suspected, and PHY says link ok
1553 	 * b) PHY link transitioned from down -> up
1554 	 */
1555 	if (adapter->params.nports <= 2 &&
1556 	    ((pi->link_fault && link_ok) || (!lc->link_ok && link_ok))) {
1557 
1558 		link_fault = t3_detect_link_fault(adapter, port_id);
1559 		if (link_fault) {
1560 			if (pi->link_fault != LF_YES) {
1561 				mac->stats.link_faults++;
1562 				pi->link_fault = LF_YES;
1563 			}
1564 
1565 			if (uses_xaui(adapter)) {
1566 				if (adapter->params.rev >= T3_REV_C)
1567 					t3c_pcs_force_los(mac);
1568 				else
1569 					t3b_pcs_reset(mac);
1570 			}
1571 
1572 			/* Don't report link up */
1573 			link_ok = 0;
1574 		} else {
1575 			/* clear faults here if this was a false alarm. */
1576 			if (pi->link_fault == LF_MAYBE &&
1577 			    link_ok && lc->link_ok)
1578 				t3_clear_faults(adapter, port_id);
1579 
1580 			pi->link_fault = LF_NO;
1581 		}
1582 	}
1583 
1584 	if (link_ok == lc->link_ok && speed == lc->speed &&
1585 	    duplex == lc->duplex && fc == lc->fc)
1586 		return;                            /* nothing changed */
1587 
1588 	lc->link_ok = (unsigned char)link_ok;
1589 	lc->speed = speed < 0 ? SPEED_INVALID : speed;
1590 	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1591 	lc->fc = fc;
1592 
1593 	if (link_ok) {
1594 
1595 		/* down -> up, or up -> up with changed settings */
1596 
1597 		if (adapter->params.rev > 0 && uses_xaui(adapter)) {
1598 
1599 			if (adapter->params.rev >= T3_REV_C)
1600 				t3c_pcs_force_los(mac);
1601 			else
1602 				t3b_pcs_reset(mac);
1603 
1604 			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1605 				     F_TXACTENABLE | F_RXEN);
1606 		}
1607 
1608 		/* disable TX FIFO drain */
1609 		t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + mac->offset,
1610 				 F_ENDROPPKT, 0);
1611 
1612 		t3_mac_enable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1613 		t3_set_reg_field(adapter, A_XGM_STAT_CTRL + mac->offset,
1614 				 F_CLRSTATS, 1);
1615 		t3_clear_faults(adapter, port_id);
1616 
1617 	} else {
1618 
1619 		/* up -> down */
1620 
1621 		if (adapter->params.rev > 0 && uses_xaui(adapter)) {
1622 			t3_write_reg(adapter,
1623 				     A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1624 		}
1625 
1626 		t3_xgm_intr_disable(adapter, pi->port_id);
1627 		if (adapter->params.nports <= 2) {
1628 			t3_set_reg_field(adapter,
1629 					 A_XGM_INT_ENABLE + mac->offset,
1630 					 F_XGM_INT, 0);
1631 
1632 			t3_mac_disable(mac, MAC_DIRECTION_RX);
1633 
1634 			/*
1635 			 * Make sure Tx FIFO continues to drain, even as rxen is
1636 			 * left high to help detect and indicate remote faults.
1637 			 */
1638 			t3_set_reg_field(adapter,
1639 			    A_XGM_TXFIFO_CFG + mac->offset, 0, F_ENDROPPKT);
1640 			t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1641 			t3_write_reg(adapter,
1642 			    A_XGM_TX_CTRL + mac->offset, F_TXEN);
1643 			t3_write_reg(adapter,
1644 			    A_XGM_RX_CTRL + mac->offset, F_RXEN);
1645 		}
1646 	}
1647 
1648 	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc,
1649 	    mac->was_reset);
1650 	mac->was_reset = 0;
1651 }
1652 
1653 /**
1654  *	t3_link_start - apply link configuration to MAC/PHY
1655  *	@phy: the PHY to setup
1656  *	@mac: the MAC to setup
1657  *	@lc: the requested link configuration
1658  *
1659  *	Set up a port's MAC and PHY according to a desired link configuration.
1660  *	- If the PHY can auto-negotiate first decide what to advertise, then
1661  *	  enable/disable auto-negotiation as desired, and reset.
1662  *	- If the PHY does not auto-negotiate just reset it.
1663  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1664  *	  otherwise do it later based on the outcome of auto-negotiation.
1665  */
1666 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1667 {
1668 	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1669 
1670 	lc->link_ok = 0;
1671 	if (lc->supported & SUPPORTED_Autoneg) {
1672 		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1673 		if (fc) {
1674 			lc->advertising |= ADVERTISED_Asym_Pause;
1675 			if (fc & PAUSE_RX)
1676 				lc->advertising |= ADVERTISED_Pause;
1677 		}
1678 
1679 		phy->ops->advertise(phy, lc->advertising);
1680 
1681 		if (lc->autoneg == AUTONEG_DISABLE) {
1682 			lc->speed = lc->requested_speed;
1683 			lc->duplex = lc->requested_duplex;
1684 			lc->fc = (unsigned char)fc;
1685 			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1686 						   fc);
1687 			/* Also disables autoneg */
1688 			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1689 			/* PR 5666. Power phy up when doing an ifup */
1690 			if (!is_10G(phy->adapter))
1691 				phy->ops->power_down(phy, 0);
1692 		} else
1693 			phy->ops->autoneg_enable(phy);
1694 	} else {
1695 		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1696 		lc->fc = (unsigned char)fc;
1697 		phy->ops->reset(phy, 0);
1698 	}
1699 	return 0;
1700 }
1701 
1702 /**
1703  *	t3_set_vlan_accel - control HW VLAN extraction
1704  *	@adapter: the adapter
1705  *	@ports: bitmap of adapter ports to operate on
1706  *	@on: enable (1) or disable (0) HW VLAN extraction
1707  *
1708  *	Enables or disables HW extraction of VLAN tags for the given port.
1709  */
1710 void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
1711 {
1712 	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1713 			 ports << S_VLANEXTRACTIONENABLE,
1714 			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1715 }
1716 
1717 struct intr_info {
1718 	unsigned int mask;       /* bits to check in interrupt status */
1719 	const char *msg;         /* message to print or NULL */
1720 	short stat_idx;          /* stat counter to increment or -1 */
1721 	unsigned short fatal;    /* whether the condition reported is fatal */
1722 };
1723 
1724 /**
1725  *	t3_handle_intr_status - table driven interrupt handler
1726  *	@adapter: the adapter that generated the interrupt
1727  *	@reg: the interrupt status register to process
1728  *	@mask: a mask to apply to the interrupt status
1729  *	@acts: table of interrupt actions
1730  *	@stats: statistics counters tracking interrupt occurrences
1731  *
1732  *	A table driven interrupt handler that applies a set of masks to an
1733  *	interrupt status word and performs the corresponding actions if the
1734  *	interrupts described by the mask have occurred.  The actions include
1735  *	optionally printing a warning or alert message, and optionally
1736  *	incrementing a stat counter.  The table is terminated by an entry
1737  *	specifying mask 0.  Returns the number of fatal interrupt conditions.
1738  */
1739 static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
1740 				 unsigned int mask,
1741 				 const struct intr_info *acts,
1742 				 unsigned long *stats)
1743 {
1744 	int fatal = 0;
1745 	unsigned int status = t3_read_reg(adapter, reg) & mask;
1746 
1747 	for ( ; acts->mask; ++acts) {
1748 		if (!(status & acts->mask)) continue;
1749 		if (acts->fatal) {
1750 			fatal++;
1751 			CH_ALERT(adapter, "%s (0x%x)\n",
1752 				 acts->msg, status & acts->mask);
1753 		} else if (acts->msg)
1754 			CH_WARN(adapter, "%s (0x%x)\n",
1755 				acts->msg, status & acts->mask);
1756 		if (acts->stat_idx >= 0)
1757 			stats[acts->stat_idx]++;
1758 	}
1759 	if (status)                           /* clear processed interrupts */
1760 		t3_write_reg(adapter, reg, status);
1761 	return fatal;
1762 }
1763 
1764 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1765 		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1766 		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1767 		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1768 		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1769 		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1770 		       F_HIRCQPARITYERROR)
1771 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1772 		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1773 		       F_NFASRCHFAIL)
1774 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1775 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1776 		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1777 		       F_TXFIFO_UNDERRUN)
1778 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1779 			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1780 			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1781 			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1782 			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1783 			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1784 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1785 			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1786 			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1787 			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1788 			F_TXPARERR | V_BISTERR(M_BISTERR))
1789 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1790 			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1791 			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1792 #define ULPTX_INTR_MASK 0xfc
1793 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1794 			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1795 			 F_ZERO_SWITCH_ERROR)
1796 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1797 		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1798 		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1799 		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1800 		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1801 		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1802 		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1803 		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1804 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1805 			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1806 			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1807 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1808 			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1809 			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1810 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1811 		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1812 		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1813 		       V_MCAPARERRENB(M_MCAPARERRENB))
1814 #define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1815 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1816 		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1817 		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1818 		      F_MPS0 | F_CPL_SWITCH)
1819 /*
1820  * Interrupt handler for the PCIX1 module.
1821  */
1822 static void pci_intr_handler(adapter_t *adapter)
1823 {
1824 	static struct intr_info pcix1_intr_info[] = {
1825 		{ F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
1826 		{ F_SIGTARABT, "PCI signaled target abort", -1, 1 },
1827 		{ F_RCVTARABT, "PCI received target abort", -1, 1 },
1828 		{ F_RCVMSTABT, "PCI received master abort", -1, 1 },
1829 		{ F_SIGSYSERR, "PCI signaled system error", -1, 1 },
1830 		{ F_DETPARERR, "PCI detected parity error", -1, 1 },
1831 		{ F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
1832 		{ F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
1833 		{ F_RCVSPLCMPERR, "PCI received split completion error", -1,
1834 		  1 },
1835 		{ F_DETCORECCERR, "PCI correctable ECC error",
1836 		  STAT_PCI_CORR_ECC, 0 },
1837 		{ F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
1838 		{ F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1839 		{ V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1840 		  1 },
1841 		{ V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1842 		  1 },
1843 		{ V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1844 		  1 },
1845 		{ V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1846 		  "error", -1, 1 },
1847 		{ 0 }
1848 	};
1849 
1850 	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1851 				  pcix1_intr_info, adapter->irq_stats))
1852 		t3_fatal_err(adapter);
1853 }
1854 
1855 /*
1856  * Interrupt handler for the PCIE module.
1857  */
1858 static void pcie_intr_handler(adapter_t *adapter)
1859 {
1860 	static struct intr_info pcie_intr_info[] = {
1861 		{ F_PEXERR, "PCI PEX error", -1, 1 },
1862 		{ F_UNXSPLCPLERRR,
1863 		  "PCI unexpected split completion DMA read error", -1, 1 },
1864 		{ F_UNXSPLCPLERRC,
1865 		  "PCI unexpected split completion DMA command error", -1, 1 },
1866 		{ F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
1867 		{ F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
1868 		{ F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
1869 		{ F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
1870 		{ V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1871 		  "PCI MSI-X table/PBA parity error", -1, 1 },
1872 		{ F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1 },
1873 		{ F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1 },
1874 		{ F_RXPARERR, "PCI Rx parity error", -1, 1 },
1875 		{ F_TXPARERR, "PCI Tx parity error", -1, 1 },
1876 		{ V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
1877 		{ 0 }
1878 	};
1879 
1880 	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1881 		CH_ALERT(adapter, "PEX error code 0x%x\n",
1882 			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1883 
1884 	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1885 				  pcie_intr_info, adapter->irq_stats))
1886 		t3_fatal_err(adapter);
1887 }
1888 
1889 /*
1890  * TP interrupt handler.
1891  */
1892 static void tp_intr_handler(adapter_t *adapter)
1893 {
1894 	static struct intr_info tp_intr_info[] = {
1895 		{ 0xffffff,  "TP parity error", -1, 1 },
1896 		{ 0x1000000, "TP out of Rx pages", -1, 1 },
1897 		{ 0x2000000, "TP out of Tx pages", -1, 1 },
1898 		{ 0 }
1899 	};
1900 	static struct intr_info tp_intr_info_t3c[] = {
1901 		{ 0x1fffffff,  "TP parity error", -1, 1 },
1902 		{ F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1 },
1903 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1904 		{ 0 }
1905 	};
1906 
1907 	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1908 				  adapter->params.rev < T3_REV_C ?
1909 					tp_intr_info : tp_intr_info_t3c, NULL))
1910 		t3_fatal_err(adapter);
1911 }
1912 
1913 /*
1914  * CIM interrupt handler.
1915  */
1916 static void cim_intr_handler(adapter_t *adapter)
1917 {
1918 	static struct intr_info cim_intr_info[] = {
1919 		{ F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
1920 		{ F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
1921 		{ F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
1922 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1923 		{ F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
1924 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1925 		{ F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
1926 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1927 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1928 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1929 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1930 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1931 		{ F_DRAMPARERR, "CIM DRAM parity error", -1, 1 },
1932 		{ F_ICACHEPARERR, "CIM icache parity error", -1, 1 },
1933 		{ F_DCACHEPARERR, "CIM dcache parity error", -1, 1 },
1934 		{ F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1 },
1935 		{ F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1 },
1936 		{ F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1 },
1937 		{ F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1 },
1938 		{ F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1 },
1939 		{ F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1 },
1940 		{ F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1 },
1941 		{ F_ITAGPARERR, "CIM itag parity error", -1, 1 },
1942 		{ F_DTAGPARERR, "CIM dtag parity error", -1, 1 },
1943 		{ 0 }
1944 	};
1945 
1946 	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, CIM_INTR_MASK,
1947 				  cim_intr_info, NULL))
1948 		t3_fatal_err(adapter);
1949 }
1950 
1951 /*
1952  * ULP RX interrupt handler.
1953  */
1954 static void ulprx_intr_handler(adapter_t *adapter)
1955 {
1956 	static struct intr_info ulprx_intr_info[] = {
1957 		{ F_PARERRDATA, "ULP RX data parity error", -1, 1 },
1958 		{ F_PARERRPCMD, "ULP RX command parity error", -1, 1 },
1959 		{ F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1 },
1960 		{ F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1 },
1961 		{ F_ARBFPERR, "ULP RX ArbF parity error", -1, 1 },
1962 		{ F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1 },
1963 		{ F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1 },
1964 		{ F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1 },
1965 		{ 0 }
1966 	};
1967 
1968 	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1969 				  ulprx_intr_info, NULL))
1970 		t3_fatal_err(adapter);
1971 }
1972 
1973 /*
1974  * ULP TX interrupt handler.
1975  */
1976 static void ulptx_intr_handler(adapter_t *adapter)
1977 {
1978 	static struct intr_info ulptx_intr_info[] = {
1979 		{ F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1980 		  STAT_ULP_CH0_PBL_OOB, 0 },
1981 		{ F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1982 		  STAT_ULP_CH1_PBL_OOB, 0 },
1983 		{ 0xfc, "ULP TX parity error", -1, 1 },
1984 		{ 0 }
1985 	};
1986 
1987 	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1988 				  ulptx_intr_info, adapter->irq_stats))
1989 		t3_fatal_err(adapter);
1990 }
1991 
1992 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1993 	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1994 	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1995 	F_ICSPI1_TX_FRAMING_ERROR)
1996 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1997 	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1998 	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1999 	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
2000 
2001 /*
2002  * PM TX interrupt handler.
2003  */
2004 static void pmtx_intr_handler(adapter_t *adapter)
2005 {
2006 	static struct intr_info pmtx_intr_info[] = {
2007 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
2008 		{ ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
2009 		{ OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
2010 		{ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
2011 		  "PMTX ispi parity error", -1, 1 },
2012 		{ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
2013 		  "PMTX ospi parity error", -1, 1 },
2014 		{ 0 }
2015 	};
2016 
2017 	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
2018 				  pmtx_intr_info, NULL))
2019 		t3_fatal_err(adapter);
2020 }
2021 
2022 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
2023 	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
2024 	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
2025 	F_IESPI1_TX_FRAMING_ERROR)
2026 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
2027 	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
2028 	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
2029 	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
2030 
2031 /*
2032  * PM RX interrupt handler.
2033  */
2034 static void pmrx_intr_handler(adapter_t *adapter)
2035 {
2036 	static struct intr_info pmrx_intr_info[] = {
2037 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
2038 		{ IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
2039 		{ OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
2040 		{ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
2041 		  "PMRX ispi parity error", -1, 1 },
2042 		{ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
2043 		  "PMRX ospi parity error", -1, 1 },
2044 		{ 0 }
2045 	};
2046 
2047 	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
2048 				  pmrx_intr_info, NULL))
2049 		t3_fatal_err(adapter);
2050 }
2051 
2052 /*
2053  * CPL switch interrupt handler.
2054  */
2055 static void cplsw_intr_handler(adapter_t *adapter)
2056 {
2057 	static struct intr_info cplsw_intr_info[] = {
2058 		{ F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1 },
2059 		{ F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 },
2060 		{ F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
2061 		{ F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
2062 		{ F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
2063 		{ F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
2064 		{ 0 }
2065 	};
2066 
2067 	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
2068 				  cplsw_intr_info, NULL))
2069 		t3_fatal_err(adapter);
2070 }
2071 
2072 /*
2073  * MPS interrupt handler.
2074  */
2075 static void mps_intr_handler(adapter_t *adapter)
2076 {
2077 	static struct intr_info mps_intr_info[] = {
2078 		{ 0x1ff, "MPS parity error", -1, 1 },
2079 		{ 0 }
2080 	};
2081 
2082 	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
2083 				  mps_intr_info, NULL))
2084 		t3_fatal_err(adapter);
2085 }
2086 
2087 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
2088 
2089 /*
2090  * MC7 interrupt handler.
2091  */
2092 static void mc7_intr_handler(struct mc7 *mc7)
2093 {
2094 	adapter_t *adapter = mc7->adapter;
2095 	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
2096 
2097 	if (cause & F_CE) {
2098 		mc7->stats.corr_err++;
2099 		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
2100 			"data 0x%x 0x%x 0x%x\n", mc7->name,
2101 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
2102 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
2103 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
2104 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
2105 	}
2106 
2107 	if (cause & F_UE) {
2108 		mc7->stats.uncorr_err++;
2109 		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
2110 			 "data 0x%x 0x%x 0x%x\n", mc7->name,
2111 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
2112 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
2113 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
2114 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
2115 	}
2116 
2117 	if (G_PE(cause)) {
2118 		mc7->stats.parity_err++;
2119 		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
2120 			 mc7->name, G_PE(cause));
2121 	}
2122 
2123 	if (cause & F_AE) {
2124 		u32 addr = 0;
2125 
2126 		if (adapter->params.rev > 0)
2127 			addr = t3_read_reg(adapter,
2128 					   mc7->offset + A_MC7_ERR_ADDR);
2129 		mc7->stats.addr_err++;
2130 		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
2131 			 mc7->name, addr);
2132 	}
2133 
2134 	if (cause & MC7_INTR_FATAL)
2135 		t3_fatal_err(adapter);
2136 
2137 	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
2138 }
2139 
2140 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
2141 			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
2142 /*
2143  * XGMAC interrupt handler.
2144  */
2145 static int mac_intr_handler(adapter_t *adap, unsigned int idx)
2146 {
2147 	u32 cause;
2148 	struct port_info *pi;
2149 	struct cmac *mac;
2150 
2151 	idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
2152 	pi = adap2pinfo(adap, idx);
2153 	mac = &pi->mac;
2154 
2155 	/*
2156 	 * We mask out interrupt causes for which we're not taking interrupts.
2157 	 * This allows us to use polling logic to monitor some of the other
2158 	 * conditions when taking interrupts would impose too much load on the
2159 	 * system.
2160 	 */
2161 	cause = (t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset)
2162 		 & ~(F_RXFIFO_OVERFLOW));
2163 
2164 	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
2165 		mac->stats.tx_fifo_parity_err++;
2166 		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
2167 	}
2168 	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
2169 		mac->stats.rx_fifo_parity_err++;
2170 		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
2171 	}
2172 	if (cause & F_TXFIFO_UNDERRUN)
2173 		mac->stats.tx_fifo_urun++;
2174 	if (cause & F_RXFIFO_OVERFLOW)
2175 		mac->stats.rx_fifo_ovfl++;
2176 	if (cause & V_SERDES_LOS(M_SERDES_LOS))
2177 		mac->stats.serdes_signal_loss++;
2178 	if (cause & F_XAUIPCSCTCERR)
2179 		mac->stats.xaui_pcs_ctc_err++;
2180 	if (cause & F_XAUIPCSALIGNCHANGE)
2181 		mac->stats.xaui_pcs_align_change++;
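	/*
	 * The chained '&' below is intentional: it checks that XGM_INT is
	 * set both in the cause and in the interrupt-enable register,
	 * i.e. (cause & F_XGM_INT) & enable.
	 */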
2182 	if (cause & F_XGM_INT &
2183 	    t3_read_reg(adap, A_XGM_INT_ENABLE + mac->offset)) {
2184 		t3_set_reg_field(adap, A_XGM_INT_ENABLE + mac->offset,
2185 		    F_XGM_INT, 0);
2186 
2187 		/* link fault suspected */
2188 		pi->link_fault = LF_MAYBE;
2189 		t3_os_link_intr(pi);
2190 	}
2191 
2192 	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
2193 
2194 	if (cause & XGM_INTR_FATAL)
2195 		t3_fatal_err(adap);
2196 
2197 	return cause != 0;
2198 }
2199 
2200 /*
2201  * Interrupt handler for PHY events.
2202  */
2203 static int phy_intr_handler(adapter_t *adapter)
2204 {
2205 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
2206 
2207 	for_each_port(adapter, i) {
2208 		struct port_info *p = adap2pinfo(adapter, i);
2209 
2210 		if (!(p->phy.caps & SUPPORTED_IRQ))
2211 			continue;
2212 
2213 		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
2214 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
2215 
2216 			if (phy_cause & cphy_cause_link_change)
2217 				t3_os_link_intr(p);
2218 			if (phy_cause & cphy_cause_fifo_error)
2219 				p->phy.fifo_errors++;
2220 			if (phy_cause & cphy_cause_module_change)
2221 				t3_os_phymod_changed(adapter, i);
2222 			if (phy_cause & cphy_cause_alarm)
2223 				CH_WARN(adapter, "Operation affected due to "
2224 				    "adverse environment.  Check the spec "
2225 				    "sheet for corrective action.\n");
2226 		}
2227 	}
2228 
2229 	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
2230 	return 0;
2231 }
2232 
2233 /**
2234  *	t3_slow_intr_handler - control path interrupt handler
2235  *	@adapter: the adapter
2236  *
2237  *	T3 interrupt handler for non-data interrupt events, e.g., errors.
2238  *	The designation 'slow' is because it involves register reads, while
2239  *	data interrupts typically don't involve any MMIOs.
2240  */
2241 int t3_slow_intr_handler(adapter_t *adapter)
2242 {
2243 	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
2244 
2245 	cause &= adapter->slow_intr_mask;
2246 	if (!cause)
2247 		return 0;
2248 	if (cause & F_PCIM0) {
2249 		if (is_pcie(adapter))
2250 			pcie_intr_handler(adapter);
2251 		else
2252 			pci_intr_handler(adapter);
2253 	}
2254 	if (cause & F_SGE3)
2255 		t3_sge_err_intr_handler(adapter);
2256 	if (cause & F_MC7_PMRX)
2257 		mc7_intr_handler(&adapter->pmrx);
2258 	if (cause & F_MC7_PMTX)
2259 		mc7_intr_handler(&adapter->pmtx);
2260 	if (cause & F_MC7_CM)
2261 		mc7_intr_handler(&adapter->cm);
2262 	if (cause & F_CIM)
2263 		cim_intr_handler(adapter);
2264 	if (cause & F_TP1)
2265 		tp_intr_handler(adapter);
2266 	if (cause & F_ULP2_RX)
2267 		ulprx_intr_handler(adapter);
2268 	if (cause & F_ULP2_TX)
2269 		ulptx_intr_handler(adapter);
2270 	if (cause & F_PM1_RX)
2271 		pmrx_intr_handler(adapter);
2272 	if (cause & F_PM1_TX)
2273 		pmtx_intr_handler(adapter);
2274 	if (cause & F_CPL_SWITCH)
2275 		cplsw_intr_handler(adapter);
2276 	if (cause & F_MPS0)
2277 		mps_intr_handler(adapter);
2278 	if (cause & F_MC5A)
2279 		t3_mc5_intr_handler(&adapter->mc5);
2280 	if (cause & F_XGMAC0_0)
2281 		mac_intr_handler(adapter, 0);
2282 	if (cause & F_XGMAC0_1)
2283 		mac_intr_handler(adapter, 1);
2284 	if (cause & F_T3DBG)
2285 		phy_intr_handler(adapter);
2286 
2287 	/* Clear the interrupts just processed. */
2288 	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
2289 	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2290 	return 1;
2291 }
2292 
2293 static unsigned int calc_gpio_intr(adapter_t *adap)
2294 {
2295 	unsigned int i, gpi_intr = 0;
2296 
2297 	for_each_port(adap, i)
2298 		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
2299 		    adapter_info(adap)->gpio_intr[i])
2300 			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
2301 	return gpi_intr;
2302 }
2303 
2304 /**
2305  *	t3_intr_enable - enable interrupts
2306  *	@adapter: the adapter whose interrupts should be enabled
2307  *
2308  *	Enable interrupts by setting the interrupt enable registers of the
2309  *	various HW modules and then enabling the top-level interrupt
2310  *	concentrator.
2311  */
2312 void t3_intr_enable(adapter_t *adapter)
2313 {
2314 	static struct addr_val_pair intr_en_avp[] = {
2315 		{ A_MC7_INT_ENABLE, MC7_INTR_MASK },
2316 		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2317 			MC7_INTR_MASK },
2318 		{ A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2319 			MC7_INTR_MASK },
2320 		{ A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
2321 		{ A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
2322 		{ A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
2323 		{ A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
2324 		{ A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
2325 		{ A_MPS_INT_ENABLE, MPS_INTR_MASK },
2326 	};
2327 
2328 	adapter->slow_intr_mask = PL_INTR_MASK;
2329 
2330 	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
2331 	t3_write_reg(adapter, A_TP_INT_ENABLE,
2332 		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
2333 	t3_write_reg(adapter, A_SG_INT_ENABLE, SGE_INTR_MASK);
2334 
2335 	if (adapter->params.rev > 0) {
2336 		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
2337 			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
2338 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
2339 			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
2340 			     F_PBL_BOUND_ERR_CH1);
2341 	} else {
2342 		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
2343 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
2344 	}
2345 
2346 	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
2347 
2348 	if (is_pcie(adapter))
2349 		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
2350 	else
2351 		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
2352 	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2353 	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);          /* flush */
2354 }
2355 
2356 /**
2357  *	t3_intr_disable - disable a card's interrupts
2358  *	@adapter: the adapter whose interrupts should be disabled
2359  *
2360  *	Disable interrupts.  We only disable the top-level interrupt
2361  *	concentrator and the SGE data interrupts.
2362  */
2363 void t3_intr_disable(adapter_t *adapter)
2364 {
2365 	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2366 	(void) t3_read_reg(adapter, A_PL_INT_ENABLE0);  /* flush */
2367 	adapter->slow_intr_mask = 0;
2368 }
2369 
2370 /**
2371  *	t3_intr_clear - clear all interrupts
2372  *	@adapter: the adapter whose interrupts should be cleared
2373  *
2374  *	Clears all interrupts.
2375  */
2376 void t3_intr_clear(adapter_t *adapter)
2377 {
2378 	static const unsigned int cause_reg_addr[] = {
2379 		A_SG_INT_CAUSE,
2380 		A_SG_RSPQ_FL_STATUS,
2381 		A_PCIX_INT_CAUSE,
2382 		A_MC7_INT_CAUSE,
2383 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2384 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2385 		A_CIM_HOST_INT_CAUSE,
2386 		A_TP_INT_CAUSE,
2387 		A_MC5_DB_INT_CAUSE,
2388 		A_ULPRX_INT_CAUSE,
2389 		A_ULPTX_INT_CAUSE,
2390 		A_CPL_INTR_CAUSE,
2391 		A_PM1_TX_INT_CAUSE,
2392 		A_PM1_RX_INT_CAUSE,
2393 		A_MPS_INT_CAUSE,
2394 		A_T3DBG_INT_CAUSE,
2395 	};
2396 	unsigned int i;
2397 
2398 	/* Clear PHY and MAC interrupts for each port. */
2399 	for_each_port(adapter, i)
2400 		t3_port_intr_clear(adapter, i);
2401 
2402 	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2403 		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2404 
2405 	if (is_pcie(adapter))
2406 		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2407 	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2408 	(void) t3_read_reg(adapter, A_PL_INT_CAUSE0);          /* flush */
2409 }
2410 
2411 void t3_xgm_intr_enable(adapter_t *adapter, int idx)
2412 {
2413 	struct port_info *pi = adap2pinfo(adapter, idx);
2414 
2415 	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2416 		     XGM_EXTRA_INTR_MASK);
2417 }
2418 
2419 void t3_xgm_intr_disable(adapter_t *adapter, int idx)
2420 {
2421 	struct port_info *pi = adap2pinfo(adapter, idx);
2422 
2423 	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2424 		     0x7ff);
2425 }
2426 
2427 /**
2428  *	t3_port_intr_enable - enable port-specific interrupts
2429  *	@adapter: associated adapter
2430  *	@idx: index of port whose interrupts should be enabled
2431  *
2432  *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2433  *	adapter port.
2434  */
2435 void t3_port_intr_enable(adapter_t *adapter, int idx)
2436 {
2437 	struct port_info *pi = adap2pinfo(adapter, idx);
2438 
2439 	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
2440 	pi->phy.ops->intr_enable(&pi->phy);
2441 }
2442 
2443 /**
2444  *	t3_port_intr_disable - disable port-specific interrupts
2445  *	@adapter: associated adapter
2446  *	@idx: index of port whose interrupts should be disabled
2447  *
2448  *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2449  *	adapter port.
2450  */
2451 void t3_port_intr_disable(adapter_t *adapter, int idx)
2452 {
2453 	struct port_info *pi = adap2pinfo(adapter, idx);
2454 
2455 	t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
2456 	pi->phy.ops->intr_disable(&pi->phy);
2457 }
2458 
2459 /**
2460  *	t3_port_intr_clear - clear port-specific interrupts
2461  *	@adapter: associated adapter
2462  *	@idx: index of port whose interrupts to clear
2463  *
2464  *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2465  *	adapter port.
2466  */
2467 void t3_port_intr_clear(adapter_t *adapter, int idx)
2468 {
2469 	struct port_info *pi = adap2pinfo(adapter, idx);
2470 
2471 	t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
2472 	pi->phy.ops->intr_clear(&pi->phy);
2473 }
2474 
2475 #define SG_CONTEXT_CMD_ATTEMPTS 100
2476 
2477 /**
2478  * 	t3_sge_write_context - write an SGE context
2479  * 	@adapter: the adapter
2480  * 	@id: the context id
2481  * 	@type: the context type
2482  *
2483  * 	Program an SGE context with the values already loaded in the
2484  * 	CONTEXT_DATA? registers.
2485  */
2486 static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
2487 				unsigned int type)
2488 {
2489 	if (type == F_RESPONSEQ) {
2490 		/*
2491 		 * Can't write the Response Queue Context bits for
2492 		 * Interrupt Armed or the Reserve bits after the chip
2493 		 * has been initialized out of reset.  Writing to these
2494 		 * bits can confuse the hardware.
2495 		 */
2496 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2497 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2498 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2499 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2500 	} else {
2501 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2502 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2503 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2504 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2505 	}
2506 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2507 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2508 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2509 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2510 }
2511 
2512 /**
2513  *	clear_sge_ctxt - completely clear an SGE context
2514  *	@adapter: the adapter
2515  *	@id: the context id
2516  *	@type: the context type
2517  *
2518  *	Completely clear an SGE context.  Used predominantly at post-reset
2519  *	initialization.  Note in particular that we don't skip writing to any
2520  *	"sensitive bits" in the contexts the way that t3_sge_write_context()
2521  *	does ...
2522  */
2523 static int clear_sge_ctxt(adapter_t *adap, unsigned int id, unsigned int type)
2524 {
2525 	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2526 	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2527 	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2528 	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2529 	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2530 	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2531 	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2532 	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2533 	t3_write_reg(adap, A_SG_CONTEXT_CMD,
2534 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2535 	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2536 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2537 }
2538 
2539 /**
2540  *	t3_sge_init_ecntxt - initialize an SGE egress context
2541  *	@adapter: the adapter to configure
2542  *	@id: the context id
2543  *	@gts_enable: whether to enable GTS for the context
2544  *	@type: the egress context type
2545  *	@respq: associated response queue
2546  *	@base_addr: base address of queue
2547  *	@size: number of queue entries
2548  *	@token: uP token
2549  *	@gen: initial generation value for the context
2550  *	@cidx: consumer pointer
2551  *
2552  *	Initialize an SGE egress context and make it ready for use.  If the
2553  *	platform allows concurrent context operations, the caller is
2554  *	responsible for appropriate locking.
2555  */
2556 int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2557 		       enum sge_context_type type, int respq, u64 base_addr,
2558 		       unsigned int size, unsigned int token, int gen,
2559 		       unsigned int cidx)
2560 {
2561 	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2562 
2563 	if (base_addr & 0xfff)     /* must be 4K aligned */
2564 		return -EINVAL;
2565 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2566 		return -EBUSY;
2567 
2568 	base_addr >>= 12;
2569 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2570 		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2571 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2572 		     V_EC_BASE_LO((u32)base_addr & 0xffff));
2573 	base_addr >>= 16;
2574 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
2575 	base_addr >>= 32;
2576 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2577 		     V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
2578 		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2579 		     F_EC_VALID);
2580 	return t3_sge_write_context(adapter, id, F_EGRESS);
2581 }
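
/*
 * Usage sketch (illustrative; SGE_CNTXT_ETH and the queue fields are
 * assumptions made for the example): program egress context @id for a
 * 512-entry Ethernet TX queue bound to response queue @rspq_id.  The
 * queue base address must be 4KB aligned or the call fails with -EINVAL.
 *
 *	err = t3_sge_init_ecntxt(adapter, id, 1, SGE_CNTXT_ETH, rspq_id,
 *				 q->phys_addr, 512, q->token, 1, 0);
 */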
2582 
2583 /**
2584  *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2585  *	@adapter: the adapter to configure
2586  *	@id: the context id
2587  *	@gts_enable: whether to enable GTS for the context
2588  *	@base_addr: base address of queue
2589  *	@size: number of queue entries
2590  *	@bsize: size of each buffer for this queue
2591  *	@cong_thres: threshold to signal congestion to upstream producers
2592  *	@gen: initial generation value for the context
2593  *	@cidx: consumer pointer
2594  *
2595  *	Initialize an SGE free list context and make it ready for use.  The
2596  *	caller is responsible for ensuring only one context operation occurs
2597  *	at a time.
2598  */
2599 int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
2600 			u64 base_addr, unsigned int size, unsigned int bsize,
2601 			unsigned int cong_thres, int gen, unsigned int cidx)
2602 {
2603 	if (base_addr & 0xfff)     /* must be 4K aligned */
2604 		return -EINVAL;
2605 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2606 		return -EBUSY;
2607 
2608 	base_addr >>= 12;
2609 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
2610 	base_addr >>= 32;
2611 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2612 		     V_FL_BASE_HI((u32)base_addr) |
2613 		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2614 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2615 		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2616 		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2617 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2618 		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2619 		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2620 	return t3_sge_write_context(adapter, id, F_FREELIST);
2621 }
2622 
2623 /**
2624  *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2625  *	@adapter: the adapter to configure
2626  *	@id: the context id
2627  *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2628  *	@base_addr: base address of queue
2629  *	@size: number of queue entries
2630  *	@fl_thres: threshold for selecting the normal or jumbo free list
2631  *	@gen: initial generation value for the context
2632  *	@cidx: consumer pointer
2633  *
2634  *	Initialize an SGE response queue context and make it ready for use.
2635  *	The caller is responsible for ensuring only one context operation
2636  *	occurs at a time.
2637  */
2638 int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
2639 			 u64 base_addr, unsigned int size,
2640 			 unsigned int fl_thres, int gen, unsigned int cidx)
2641 {
2642 	unsigned int ctrl, intr = 0;
2643 
2644 	if (base_addr & 0xfff)     /* must be 4K aligned */
2645 		return -EINVAL;
2646 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2647 		return -EBUSY;
2648 
2649 	base_addr >>= 12;
2650 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2651 		     V_CQ_INDEX(cidx));
2652 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2653 	base_addr >>= 32;
2654 	ctrl = t3_read_reg(adapter, A_SG_CONTROL);
2655 	if ((irq_vec_idx > 0) ||
2656 	    ((irq_vec_idx == 0) && !(ctrl & F_ONEINTMULTQ)))
2657 		intr = F_RQ_INTR_EN;
2658 	if (irq_vec_idx >= 0)
2659 		intr |= V_RQ_MSI_VEC(irq_vec_idx);
2660 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2661 		     V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
2662 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2663 	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2664 }
2665 
2666 /**
2667  *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2668  *	@adapter: the adapter to configure
2669  *	@id: the context id
2670  *	@base_addr: base address of queue
2671  *	@size: number of queue entries
2672  *	@rspq: response queue for async notifications
2673  *	@ovfl_mode: CQ overflow mode
2674  *	@credits: completion queue credits
2675  *	@credit_thres: the credit threshold
2676  *
2677  *	Initialize an SGE completion queue context and make it ready for use.
2678  *	The caller is responsible for ensuring only one context operation
2679  *	occurs at a time.
2680  */
2681 int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
2682 			unsigned int size, int rspq, int ovfl_mode,
2683 			unsigned int credits, unsigned int credit_thres)
2684 {
2685 	if (base_addr & 0xfff)     /* must be 4K aligned */
2686 		return -EINVAL;
2687 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2688 		return -EBUSY;
2689 
2690 	base_addr >>= 12;
2691 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2692 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
2693 	base_addr >>= 32;
2694 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2695 		     V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
2696 		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2697 		     V_CQ_ERR(ovfl_mode));
2698 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2699 		     V_CQ_CREDIT_THRES(credit_thres));
2700 	return t3_sge_write_context(adapter, id, F_CQ);
2701 }
2702 
2703 /**
2704  *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2705  *	@adapter: the adapter
2706  *	@id: the egress context id
2707  *	@enable: enable (1) or disable (0) the context
2708  *
2709  *	Enable or disable an SGE egress context.  The caller is responsible for
2710  *	ensuring only one context operation occurs at a time.
2711  */
2712 int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
2713 {
2714 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2715 		return -EBUSY;
2716 
2717 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2718 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2719 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2720 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2721 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2722 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2723 		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2724 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2725 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2726 }
2727 
2728 /**
2729  *	t3_sge_disable_fl - disable an SGE free-buffer list
2730  *	@adapter: the adapter
2731  *	@id: the free list context id
2732  *
2733  *	Disable an SGE free-buffer list.  The caller is responsible for
2734  *	ensuring only one context operation occurs at a time.
2735  */
2736 int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
2737 {
2738 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2739 		return -EBUSY;
2740 
2741 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2742 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2743 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2744 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2745 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2746 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2747 		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2748 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2749 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2750 }
2751 
2752 /**
2753  *	t3_sge_disable_rspcntxt - disable an SGE response queue
2754  *	@adapter: the adapter
2755  *	@id: the response queue context id
2756  *
2757  *	Disable an SGE response queue.  The caller is responsible for
2758  *	ensuring only one context operation occurs at a time.
2759  */
2760 int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
2761 {
2762 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2763 		return -EBUSY;
2764 
2765 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2766 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2767 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2768 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2769 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2770 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2771 		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2772 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2773 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2774 }
2775 
2776 /**
2777  *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2778  *	@adapter: the adapter
2779  *	@id: the completion queue context id
2780  *
2781  *	Disable an SGE completion queue.  The caller is responsible for
2782  *	ensuring only one context operation occurs at a time.
2783  */
2784 int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
2785 {
2786 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2787 		return -EBUSY;
2788 
2789 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2790 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2791 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2792 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2793 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2794 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2795 		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2796 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2797 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2798 }
2799 
2800 /**
2801  *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2802  *	@adapter: the adapter
2803  *	@id: the context id
2804  *	@op: the operation to perform
2805  *	@credits: credits to return to the CQ
2806  *
2807  *	Perform the selected operation on an SGE completion queue context.
2808  *	The caller is responsible for ensuring only one context operation
2809  *	occurs at a time.
2810  *
2811  *	For most operations the function returns the current HW position in
2812  *	the completion queue.
2813  */
2814 int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
2815 		      unsigned int credits)
2816 {
2817 	u32 val;
2818 
2819 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2820 		return -EBUSY;
2821 
2822 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2823 	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2824 		     V_CONTEXT(id) | F_CQ);
2825 	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2826 				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2827 		return -EIO;
2828 
2829 	if (op >= 2 && op < 7) {
2830 		if (adapter->params.rev > 0)
2831 			return G_CQ_INDEX(val);
2832 
2833 		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2834 			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2835 		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2836 				    F_CONTEXT_CMD_BUSY, 0,
2837 				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2838 			return -EIO;
2839 		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2840 	}
2841 	return 0;
2842 }
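
/*
 * Usage sketch (illustrative; the opcode value is left symbolic since the
 * numeric operations are hardware-defined): return 64 credits to CQ @id.
 * Per the code above, opcodes 2..6 also report the queue position, so a
 * non-negative return value is the current HW index.
 *
 *	ret = t3_sge_cqcntxt_op(adapter, id, op, 64);
 *	if (ret < 0)
 *		return (ret);		(-EBUSY or -EIO)
 *	hw_cidx = ret;
 */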
2843 
2844 /**
2845  * 	t3_sge_read_context - read an SGE context
2846  * 	@type: the context type
2847  * 	@adapter: the adapter
2848  * 	@id: the context id
2849  * 	@data: holds the retrieved context
2850  *
2851  * 	Read an SGE context of the given type.  The caller is responsible
2852  * 	for ensuring only one context operation occurs at a time.
2853  */
2854 static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
2855 			       unsigned int id, u32 data[4])
2856 {
2857 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2858 		return -EBUSY;
2859 
2860 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2861 		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2862 	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2863 			    SG_CONTEXT_CMD_ATTEMPTS, 1))
2864 		return -EIO;
2865 	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2866 	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2867 	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2868 	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2869 	return 0;
2870 }
2871 
2872 /**
2873  * 	t3_sge_read_ecntxt - read an SGE egress context
2874  * 	@adapter: the adapter
2875  * 	@id: the context id
2876  * 	@data: holds the retrieved context
2877  *
2878  * 	Read an SGE egress context.  The caller is responsible for ensuring
2879  * 	only one context operation occurs at a time.
2880  */
2881 int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
2882 {
2883 	if (id >= 65536)
2884 		return -EINVAL;
2885 	return t3_sge_read_context(F_EGRESS, adapter, id, data);
2886 }
2887 
2888 /**
2889  * 	t3_sge_read_cq - read an SGE CQ context
2890  * 	@adapter: the adapter
2891  * 	@id: the context id
2892  * 	@data: holds the retrieved context
2893  *
2894  * 	Read an SGE CQ context.  The caller is responsible for ensuring
2895  * 	only one context operation occurs at a time.
2896  */
2897 int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
2898 {
2899 	if (id >= 65536)
2900 		return -EINVAL;
2901 	return t3_sge_read_context(F_CQ, adapter, id, data);
2902 }
2903 
2904 /**
2905  * 	t3_sge_read_fl - read an SGE free-list context
2906  * 	@adapter: the adapter
2907  * 	@id: the context id
2908  * 	@data: holds the retrieved context
2909  *
2910  * 	Read an SGE free-list context.  The caller is responsible for ensuring
2911  * 	only one context operation occurs at a time.
2912  */
2913 int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
2914 {
2915 	if (id >= SGE_QSETS * 2)
2916 		return -EINVAL;
2917 	return t3_sge_read_context(F_FREELIST, adapter, id, data);
2918 }
2919 
2920 /**
2921  * 	t3_sge_read_rspq - read an SGE response queue context
2922  * 	@adapter: the adapter
2923  * 	@id: the context id
2924  * 	@data: holds the retrieved context
2925  *
2926  * 	Read an SGE response queue context.  The caller is responsible for
2927  * 	ensuring only one context operation occurs at a time.
2928  */
2929 int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
2930 {
2931 	if (id >= SGE_QSETS)
2932 		return -EINVAL;
2933 	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2934 }
2935 
2936 /**
2937  *	t3_config_rss - configure Rx packet steering
2938  *	@adapter: the adapter
2939  *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2940  *	@cpus: values for the CPU lookup table (0xff terminated)
2941  *	@rspq: values for the response queue lookup table (0xffff terminated)
2942  *
2943  *	Programs the receive packet steering logic.  @cpus and @rspq provide
2944  *	the values for the CPU and response queue lookup tables.  If they
2945  *	provide fewer values than the size of the tables, the supplied values
2946  *	are used repeatedly until the tables are fully populated.
2947  */
2948 void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
2949 		   const u16 *rspq)
2950 {
2951 	int i, j, cpu_idx = 0, q_idx = 0;
2952 
2953 	if (cpus)
2954 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2955 			u32 val = i << 16;
2956 
2957 			for (j = 0; j < 2; ++j) {
2958 				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2959 				if (cpus[cpu_idx] == 0xff)
2960 					cpu_idx = 0;
2961 			}
2962 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2963 		}
2964 
2965 	if (rspq)
2966 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2967 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2968 				     (i << 16) | rspq[q_idx++]);
2969 			if (rspq[q_idx] == 0xffff)
2970 				q_idx = 0;
2971 		}
2972 
2973 	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2974 }
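
/*
 * Usage sketch (illustrative): spread the RSS buckets across two response
 * queues.  The lookup tables cycle through the supplied values until the
 * 0xff / 0xffff terminators are reached, so short arrays simply repeat.
 *
 *	static const u8 cpus[] = { 0, 1, 0xff };
 *	static const u16 rspq[] = { 0, 1, 0xffff };
 *
 *	t3_config_rss(adapter, rss_config, cpus, rspq);
 */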
2975 
2976 /**
2977  *	t3_read_rss - read the contents of the RSS tables
2978  *	@adapter: the adapter
2979  *	@lkup: holds the contents of the RSS lookup table
2980  *	@map: holds the contents of the RSS map table
2981  *
2982  *	Reads the contents of the receive packet steering tables.
2983  */
2984 int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
2985 {
2986 	int i;
2987 	u32 val;
2988 
2989 	if (lkup)
2990 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2991 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2992 				     0xffff0000 | i);
2993 			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2994 			if (!(val & 0x80000000))
2995 				return -EAGAIN;
2996 			*lkup++ = (u8)val;
2997 			*lkup++ = (u8)(val >> 8);
2998 		}
2999 
3000 	if (map)
3001 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
3002 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
3003 				     0xffff0000 | i);
3004 			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
3005 			if (!(val & 0x80000000))
3006 				return -EAGAIN;
3007 			*map++ = (u16)val;
3008 		}
3009 	return 0;
3010 }
3011 
3012 /**
3013  *	t3_tp_set_offload_mode - put TP in NIC/offload mode
3014  *	@adap: the adapter
3015  *	@enable: 1 to select offload mode, 0 for regular NIC
3016  *
3017  *	Switches TP to NIC/offload mode.
3018  */
3019 void t3_tp_set_offload_mode(adapter_t *adap, int enable)
3020 {
3021 	if (is_offload(adap) || !enable)
3022 		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
3023 				 V_NICMODE(!enable));
3024 }
3025 
3026 /**
3027  *	tp_wr_bits_indirect - set/clear bits in an indirect TP register
3028  *	@adap: the adapter
3029  *	@addr: the indirect TP register address
3030  *	@mask: specifies the field within the register to modify
3031  *	@val: new value for the field
3032  *
3033  *	Sets a field of an indirect TP register to the given value.
3034  */
3035 static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
3036 				unsigned int mask, unsigned int val)
3037 {
3038 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3039 	val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3040 	t3_write_reg(adap, A_TP_PIO_DATA, val);
3041 }
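
/*
 * This is the usual indirect register access pattern: the target address
 * is latched in A_TP_PIO_ADDR and the data port A_TP_PIO_DATA then
 * accesses that register (here as a read-modify-write).  Since the
 * latched address is shared state, callers are expected to serialize
 * these accesses.
 */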
3042 
3043 /**
3044  *	t3_enable_filters - enable the HW filters
3045  *	@adap: the adapter
3046  *
3047  *	Enables the HW filters for NIC traffic.
3048  */
3049 void t3_enable_filters(adapter_t *adap)
3050 {
3051 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
3052 	t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
3053 	t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
3054 	tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
3055 }
3056 
3057 /**
3058  *	t3_disable_filters - disable the HW filters
3059  *	@adap: the adapter
3060  *
3061  *	Disables the HW filters for NIC traffic.
3062  */
3063 void t3_disable_filters(adapter_t *adap)
3064 {
3065 	/* note that we don't want to revert to NIC-only mode */
3066 	t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_FILTEREN, 0);
3067 	t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG,
3068 			 V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP), 0);
3069 	tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, F_LOOKUPEVERYPKT, 0);
3070 }
3071 
3072 /**
3073  *	pm_num_pages - calculate the number of pages of the payload memory
3074  *	@mem_size: the size of the payload memory
3075  *	@pg_size: the size of each payload memory page
3076  *
3077  *	Calculate the number of pages, each of the given size, that fit in a
3078  *	memory of the specified size, respecting the HW requirement that the
3079  *	number of pages must be a multiple of 24.
3080  */
3081 static inline unsigned int pm_num_pages(unsigned int mem_size,
3082 					unsigned int pg_size)
3083 {
3084 	unsigned int n = mem_size / pg_size;
3085 
3086 	return n - n % 24;
3087 }
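
/*
 * Worked example: a 64MB payload memory with 64KB pages gives n = 1024;
 * 1024 % 24 == 16, so pm_num_pages() returns 1008, the largest multiple
 * of 24 that fits.
 */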
3088 
3089 #define mem_region(adap, start, size, reg) \
3090 	t3_write_reg((adap), A_ ## reg, (start)); \
3091 	start += size
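
/*
 * Note that mem_region() expands to two statements (the register write
 * and the advance of 'start'), so it is only safe in straight-line code
 * such as partition_mem() below, never as the body of an unbraced 'if'.
 */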
3092 
3093 /**
3094  *	partition_mem - partition memory and configure TP memory settings
3095  *	@adap: the adapter
3096  *	@p: the TP parameters
3097  *
3098  *	Partitions context and payload memory and configures TP's memory
3099  *	registers.
3100  */
3101 static void partition_mem(adapter_t *adap, const struct tp_params *p)
3102 {
3103 	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
3104 	unsigned int timers = 0, timers_shift = 22;
3105 
3106 	if (adap->params.rev > 0) {
3107 		if (tids <= 16 * 1024) {
3108 			timers = 1;
3109 			timers_shift = 16;
3110 		} else if (tids <= 64 * 1024) {
3111 			timers = 2;
3112 			timers_shift = 18;
3113 		} else if (tids <= 256 * 1024) {
3114 			timers = 3;
3115 			timers_shift = 20;
3116 		}
3117 	}
3118 
3119 	t3_write_reg(adap, A_TP_PMM_SIZE,
3120 		     p->chan_rx_size | (p->chan_tx_size >> 16));
3121 
3122 	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
3123 	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
3124 	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
3125 	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
3126 			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
3127 
3128 	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
3129 	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
3130 	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
3131 
3132 	pstructs = p->rx_num_pgs + p->tx_num_pgs;
3133 	/* Add a bit of headroom and round down to a multiple of 24 */
3134 	pstructs += 48;
3135 	pstructs -= pstructs % 24;
3136 	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
3137 
3138 	m = tids * TCB_SIZE;
3139 	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
3140 	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
3141 	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
3142 	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
3143 	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
3144 	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
3145 	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
3146 	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
3147 
3148 	m = (m + 4095) & ~0xfff;
3149 	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
3150 	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
3151 
3152 	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
3153 	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
3154 	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
3155 	if (tids < m)
3156 		adap->params.mc5.nservers += m - tids;
3157 }
3158 
3159 static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
3160 {
3161 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3162 	t3_write_reg(adap, A_TP_PIO_DATA, val);
3163 }
3164 
3165 static inline u32 tp_rd_indirect(adapter_t *adap, unsigned int addr)
3166 {
3167 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
3168 	return t3_read_reg(adap, A_TP_PIO_DATA);
3169 }
3170 
3171 static void tp_config(adapter_t *adap, const struct tp_params *p)
3172 {
3173 	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
3174 		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
3175 		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
3176 	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
3177 		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
3178 		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
3179 	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
3180 		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
3181 		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
3182 		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
3183 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
3184 			 F_IPV6ENABLE | F_NICMODE);
3185 	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
3186 	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
3187 	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
3188 			 adap->params.rev > 0 ? F_ENABLEESND :
3189 			 			F_T3A_ENABLEESND);
3190 	t3_set_reg_field(adap, A_TP_PC_CONFIG,
3191 			 F_ENABLEEPCMDAFULL,
3192 			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
3193 			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
3194 	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
3195 			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
3196 			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
3197 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
3198 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
3199 
3200 	if (adap->params.rev > 0) {
3201 		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
3202 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
3203 				 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
3204 		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
3205 		tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
3206 		tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
3207 		tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
3208 	} else
3209 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
3210 
3211 	if (adap->params.rev == T3_REV_C)
3212 		t3_set_reg_field(adap, A_TP_PC_CONFIG,
3213 				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
3214 				 V_TABLELATENCYDELTA(4));
3215 
3216 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
3217 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
3218 	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
3219 	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
3220 
3221 	if (adap->params.nports > 2) {
3222 		t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
3223 				 F_ENABLETXPORTFROMDA2 | F_ENABLETXPORTFROMDA |
3224 				 F_ENABLERXPORTFROMADDR);
3225 		tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
3226 				    V_RXMAPMODE(M_RXMAPMODE), 0);
3227 		tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
3228 			       V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
3229 			       F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
3230 			       F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
3231 		tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
3232 		tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
3233 		tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
3234 	}
3235 }
3236 
3237 /* TCP timer values in ms */
3238 #define TP_DACK_TIMER 50
3239 #define TP_RTO_MIN    250
3240 
3241 /**
3242  *	tp_set_timers - set TP timing parameters
3243  *	@adap: the adapter to set
3244  *	@core_clk: the core clock frequency in Hz
3245  *
3246  *	Set TP's timing parameters, such as the various timer resolutions and
3247  *	the TCP timer values.
3248  */
3249 static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
3250 {
3251 	unsigned int tre = adap->params.tp.tre;
3252 	unsigned int dack_re = adap->params.tp.dack_re;
3253 	unsigned int tstamp_re = fls(core_clk / 1000);     /* 1ms, at least */
3254 	unsigned int tps = core_clk >> tre;
3255 
3256 	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
3257 		     V_DELAYEDACKRESOLUTION(dack_re) |
3258 		     V_TIMESTAMPRESOLUTION(tstamp_re));
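	/* TP_DACK_TIMER ms in delayed-ACK ticks: (ticks/s) / (1000 / TP_DACK_TIMER) */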
3259 	t3_write_reg(adap, A_TP_DACK_TIMER,
3260 		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
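	/*
	 * The four backoff registers form a 16-entry TCP exponential-backoff
	 * table, one byte per entry; the constants below program the
	 * identity mapping 0, 1, ..., 15.
	 */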
3261 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
3262 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
3263 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
3264 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
3265 	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
3266 		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
3267 		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
3268 		     V_KEEPALIVEMAX(9));
3269 
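/* tps is TP timer ticks per second, so "n SECONDS" expands to n * tps ticks */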
3270 #define SECONDS * tps
3271 
3272 	t3_write_reg(adap, A_TP_MSL,
3273 		     adap->params.rev > 0 ? 0 : 2 SECONDS);
3274 	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
3275 	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
3276 	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
3277 	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
3278 	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
3279 	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
3280 	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
3281 	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
3282 
3283 #undef SECONDS
3284 }
3285 
3286 /**
3287  *	t3_tp_set_coalescing_size - set receive coalescing size
3288  *	@adap: the adapter
3289  *	@size: the receive coalescing size
3290  *	@psh: whether a set PSH bit should deliver coalesced data
3291  *
3292  *	Set the receive coalescing size and PSH bit handling.
3293  */
3294 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
3295 {
3296 	u32 val;
3297 
3298 	if (size > MAX_RX_COALESCING_LEN)
3299 		return -EINVAL;
3300 
3301 	val = t3_read_reg(adap, A_TP_PARA_REG3);
3302 	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
3303 
3304 	if (size) {
3305 		val |= F_RXCOALESCEENABLE;
3306 		if (psh)
3307 			val |= F_RXCOALESCEPSHEN;
3308 		size = min(MAX_RX_COALESCING_LEN, size);
3309 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
3310 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
3311 	}
3312 	t3_write_reg(adap, A_TP_PARA_REG3, val);
3313 	return 0;
3314 }
3315 
3316 /**
3317  *	t3_tp_set_max_rxsize - set the max receive size
3318  *	@adap: the adapter
3319  *	@size: the max receive size
3320  *
3321  *	Set TP's max receive size.  This is the limit that applies when
3322  *	receive coalescing is disabled.
3323  */
3324 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
3325 {
3326 	t3_write_reg(adap, A_TP_PARA_REG7,
3327 		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
3328 }
3329 
3330 static void __devinit init_mtus(unsigned short mtus[])
3331 {
3332 	/*
3333 	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
3334 	 * it can accommodate max size TCP/IP headers when SACK and timestamps
3335 	 * are enabled and still have at least 8 bytes of payload.
3336 	 */
3337 	mtus[0] = 88;
3338 	mtus[1] = 88;
3339 	mtus[2] = 256;
3340 	mtus[3] = 512;
3341 	mtus[4] = 576;
3342 	mtus[5] = 1024;
3343 	mtus[6] = 1280;
3344 	mtus[7] = 1492;
3345 	mtus[8] = 1500;
3346 	mtus[9] = 2002;
3347 	mtus[10] = 2048;
3348 	mtus[11] = 4096;
3349 	mtus[12] = 4352;
3350 	mtus[13] = 8192;
3351 	mtus[14] = 9000;
3352 	mtus[15] = 9600;
3353 }
3354 
3355 /**
3356  *	init_cong_ctrl - initialize congestion control parameters
3357  *	@a: the alpha values for congestion control
3358  *	@b: the beta values for congestion control
3359  *
3360  *	Initialize the congestion control parameters.
3361  */
3362 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
3363 {
3364 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3365 	a[9] = 2;
3366 	a[10] = 3;
3367 	a[11] = 4;
3368 	a[12] = 5;
3369 	a[13] = 6;
3370 	a[14] = 7;
3371 	a[15] = 8;
3372 	a[16] = 9;
3373 	a[17] = 10;
3374 	a[18] = 14;
3375 	a[19] = 17;
3376 	a[20] = 21;
3377 	a[21] = 25;
3378 	a[22] = 30;
3379 	a[23] = 35;
3380 	a[24] = 45;
3381 	a[25] = 60;
3382 	a[26] = 80;
3383 	a[27] = 100;
3384 	a[28] = 200;
3385 	a[29] = 300;
3386 	a[30] = 400;
3387 	a[31] = 500;
3388 
3389 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3390 	b[9] = b[10] = 1;
3391 	b[11] = b[12] = 2;
3392 	b[13] = b[14] = b[15] = b[16] = 3;
3393 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3394 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3395 	b[28] = b[29] = 6;
3396 	b[30] = b[31] = 7;
3397 }
3398 
3399 /* The minimum additive increment value for the congestion control table */
3400 #define CC_MIN_INCR 2U
3401 
3402 /**
3403  *	t3_load_mtus - write the MTU and congestion control HW tables
3404  *	@adap: the adapter
3405  *	@mtus: the unrestricted values for the MTU table
3406  *	@alpha: the values for the congestion control alpha parameter
3407  *	@beta: the values for the congestion control beta parameter
3408  *	@mtu_cap: the maximum permitted effective MTU
3409  *
3410  *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
3411  *	Update the high-speed congestion control table with the supplied alpha,
3412  * 	beta, and MTUs.
3413  */
3414 void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
3415 		  unsigned short alpha[NCCTRL_WIN],
3416 		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
3417 {
3418 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3419 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3420 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3421 		28672, 40960, 57344, 81920, 114688, 163840, 229376 };
3422 
3423 	unsigned int i, w;
3424 
3425 	for (i = 0; i < NMTUS; ++i) {
3426 		unsigned int mtu = min(mtus[i], mtu_cap);
3427 		unsigned int log2 = fls(mtu);
3428 
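		/*
		 * fls() gives the bit position just above the MTU's MSB;
		 * round to the nearest power of two by checking the bit
		 * below the MSB.
		 */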
3429 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
3430 			log2--;
3431 		t3_write_reg(adap, A_TP_MTU_TABLE,
3432 			     (i << 24) | (log2 << 16) | mtu);
3433 
3434 		for (w = 0; w < NCCTRL_WIN; ++w) {
3435 			unsigned int inc;
3436 
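			/*
			 * Additive increment: alpha scaled by the MTU
			 * payload (mtu - 40 strips the TCP/IP headers),
			 * spread over the window's average packet count.
			 */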
3437 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3438 				  CC_MIN_INCR);
3439 
3440 			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3441 				     (w << 16) | (beta[w] << 13) | inc);
3442 		}
3443 	}
3444 }
3445 
3446 /**
3447  *	t3_read_hw_mtus - returns the values in the HW MTU table
3448  *	@adap: the adapter
3449  *	@mtus: where to store the HW MTU values
3450  *
3451  *	Reads the HW MTU table.
3452  */
3453 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
3454 {
3455 	int i;
3456 
3457 	for (i = 0; i < NMTUS; ++i) {
3458 		unsigned int val;
3459 
3460 		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3461 		val = t3_read_reg(adap, A_TP_MTU_TABLE);
3462 		mtus[i] = val & 0x3fff;
3463 	}
3464 }
3465 
3466 /**
3467  *	t3_get_cong_cntl_tab - reads the congestion control table
3468  *	@adap: the adapter
3469  *	@incr: where to store the additive increments
3470  *
3471  *	Reads the additive increments programmed into the HW congestion
3472  *	control table.
3473  */
3474 void t3_get_cong_cntl_tab(adapter_t *adap,
3475 			  unsigned short incr[NMTUS][NCCTRL_WIN])
3476 {
3477 	unsigned int mtu, w;
3478 
3479 	for (mtu = 0; mtu < NMTUS; ++mtu)
3480 		for (w = 0; w < NCCTRL_WIN; ++w) {
3481 			t3_write_reg(adap, A_TP_CCTRL_TABLE,
3482 				     0xffff0000 | (mtu << 5) | w);
3483 			incr[mtu][w] = (unsigned short)t3_read_reg(adap,
3484 				        A_TP_CCTRL_TABLE) & 0x1fff;
3485 		}
3486 }
3487 
3488 /**
3489  *	t3_tp_get_mib_stats - read TP's MIB counters
3490  *	@adap: the adapter
3491  *	@tps: holds the returned counter values
3492  *
3493  *	Returns the values of TP's MIB counters.
3494  */
3495 void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
3496 {
3497 	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
3498 			 sizeof(*tps) / sizeof(u32), 0);
3499 }
3500 
3501 /**
3502  *	t3_read_pace_tbl - read the pace table
3503  *	@adap: the adapter
3504  *	@pace_vals: holds the returned values
3505  *
3506  *	Returns the values of TP's pace table in nanoseconds.
3507  */
3508 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
3509 {
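	/* ns per pace-table tick, i.e. the usec value of 1000 ticks */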
3510 	unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
3511 
3512 	for (i = 0; i < NTX_SCHED; i++) {
3513 		t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3514 		pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
3515 	}
3516 }
3517 
3518 /**
3519  *	t3_set_pace_tbl - set the pace table
3520  *	@adap: the adapter
3521  *	@pace_vals: the pace values in nanoseconds
3522  *	@start: index of the first entry in the HW pace table to set
3523  *	@n: how many entries to set
3524  *
3525  *	Sets (a subset of the) HW pace table.
3526  */
3527 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
3528 		     unsigned int start, unsigned int n)
3529 {
3530 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3531 
3532 	for ( ; n; n--, start++, pace_vals++)
3533 		t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
3534 			     ((*pace_vals + tick_ns / 2) / tick_ns));
3535 }
3536 
3537 #define ulp_region(adap, name, start, len) \
3538 	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3539 	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3540 		     (start) + (len) - 1); \
3541 	start += len
3542 
3543 #define ulptx_region(adap, name, start, len) \
3544 	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3545 	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3546 		     (start) + (len) - 1)
3547 
3548 static void ulp_config(adapter_t *adap, const struct tp_params *p)
3549 {
3550 	unsigned int m = p->chan_rx_size;
3551 
3552 	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3553 	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3554 	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3555 	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3556 	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3557 	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3558 	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3559 	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3560 }
3561 
3562 
3563 /**
3564  *	t3_set_proto_sram - set the contents of the protocol sram
3565  *	@adap: the adapter
3566  *	@data: the protocol image
3567  *
3568  *	Write the contents of the protocol SRAM.
3569  */
3570 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
3571 {
3572 	int i;
3573 	const u32 *buf = (const u32 *)data;
3574 
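	/*
	 * Each protocol SRAM line is five 32-bit words staged in the
	 * EMBED_OP_FIELD5..1 registers; writing FIELD0 with the line index
	 * and bit 31 set kicks off the write, which is then polled for
	 * completion.
	 */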
3575 	for (i = 0; i < PROTO_SRAM_LINES; i++) {
3576 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
3577 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
3578 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
3579 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
3580 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
3581 
3582 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3583 		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3584 			return -EIO;
3585 	}
3586 	return 0;
3587 }
3588 
3589 /**
3590  *	t3_config_trace_filter - configure one of the tracing filters
3591  *	@adapter: the adapter
3592  *	@tp: the desired trace filter parameters
3593  *	@filter_index: which filter to configure
3594  *	@invert: if set, non-matching packets are traced instead of matching ones
3595  *	@enable: whether to enable or disable the filter
3596  *
3597  *	Configures one of the tracing filters available in HW.
3598  */
3599 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
3600 			    int filter_index, int invert, int enable)
3601 {
3602 	u32 addr, key[4], mask[4];
3603 
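	/*
	 * Pack the match tuple into four words: {sip[15:0], sport},
	 * {dport, sip[31:16]}, dip, and {intf, vlan, proto}.  Bit 28 of the
	 * last word enables the filter and bit 29 inverts the match.
	 */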
3604 	key[0] = tp->sport | (tp->sip << 16);
3605 	key[1] = (tp->sip >> 16) | (tp->dport << 16);
3606 	key[2] = tp->dip;
3607 	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3608 
3609 	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3610 	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3611 	mask[2] = tp->dip_mask;
3612 	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3613 
3614 	if (invert)
3615 		key[3] |= (1 << 29);
3616 	if (enable)
3617 		key[3] |= (1 << 28);
3618 
3619 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3620 	tp_wr_indirect(adapter, addr++, key[0]);
3621 	tp_wr_indirect(adapter, addr++, mask[0]);
3622 	tp_wr_indirect(adapter, addr++, key[1]);
3623 	tp_wr_indirect(adapter, addr++, mask[1]);
3624 	tp_wr_indirect(adapter, addr++, key[2]);
3625 	tp_wr_indirect(adapter, addr++, mask[2]);
3626 	tp_wr_indirect(adapter, addr++, key[3]);
3627 	tp_wr_indirect(adapter, addr,   mask[3]);
3628 	(void) t3_read_reg(adapter, A_TP_PIO_DATA);
3629 }
3630 
3631 /**
3632  *	t3_query_trace_filter - query a tracing filter
3633  *	@adapter: the adapter
3634  *	@tp: the current trace filter parameters
3635  *	@filter_index: which filter to query
3636  *	@inverted: non-zero if the filter is inverted
3637  *	@enabled: non-zero if the filter is enabled
3638  *
3639  *	Returns the current settings of the specified HW tracing filter.
3640  */
3641 void t3_query_trace_filter(adapter_t *adapter, struct trace_params *tp,
3642 			   int filter_index, int *inverted, int *enabled)
3643 {
3644 	u32 addr, key[4], mask[4];
3645 
3646 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3647 	key[0]  = tp_rd_indirect(adapter, addr++);
3648 	mask[0] = tp_rd_indirect(adapter, addr++);
3649 	key[1]  = tp_rd_indirect(adapter, addr++);
3650 	mask[1] = tp_rd_indirect(adapter, addr++);
3651 	key[2]  = tp_rd_indirect(adapter, addr++);
3652 	mask[2] = tp_rd_indirect(adapter, addr++);
3653 	key[3]  = tp_rd_indirect(adapter, addr++);
3654 	mask[3] = tp_rd_indirect(adapter, addr);
3655 
3656 	tp->sport = key[0] & 0xffff;
3657 	tp->sip   = (key[0] >> 16) | ((key[1] & 0xffff) << 16);
3658 	tp->dport = key[1] >> 16;
3659 	tp->dip   = key[2];
3660 	tp->proto = key[3] & 0xff;
3661 	tp->vlan  = key[3] >> 8;
3662 	tp->intf  = key[3] >> 20;
3663 
3664 	tp->sport_mask = mask[0] & 0xffff;
3665 	tp->sip_mask   = (mask[0] >> 16) | ((mask[1] & 0xffff) << 16);
3666 	tp->dport_mask = mask[1] >> 16;
3667 	tp->dip_mask   = mask[2];
3668 	tp->proto_mask = mask[3] & 0xff;
3669 	tp->vlan_mask  = mask[3] >> 8;
3670 	tp->intf_mask  = mask[3] >> 20;
3671 
3672 	*inverted = key[3] & (1 << 29);
3673 	*enabled  = key[3] & (1 << 28);
3674 }
3675 
3676 /**
3677  *	t3_config_sched - configure a HW traffic scheduler
3678  *	@adap: the adapter
3679  *	@kbps: target rate in Kbps
3680  *	@sched: the scheduler index
3681  *
3682  *	Configure a Tx HW scheduler for the target rate.
3683  */
3684 int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
3685 {
3686 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3687 	unsigned int clk = adap->params.vpd.cclk * 1000;
3688 	unsigned int selected_cpt = 0, selected_bpt = 0;
3689 
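	/*
	 * Search the feasible clocks-per-tick (cpt) values for the
	 * bytes-per-tick (bpt) pairing whose rate, bpt * (clk / cpt), comes
	 * closest to the requested rate.
	 */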
3690 	if (kbps > 0) {
3691 		kbps *= 125;     /* -> bytes */
3692 		for (cpt = 1; cpt <= 255; cpt++) {
3693 			tps = clk / cpt;
3694 			bpt = (kbps + tps / 2) / tps;
3695 			if (bpt > 0 && bpt <= 255) {
3696 				v = bpt * tps;
3697 				delta = v >= kbps ? v - kbps : kbps - v;
3698 				if (delta < mindelta) {
3699 					mindelta = delta;
3700 					selected_cpt = cpt;
3701 					selected_bpt = bpt;
3702 				}
3703 			} else if (selected_cpt)
3704 				break;
3705 		}
3706 		if (!selected_cpt)
3707 			return -EINVAL;
3708 	}
3709 	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3710 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3711 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3712 	if (sched & 1)
3713 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3714 	else
3715 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3716 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3717 	return 0;
3718 }
3719 
3720 /**
3721  *	t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3722  *	@adap: the adapter
3723  *	@sched: the scheduler index
3724  *	@ipg: the interpacket delay in tenths of nanoseconds
3725  *
3726  *	Set the interpacket delay for a HW packet rate scheduler.
3727  */
3728 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3729 {
3730 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3731 
3732 	/* convert ipg to nearest number of core clocks */
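	/* ipg is in 0.1 ns units: ticks = ipg * ticks_per_usec / 10000, rounded */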
3733 	ipg *= core_ticks_per_usec(adap);
3734 	ipg = (ipg + 5000) / 10000;
3735 	if (ipg > 0xffff)
3736 		return -EINVAL;
3737 
3738 	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3739 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3740 	if (sched & 1)
3741 		v = (v & 0xffff) | (ipg << 16);
3742 	else
3743 		v = (v & 0xffff0000) | ipg;
3744 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3745 	t3_read_reg(adap, A_TP_TM_PIO_DATA);
3746 	return 0;
3747 }
3748 
3749 /**
3750  *	t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3751  *	@adap: the adapter
3752  *	@sched: the scheduler index
3753  *	@kbps: where to store the rate in Kbps
3754  *	@ipg: where to store the interpacket delay in tenths of nanoseconds
3755  *
3756  *	Return the current configuration of a HW Tx scheduler.
3757  */
3758 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3759 		     unsigned int *ipg)
3760 {
3761 	unsigned int v, addr, bpt, cpt;
3762 
3763 	if (kbps) {
3764 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3765 		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3766 		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3767 		if (sched & 1)
3768 			v >>= 16;
3769 		bpt = (v >> 8) & 0xff;
3770 		cpt = v & 0xff;
3771 		if (!cpt)
3772 			*kbps = 0;        /* scheduler disabled */
3773 		else {
3774 			v = (adap->params.vpd.cclk * 1000) / cpt;
3775 			*kbps = (v * bpt) / 125;
3776 		}
3777 	}
3778 	if (ipg) {
3779 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3780 		t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3781 		v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3782 		if (sched & 1)
3783 			v >>= 16;
3784 		v &= 0xffff;
3785 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3786 	}
3787 }
3788 
3789 /**
3790  *	tp_init - configure TP
3791  *	@adap: the adapter
3792  *	@p: TP configuration parameters
3793  *
3794  *	Initializes the TP HW module.
3795  */
3796 static int tp_init(adapter_t *adap, const struct tp_params *p)
3797 {
3798 	int busy = 0;
3799 
3800 	tp_config(adap, p);
3801 	t3_set_vlan_accel(adap, 3, 0);
3802 
3803 	if (is_offload(adap)) {
3804 		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3805 		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3806 		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3807 				       0, 1000, 5);
3808 		if (busy)
3809 			CH_ERR(adap, "TP initialization timed out\n");
3810 	}
3811 
3812 	if (!busy)
3813 		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3814 	return busy;
3815 }
3816 
3817 /**
3818  *	t3_mps_set_active_ports - configure port failover
3819  *	@adap: the adapter
3820  *	@port_mask: bitmap of active ports
3821  *
3822  *	Sets the active ports according to the supplied bitmap.
3823  */
3824 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3825 {
3826 	if (port_mask & ~((1 << adap->params.nports) - 1))
3827 		return -EINVAL;
3828 	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3829 			 port_mask << S_PORT0ACTIVE);
3830 	return 0;
3831 }
3832 
3833 /**
3834  * 	chan_init_hw - channel-dependent HW initialization
3835  *	@adap: the adapter
3836  *	@chan_map: bitmap of Tx channels being used
3837  *
3838  *	Perform the bits of HW initialization that are dependent on the Tx
3839  *	channels being used.
3840  */
3841 static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
3842 {
3843 	int i;
3844 
3845 	if (chan_map != 3) {                                 /* one channel */
3846 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3847 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3848 		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3849 			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3850 					      F_TPTXPORT1EN | F_PORT1ACTIVE));
3851 		t3_write_reg(adap, A_PM1_TX_CFG,
3852 			     chan_map == 1 ? 0xffffffff : 0);
3853 		if (chan_map == 2)
3854 			t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3855 				     V_TX_MOD_QUEUE_REQ_MAP(0xff));
3856 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
3857 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
3858 	} else {                                             /* two channels */
3859 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3860 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3861 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3862 			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3863 		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3864 			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3865 			     F_ENFORCEPKT);
3866 		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3867 		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3868 		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3869 			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3870 		for (i = 0; i < 16; i++)
3871 			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3872 				     (i << 16) | 0x1010);
3873 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
3874 		t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
3875 	}
3876 }
3877 
3878 static int calibrate_xgm(adapter_t *adapter)
3879 {
3880 	if (uses_xaui(adapter)) {
3881 		unsigned int v, i;
3882 
3883 		for (i = 0; i < 5; ++i) {
3884 			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3885 			(void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
3886 			msleep(1);
3887 			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3888 			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3889 				t3_write_reg(adapter, A_XGM_XAUI_IMP,
3890 					     V_XAUIIMP(G_CALIMP(v) >> 2));
3891 				return 0;
3892 			}
3893 		}
3894 		CH_ERR(adapter, "MAC calibration failed\n");
3895 		return -1;
3896 	} else {
3897 		t3_write_reg(adapter, A_XGM_RGMII_IMP,
3898 			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3899 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3900 				 F_XGM_IMPSETUPDATE);
3901 	}
3902 	return 0;
3903 }
3904 
3905 static void calibrate_xgm_t3b(adapter_t *adapter)
3906 {
3907 	if (!uses_xaui(adapter)) {
3908 		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3909 			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3910 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3911 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3912 				 F_XGM_IMPSETUPDATE);
3913 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3914 				 0);
3915 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3916 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3917 	}
3918 }
3919 
3920 struct mc7_timing_params {
3921 	unsigned char ActToPreDly;
3922 	unsigned char ActToRdWrDly;
3923 	unsigned char PreCyc;
3924 	unsigned char RefCyc[5];
3925 	unsigned char BkCyc;
3926 	unsigned char WrToRdDly;
3927 	unsigned char RdToWrDly;
3928 };
3929 
3930 /*
3931  * Write a value to a register and check that the write completed.  These
3932  * writes normally complete in a cycle or two, so one read should suffice.
3933  * The very first read exists to flush the posted write to the device.
3934  */
3935 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3936 {
3937 	t3_write_reg(adapter, addr, val);
3938 	(void) t3_read_reg(adapter, addr);                   /* flush */
3939 	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3940 		return 0;
3941 	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3942 	return -EIO;
3943 }
3944 
3945 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3946 {
3947 	static const unsigned int mc7_mode[] = {
3948 		0x632, 0x642, 0x652, 0x432, 0x442
3949 	};
3950 	static const struct mc7_timing_params mc7_timings[] = {
3951 		{ 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
3952 		{ 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
3953 		{ 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
3954 		{ 9,  3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
3955 		{ 9,  4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
3956 	};
3957 
3958 	u32 val;
3959 	unsigned int width, density, slow, attempts;
3960 	adapter_t *adapter = mc7->adapter;
3961 	const struct mc7_timing_params *p = &mc7_timings[mem_type];
3962 
3963 	if (!mc7->size)
3964 		return 0;
3965 
3966 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3967 	slow = val & F_SLOW;
3968 	width = G_WIDTH(val);
3969 	density = G_DEN(val);
3970 
3971 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3972 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);  /* flush */
3973 	msleep(1);
3974 
3975 	if (!slow) {
3976 		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3977 		(void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3978 		msleep(1);
3979 		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3980 		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3981 			CH_ERR(adapter, "%s MC7 calibration timed out\n",
3982 			       mc7->name);
3983 			goto out_fail;
3984 		}
3985 	}
3986 
3987 	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3988 		     V_ACTTOPREDLY(p->ActToPreDly) |
3989 		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3990 		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3991 		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3992 
3993 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3994 		     val | F_CLKEN | F_TERM150);
3995 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3996 
3997 	if (!slow)
3998 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3999 				 F_DLLENB);
4000 	udelay(1);
4001 
4002 	val = slow ? 3 : 6;
4003 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
4004 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
4005 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
4006 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
4007 		goto out_fail;
4008 
4009 	if (!slow) {
4010 		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
4011 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
4012 				 F_DLLRST, 0);
4013 		udelay(5);
4014 	}
4015 
4016 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
4017 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
4018 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
4019 	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
4020 		       mc7_mode[mem_type]) ||
4021 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
4022 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
4023 		goto out_fail;
4024 
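	/*
	 * Program the refresh interval.  7812.5 ns is the standard DDR
	 * average periodic refresh interval (tREFI); the arithmetic below
	 * converts the memory clock in KHz into clocks per refresh interval.
	 */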
4025 	/* clock value is in KHz */
4026 	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;  /* ns */
4027 	mc7_clock /= 1000000;                          /* KHz->MHz, ns->us */
4028 
4029 	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
4030 		     F_PERREFEN | V_PREREFDIV(mc7_clock));
4031 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
4032 
4033 	t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
4034 		     F_ECCGENEN | F_ECCCHKEN);
4035 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
4036 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
4037 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
4038 		     (mc7->size << width) - 1);
4039 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
4040 	(void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
4041 
4042 	attempts = 50;
4043 	do {
4044 		msleep(250);
4045 		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
4046 	} while ((val & F_BUSY) && --attempts);
4047 	if (val & F_BUSY) {
4048 		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
4049 		goto out_fail;
4050 	}
4051 
4052 	/* Enable normal memory accesses. */
4053 	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
4054 	return 0;
4055 
4056  out_fail:
4057 	return -1;
4058 }
4059 
4060 static void config_pcie(adapter_t *adap)
4061 {
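	/*
	 * Ack latency and replay timer limits, indexed by
	 * [log2(link width)][max payload size].  The values are assumed to
	 * follow the PCIe specification's recommended tables, in symbol
	 * times.
	 */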
4062 	static const u16 ack_lat[4][6] = {
4063 		{ 237, 416, 559, 1071, 2095, 4143 },
4064 		{ 128, 217, 289, 545, 1057, 2081 },
4065 		{ 73, 118, 154, 282, 538, 1050 },
4066 		{ 67, 107, 86, 150, 278, 534 }
4067 	};
4068 	static const u16 rpl_tmr[4][6] = {
4069 		{ 711, 1248, 1677, 3213, 6285, 12429 },
4070 		{ 384, 651, 867, 1635, 3171, 6243 },
4071 		{ 219, 354, 462, 846, 1614, 3150 },
4072 		{ 201, 321, 258, 450, 834, 1602 }
4073 	};
4074 
4075 	u16 val, devid;
4076 	unsigned int log2_width, pldsize;
4077 	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
4078 
4079 	t3_os_pci_read_config_2(adap,
4080 				adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
4081 				&val);
4082 	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
4083 
4084 	/*
4085 	 * For PCIe bridge compatibility, Gen2 adapters must run with the
4086 	 * minimum Max_Read_Request_Size and payload size.
4087 	 */
4088 	t3_os_pci_read_config_2(adap, 0x2, &devid);
4089 	if (devid == 0x37) {
4090 		t3_os_pci_write_config_2(adap,
4091 		    adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
4092 		    val & ~PCI_EXP_DEVCTL_READRQ & ~PCI_EXP_DEVCTL_PAYLOAD);
4093 		pldsize = 0;
4094 	}
4095 
4096 	t3_os_pci_read_config_2(adap,
4097 				adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
4098 			       	&val);
4099 
4100 	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
4101 	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
4102 			G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
4103 	log2_width = fls(adap->params.pci.width) - 1;
4104 	acklat = ack_lat[log2_width][pldsize];
4105 	if (val & 1)                            /* check LOsEnable */
4106 		acklat += fst_trn_tx * 4;
4107 	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
4108 
4109 	if (adap->params.rev == 0)
4110 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
4111 				 V_T3A_ACKLAT(M_T3A_ACKLAT),
4112 				 V_T3A_ACKLAT(acklat));
4113 	else
4114 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
4115 				 V_ACKLAT(acklat));
4116 
4117 	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
4118 			 V_REPLAYLMT(rpllmt));
4119 
4120 	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
4121 	t3_set_reg_field(adap, A_PCIE_CFG, 0,
4122 			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
4123 			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
4124 }
4125 
4126 /**
4127  * 	t3_init_hw - initialize and configure T3 HW modules
4128  * 	@adapter: the adapter
4129  * 	@fw_params: initial parameters to pass to firmware (optional)
4130  *
4131  *	Initialize and configure T3 HW modules.  This performs the
4132  *	initialization steps that need to be done once after a card is reset.
4133 	 *	MAC and PHY initialization is handled separately whenever a port is
4134  *	enabled.
4135  *
4136  *	@fw_params are passed to FW and their value is platform dependent.
4137  *	Only the top 8 bits are available for use, the rest must be 0.
4138  */
4139 int t3_init_hw(adapter_t *adapter, u32 fw_params)
4140 {
4141 	int err = -EIO, attempts, i;
4142 	const struct vpd_params *vpd = &adapter->params.vpd;
4143 
4144 	if (adapter->params.rev > 0)
4145 		calibrate_xgm_t3b(adapter);
4146 	else if (calibrate_xgm(adapter))
4147 		goto out_err;
4148 
4149 	if (adapter->params.nports > 2)
4150 		t3_mac_init(&adap2pinfo(adapter, 0)->mac);
4151 
4152 	if (vpd->mclk) {
4153 		partition_mem(adapter, &adapter->params.tp);
4154 
4155 		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
4156 		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
4157 		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
4158 		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
4159 			        adapter->params.mc5.nfilters,
4160 			       	adapter->params.mc5.nroutes))
4161 			goto out_err;
4162 
4163 		for (i = 0; i < 32; i++)
4164 			if (clear_sge_ctxt(adapter, i, F_CQ))
4165 				goto out_err;
4166 	}
4167 
4168 	if (tp_init(adapter, &adapter->params.tp))
4169 		goto out_err;
4170 
4171 	t3_tp_set_coalescing_size(adapter,
4172 				  min(adapter->params.sge.max_pkt_size,
4173 				      MAX_RX_COALESCING_LEN), 1);
4174 	t3_tp_set_max_rxsize(adapter,
4175 			     min(adapter->params.sge.max_pkt_size, 16384U));
4176 	ulp_config(adapter, &adapter->params.tp);
4177 	if (is_pcie(adapter))
4178 		config_pcie(adapter);
4179 	else
4180 		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
4181 				 F_DMASTOPEN | F_CLIDECEN);
4182 
4183 	if (adapter->params.rev == T3_REV_C)
4184 		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
4185 				 F_CFG_CQE_SOP_MASK);
4186 
4187 	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
4188 	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
4189 	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
4190 	chan_init_hw(adapter, adapter->params.chan_map);
4191 	t3_sge_init(adapter, &adapter->params.sge);
4192 
4193 	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
4194 
4195 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
4196 	t3_write_reg(adapter, A_CIM_BOOT_CFG,
4197 		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
4198 	(void) t3_read_reg(adapter, A_CIM_BOOT_CFG);    /* flush */
4199 
4200 	attempts = 100;
4201 	do {                          /* wait for uP to initialize */
4202 		msleep(20);
4203 	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
4204 	if (!attempts) {
4205 		CH_ERR(adapter, "uP initialization timed out\n");
4206 		goto out_err;
4207 	}
4208 
4209 	err = 0;
4210  out_err:
4211 	return err;
4212 }
4213 
4214 /**
4215  *	get_pci_mode - determine a card's PCI mode
4216  *	@adapter: the adapter
4217  *	@p: where to store the PCI settings
4218  *
4219  *	Determines a card's PCI mode and associated parameters, such as speed
4220  *	and width.
4221  */
4222 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
4223 {
4224 	static unsigned short speed_map[] = { 33, 66, 100, 133 };
4225 	u32 pci_mode, pcie_cap;
4226 
4227 	pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4228 	if (pcie_cap) {
4229 		u16 val;
4230 
4231 		p->variant = PCI_VARIANT_PCIE;
4232 		p->pcie_cap_addr = pcie_cap;
4233 		t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
4234 					&val);
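		/* the negotiated link width is in LNKSTA bits 9:4 */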
4235 		p->width = (val >> 4) & 0x3f;
4236 		return;
4237 	}
4238 
4239 	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
4240 	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
4241 	p->width = (pci_mode & F_64BIT) ? 64 : 32;
4242 	pci_mode = G_PCIXINITPAT(pci_mode);
4243 	if (pci_mode == 0)
4244 		p->variant = PCI_VARIANT_PCI;
4245 	else if (pci_mode < 4)
4246 		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
4247 	else if (pci_mode < 8)
4248 		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
4249 	else
4250 		p->variant = PCI_VARIANT_PCIX_266_MODE2;
4251 }
4252 
4253 /**
4254  *	init_link_config - initialize a link's SW state
4255  *	@lc: structure holding the link state
4256  *	@caps: link capabilities
4257  *
4258  *	Initializes the SW state maintained for each link, including the link's
4259  *	capabilities and default speed/duplex/flow-control/autonegotiation
4260  *	settings.
4261  */
4262 static void __devinit init_link_config(struct link_config *lc,
4263 				       unsigned int caps)
4264 {
4265 	lc->supported = caps;
4266 	lc->requested_speed = lc->speed = SPEED_INVALID;
4267 	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
4268 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4269 	if (lc->supported & SUPPORTED_Autoneg) {
4270 		lc->advertising = lc->supported;
4271 		lc->autoneg = AUTONEG_ENABLE;
4272 		lc->requested_fc |= PAUSE_AUTONEG;
4273 	} else {
4274 		lc->advertising = 0;
4275 		lc->autoneg = AUTONEG_DISABLE;
4276 	}
4277 }
4278 
4279 /**
4280  *	mc7_calc_size - calculate MC7 memory size
4281  *	@cfg: the MC7 configuration
4282  *
4283  *	Calculates the size of an MC7 memory in bytes from the value of its
4284  *	configuration register.
4285  */
4286 static unsigned int __devinit mc7_calc_size(u32 cfg)
4287 {
4288 	unsigned int width = G_WIDTH(cfg);
4289 	unsigned int banks = !!(cfg & F_BKS) + 1;
4290 	unsigned int org = !!(cfg & F_ORG) + 1;
4291 	unsigned int density = G_DEN(cfg);
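	/*
	 * Assumed from the register encoding: the density field selects a
	 * 256 MB base that the bank count multiplies and the organization
	 * and bus width divide.
	 */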
4292 	unsigned int MBs = ((256 << density) * banks) / (org << width);
4293 
4294 	return MBs << 20;
4295 }
4296 
4297 static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
4298 			       unsigned int base_addr, const char *name)
4299 {
4300 	u32 cfg;
4301 
4302 	mc7->adapter = adapter;
4303 	mc7->name = name;
4304 	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
4305 	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
4306 	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
4307 	mc7->width = G_WIDTH(cfg);
4308 }
4309 
4310 void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
4311 {
4312 	u16 devid;
4313 
4314 	mac->adapter = adapter;
4315 	mac->multiport = adapter->params.nports > 2;
4316 	if (mac->multiport) {
4317 		mac->ext_port = (unsigned char)index;
4318 		mac->nucast = 8;
4319 	} else
4320 		mac->nucast = 1;
4321 
4322 	/* Gen2 adapters use VPD xauicfg[] to tell the driver which MAC is
4323 	   connected to each port; they are supposed to use XGMAC0 for both
4324 	   ports. */
4325 	t3_os_pci_read_config_2(adapter, 0x2, &devid);
4326 
4327 	if (mac->multiport ||
4328 		(!adapter->params.vpd.xauicfg[1] && (devid==0x37)))
4329 			index  = 0;
4330 
4331 	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
4332 
4333 	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
4334 		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
4335 			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
4336 		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
4337 				 F_ENRGMII, 0);
4338 	}
4339 }
4340 
4341 /**
4342  *	early_hw_init - HW initialization done at card detection time
4343  *	@adapter: the adapter
4344  *	@ai: contains information about the adapter type and properties
4345  *
4346  *	Performs the part of HW initialization that is done early on when the
4347  *	driver first detects the card.  Most of the HW state is initialized
4348  *	lazily later on when a port or an offload function is first used.
4349  */
4350 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
4351 {
4352 	u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
4353 			      3 : 2);
4354 	u32 gpio_out = ai->gpio_out;
4355 
4356 	mi1_init(adapter, ai);
4357 	t3_write_reg(adapter, A_I2C_CFG,                  /* set for 80KHz */
4358 		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
4359 	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
4360 		     gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
4361 	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
4362 	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
4363 
4364 	if (adapter->params.rev == 0 || !uses_xaui(adapter))
4365 		val |= F_ENRGMII;
4366 
4367 	/* Enable MAC clocks so we can access the registers */
4368 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4369 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4370 
4371 	val |= F_CLKDIVRESET_;
4372 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
4373 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4374 	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
4375 	(void) t3_read_reg(adapter, A_XGM_PORT_CFG);
4376 }
4377 
4378 /**
4379  *	t3_reset_adapter - reset the adapter
4380  *	@adapter: the adapter
4381  *
4382  * 	Reset the adapter.
4383  */
4384 int t3_reset_adapter(adapter_t *adapter)
4385 {
4386 	int i, save_and_restore_pcie =
4387 	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
4388 	uint16_t devid = 0;
4389 
4390 	if (save_and_restore_pcie)
4391 		t3_os_pci_save_state(adapter);
4392 	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
4393 
4394 	/*
4395 	 * Delay to give the device time to reset fully.
4396 	 * XXX The delay time should be tuned.
4397 	 */
4398 	for (i = 0; i < 10; i++) {
4399 		msleep(50);
4400 		t3_os_pci_read_config_2(adapter, 0x00, &devid);
4401 		if (devid == 0x1425)
4402 			break;
4403 	}
4404 
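	/* 0x1425 is Chelsio's PCI vendor ID; it reads back correctly only
	 * once the device is out of reset. */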
4405 	if (devid != 0x1425)
4406 		return -1;
4407 
4408 	if (save_and_restore_pcie)
4409 		t3_os_pci_restore_state(adapter);
4410 	return 0;
4411 }
4412 
4413 static int init_parity(adapter_t *adap)
4414 {
4415 	int i, err, addr;
4416 
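	/*
	 * Zero the SGE egress and response-queue contexts and every CIM IBQ
	 * entry (via the debug interface) so the parity-protected memories
	 * start out in a known-good state.
	 */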
4417 	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
4418 		return -EBUSY;
4419 
4420 	for (err = i = 0; !err && i < 16; i++)
4421 		err = clear_sge_ctxt(adap, i, F_EGRESS);
4422 	for (i = 0xfff0; !err && i <= 0xffff; i++)
4423 		err = clear_sge_ctxt(adap, i, F_EGRESS);
4424 	for (i = 0; !err && i < SGE_QSETS; i++)
4425 		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
4426 	if (err)
4427 		return err;
4428 
4429 	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
4430 	for (i = 0; i < 4; i++)
4431 		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
4432 			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
4433 				     F_IBQDBGWR | V_IBQDBGQID(i) |
4434 				     V_IBQDBGADDR(addr));
4435 			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
4436 					      F_IBQDBGBUSY, 0, 2, 1);
4437 			if (err)
4438 				return err;
4439 		}
4440 	return 0;
4441 }
4442 
4443 /**
4444  *	t3_prep_adapter - prepare SW and HW for operation
4445  *	@adapter: the adapter
4446  *	@ai: contains information about the adapter type and properties
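 *	@reset: whether to reset the card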
4447  *
4448  *	Initialize adapter SW state for the various HW modules, set initial
4449  *	values for some adapter tunables, take PHYs out of reset, and
4450  *	initialize the MDIO interface.
4451  */
4452 int __devinit t3_prep_adapter(adapter_t *adapter,
4453 			      const struct adapter_info *ai, int reset)
4454 {
4455 	int ret;
4456 	unsigned int i, j = 0;
4457 
4458 	get_pci_mode(adapter, &adapter->params.pci);
4459 
4460 	adapter->params.info = ai;
4461 	adapter->params.nports = ai->nports0 + ai->nports1;
4462 	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
4463 	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
4464 
4465 	/*
4466 	 * We used to only run the "adapter check task" once a second if
4467 	 * we had PHYs which didn't support interrupts (we would check
4468 	 * their link status once a second).  Now we check other conditions
4469 	 * in that routine which would [potentially] impose a very high
4470 	 * interrupt load on the system.  As such, we now always scan the
4471 	 * adapter state once a second ...
4472 	 */
4473 	adapter->params.linkpoll_period = 10;
4474 
4475 	if (adapter->params.nports > 2)
4476 		adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
4477 	else
4478 		adapter->params.stats_update_period = is_10G(adapter) ?
4479 			MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
4480 	adapter->params.pci.vpd_cap_addr =
4481 		t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4482 
4483 	ret = get_vpd_params(adapter, &adapter->params.vpd);
4484 	if (ret < 0)
4485 		return ret;
4486 
4487 	if (reset && t3_reset_adapter(adapter))
4488 		return -1;
4489 
4490 	if (adapter->params.vpd.mclk) {
4491 		struct tp_params *p = &adapter->params.tp;
4492 
4493 		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
4494 		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
4495 		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
4496 
4497 		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
4498 		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
4499 		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
4500 		p->cm_size = t3_mc7_size(&adapter->cm);
4501 		p->chan_rx_size = p->pmrx_size / 2;     /* only 1 Rx channel */
4502 		p->chan_tx_size = p->pmtx_size / p->nchan;
4503 		p->rx_pg_size = 64 * 1024;
4504 		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
4505 		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
4506 		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
4507 		p->ntimer_qs = p->cm_size >= (128 << 20) ||
4508 			       adapter->params.rev > 0 ? 12 : 6;
4509 		p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
4510 			 1;
4511 		p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
4512 	}
4513 
4514 	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
4515 				  t3_mc7_size(&adapter->pmtx) &&
4516 				  t3_mc7_size(&adapter->cm);
4517 
4518 	t3_sge_prep(adapter, &adapter->params.sge);
4519 
4520 	if (is_offload(adapter)) {
4521 		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
4522 		/* PR 6487. TOE and filtering are mutually exclusive */
4523 		adapter->params.mc5.nfilters = 0;
4524 		adapter->params.mc5.nroutes = 0;
4525 		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
4526 
4527 		init_mtus(adapter->params.mtus);
4528 		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4529 	}
4530 
4531 	early_hw_init(adapter, ai);
4532 	ret = init_parity(adapter);
4533 	if (ret)
4534 		return ret;
4535 
4536 	if (adapter->params.nports > 2 &&
4537 	    (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
4538 		return ret;
4539 
4540 	for_each_port(adapter, i) {
4541 		u8 hw_addr[6];
4542 		const struct port_type_info *pti;
4543 		struct port_info *p = adap2pinfo(adapter, i);
4544 
4545 		for (;;) {
4546 			unsigned port_type = adapter->params.vpd.port_type[j];
4547 			if (port_type) {
4548 				if (port_type < ARRAY_SIZE(port_types)) {
4549 					pti = &port_types[port_type];
4550 					break;
4551 				} else
4552 					return -EINVAL;
4553 			}
4554 			j++;
4555 			if (j >= ARRAY_SIZE(adapter->params.vpd.port_type))
4556 				return -EINVAL;
4557 		}
4558 		ret = pti->phy_prep(p, ai->phy_base_addr + j,
4559 				    ai->mdio_ops);
4560 		if (ret)
4561 			return ret;
4562 		mac_prep(&p->mac, adapter, j);
4563 		++j;
4564 
4565 		/*
4566 		 * The VPD EEPROM stores the base Ethernet address for the
4567 		 * card.  A port's address is derived from the base by adding
4568 		 * the port's index to the base's low octet.
4569 		 */
4570 		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
4571 		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
4572 
4573 		t3_os_set_hw_addr(adapter, i, hw_addr);
4574 		init_link_config(&p->link_config, p->phy.caps);
4575 		p->phy.ops->power_down(&p->phy, 1);
4576 
4577 		/*
4578 		 * If the PHY doesn't support interrupts for link status
4579 		 * changes, schedule a scan of the adapter links at least
4580 		 * once a second.
4581 		 */
4582 		if (!(p->phy.caps & SUPPORTED_IRQ) &&
4583 		    adapter->params.linkpoll_period > 10)
4584 			adapter->params.linkpoll_period = 10;
4585 	}
4586 
4587 	return 0;
4588 }
4589 
4590 /**
4591  *	t3_reinit_adapter - prepare HW for operation again
4592  *	@adap: the adapter
4593  *
4594  *	Put HW in the same state as @t3_prep_adapter without any changes to
4595  *	SW state.  This is a cut-down version of @t3_prep_adapter intended
4596  *	to be used after events that wipe out HW state but preserve SW state,
4597  *	e.g., EEH.  The device must be reset before calling this.
4598  */
4599 int t3_reinit_adapter(adapter_t *adap)
4600 {
4601 	unsigned int i;
4602 	int ret, j = 0;
4603 
4604 	early_hw_init(adap, adap->params.info);
4605 	ret = init_parity(adap);
4606 	if (ret)
4607 		return ret;
4608 
4609 	if (adap->params.nports > 2 &&
4610 	    (ret = t3_vsc7323_init(adap, adap->params.nports)))
4611 		return ret;
4612 
4613 	for_each_port(adap, i) {
4614 		const struct port_type_info *pti;
4615 		struct port_info *p = adap2pinfo(adap, i);
4616 
4617 		for (;;) {
4618 			unsigned port_type = adap->params.vpd.port_type[j];
4619 			if (port_type) {
4620 				if (port_type < ARRAY_SIZE(port_types)) {
4621 					pti = &port_types[port_type];
4622 					break;
4623 				} else
4624 					return -EINVAL;
4625 			}
4626 			j++;
4627 			if (j >= ARRAY_SIZE(adap->params.vpd.port_type))
4628 				return -EINVAL;
4629 		}
4630 		ret = pti->phy_prep(p, p->phy.addr, NULL);
4631 		if (ret)
4632 			return ret;
4633 		p->phy.ops->power_down(&p->phy, 1);
4634 	}
4635 	return 0;
4636 }
4637 
4638 void t3_led_ready(adapter_t *adapter)
4639 {
4640 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
4641 			 F_GPIO0_OUT_VAL);
4642 }
4643 
4644 void t3_port_failover(adapter_t *adapter, int port)
4645 {
4646 	u32 val;
4647 
4648 	val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
4649 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4650 			 val);
4651 }
4652 
4653 void t3_failover_done(adapter_t *adapter, int port)
4654 {
4655 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4656 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
4657 }
4658 
4659 void t3_failover_clear(adapter_t *adapter)
4660 {
4661 	t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
4662 			 F_PORT0ACTIVE | F_PORT1ACTIVE);
4663 }
4664 
4665 static int t3_cim_hac_read(adapter_t *adapter, u32 addr, u32 *val)
4666 {
4667 	u32 v;
4668 
4669 	t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4670 	if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4671 				F_HOSTBUSY, 0, 10, 10, &v))
4672 		return -EIO;
4673 
4674 	*val = t3_read_reg(adapter, A_CIM_HOST_ACC_DATA);
4675 
4676 	return 0;
4677 }
4678 
4679 static int t3_cim_hac_write(adapter_t *adapter, u32 addr, u32 val)
4680 {
4681 	u32 v;
4682 
4683 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, val);
4684 
4685 	addr |= F_HOSTWRITE;
4686 	t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, addr);
4687 
4688 	if (t3_wait_op_done_val(adapter, A_CIM_HOST_ACC_CTRL,
4689 				F_HOSTBUSY, 0, 10, 5, &v))
4690 		return -EIO;
4691 	return 0;
4692 }
4693 
4694 int t3_get_up_la(adapter_t *adapter, u32 *stopped, u32 *index,
4695 		 u32 *size, void *data)
4696 {
4697 	u32 v, *buf = data;
4698 	int i, cnt,  ret;
4699 
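	/*
	 * Dump the uP logic analyzer: freeze it, issue a read request for
	 * each entry (bit 1 of LA_CTRL), poll until the request clears, and
	 * collect the word from LA_DATA.  The LA is unfrozen on exit.
	 */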
4700 	if (*size < LA_ENTRIES * 4)
4701 		return -EINVAL;
4702 
4703 	ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4704 	if (ret)
4705 		goto out;
4706 
4707 	*stopped = !(v & 1);
4708 
4709 	/* Freeze LA */
4710 	if (!*stopped) {
4711 		ret = t3_cim_hac_write(adapter, LA_CTRL, 0);
4712 		if (ret)
4713 			goto out;
4714 	}
4715 
4716 	for (i = 0; i < LA_ENTRIES; i++) {
4717 		v = (i << 2) | (1 << 1);
4718 		ret = t3_cim_hac_write(adapter, LA_CTRL, v);
4719 		if (ret)
4720 			goto out;
4721 
4722 		ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4723 		if (ret)
4724 			goto out;
4725 
4726 		cnt = 20;
4727 		while ((v & (1 << 1)) && cnt) {
4728 			udelay(5);
4729 			--cnt;
4730 			ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4731 			if (ret)
4732 				goto out;
4733 		}
4734 
4735 		if (v & (1 << 1))
4736 			return -EIO;
4737 
4738 		ret = t3_cim_hac_read(adapter, LA_DATA, &v);
4739 		if (ret)
4740 			goto out;
4741 
4742 		*buf++ = v;
4743 	}
4744 
4745 	ret = t3_cim_hac_read(adapter, LA_CTRL, &v);
4746 	if (ret)
4747 		goto out;
4748 
4749 	*index = (v >> 16) + 4;
4750 	*size = LA_ENTRIES * 4;
4751 out:
4752 	/* Unfreeze LA */
4753 	t3_cim_hac_write(adapter, LA_CTRL, 1);
4754 	return ret;
4755 }
4756 
4757 int t3_get_up_ioqs(adapter_t *adapter, u32 *size, void *data)
4758 {
4759 	u32 v, *buf = data;
4760 	int i, j, ret;
4761 
4762 	if (*size < IOQ_ENTRIES * sizeof(struct t3_ioq_entry))
4763 		return -EINVAL;
4764 
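	/*
	 * The first four words appear to hold global IOQ state; each
	 * queue's four registers then follow at 0x10-byte strides.
	 */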
4765 	for (i = 0; i < 4; i++) {
4766 		ret = t3_cim_hac_read(adapter, (4 * i), &v);
4767 		if (ret)
4768 			goto out;
4769 
4770 		*buf++ = v;
4771 	}
4772 
4773 	for (i = 0; i < IOQ_ENTRIES; i++) {
4774 		u32 base_addr = 0x10 * (i + 1);
4775 
4776 		for (j = 0; j < 4; j++) {
4777 			ret = t3_cim_hac_read(adapter, base_addr + 4 * j, &v);
4778 			if (ret)
4779 				goto out;
4780 
4781 			*buf++ = v;
4782 		}
4783 	}
4784 
4785 	*size = IOQ_ENTRIES * sizeof(struct t3_ioq_entry);
4786 
4787 out:
4788 	return ret;
4789 }
4790 
4791