xref: /netbsd/sys/dev/pci/cxgb/cxgb_t3_hw.c (revision ec0fc95d)
1 /**************************************************************************
2 
3 Copyright (c) 2007, Chelsio Inc.
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Neither the name of the Chelsio Corporation nor the names of its
13     contributors may be used to endorse or promote products derived from
14     this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27 
28 ***************************************************************************/
29 
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: cxgb_t3_hw.c,v 1.7 2021/12/10 20:36:04 andvar Exp $");
32 
33 
34 #ifdef CONFIG_DEFINED
35 #include <cxgb_include.h>
36 #else
37 #include "cxgb_include.h"
38 #endif
39 
40 #undef msleep
41 #define msleep t3_os_sleep
42 
43 /**
44  *  t3_wait_op_done_val - wait until an operation is completed
45  *  @adapter: the adapter performing the operation
46  *  @reg: the register to check for completion
47  *  @mask: a single-bit field within @reg that indicates completion
48  *  @polarity: the value of the field when the operation is completed
49  *  @attempts: number of check iterations
50  *  @delay: delay in usecs between iterations
51  *  @valp: where to store the value of the register at completion time
52  *
53  *  Wait until an operation is completed by checking a bit in a register
54  *  up to @attempts times.  If @valp is not NULL the value of the register
55  *  at the time it indicated completion is stored there.  Returns 0 if the
56  *  operation completes and -EAGAIN otherwise.
57  */
t3_wait_op_done_val(adapter_t * adapter,int reg,u32 mask,int polarity,int attempts,int delay,u32 * valp)58 int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
59             int attempts, int delay, u32 *valp)
60 {
61     while (1) {
62         u32 val = t3_read_reg(adapter, reg);
63 
64         if (!!(val & mask) == polarity) {
65             if (valp)
66                 *valp = val;
67             return 0;
68         }
69         if (--attempts == 0)
70             return -EAGAIN;
71         if (delay)
72             udelay(delay);
73     }
74 }
75 
76 /**
77  *  t3_write_regs - write a bunch of registers
78  *  @adapter: the adapter to program
79  *  @p: an array of register address/register value pairs
80  *  @n: the number of address/value pairs
81  *  @offset: register address offset
82  *
83  *  Takes an array of register address/register value pairs and writes each
84  *  value to the corresponding register.  Register addresses are adjusted
85  *  by the supplied offset.
86  */
t3_write_regs(adapter_t * adapter,const struct addr_val_pair * p,int n,unsigned int offset)87 void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
88            unsigned int offset)
89 {
90     while (n--) {
91         t3_write_reg(adapter, p->reg_addr + offset, p->val);
92         p++;
93     }
94 }
95 
96 /**
97  *  t3_set_reg_field - set a register field to a value
98  *  @adapter: the adapter to program
99  *  @addr: the register address
100  *  @mask: specifies the portion of the register to modify
101  *  @val: the new value for the register field
102  *
103  *  Sets a register field specified by the supplied mask to the
104  *  given value.
105  */
t3_set_reg_field(adapter_t * adapter,unsigned int addr,u32 mask,u32 val)106 void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
107 {
108     u32 v = t3_read_reg(adapter, addr) & ~mask;
109 
110     t3_write_reg(adapter, addr, v | val);
111     (void) t3_read_reg(adapter, addr);      /* flush */
112 }
113 
114 /**
115  *  t3_read_indirect - read indirectly addressed registers
116  *  @adap: the adapter
117  *  @addr_reg: register holding the indirect address
118  *  @data_reg: register holding the value of the indirect register
119  *  @vals: where the read register values are stored
120  *  @start_idx: index of first indirect register to read
121  *  @nregs: how many indirect registers to read
122  *
123  *  Reads registers that are accessed indirectly through an address/data
124  *  register pair.
125  */
t3_read_indirect(adapter_t * adap,unsigned int addr_reg,unsigned int data_reg,u32 * vals,unsigned int nregs,unsigned int start_idx)126 static void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
127               unsigned int data_reg, u32 *vals, unsigned int nregs,
128               unsigned int start_idx)
129 {
130     while (nregs--) {
131         t3_write_reg(adap, addr_reg, start_idx);
132         *vals++ = t3_read_reg(adap, data_reg);
133         start_idx++;
134     }
135 }
136 
137 /**
138  *  t3_mc7_bd_read - read from MC7 through backdoor accesses
139  *  @mc7: identifies MC7 to read from
140  *  @start: index of first 64-bit word to read
141  *  @n: number of 64-bit words to read
142  *  @buf: where to store the read result
143  *
144  *  Read n 64-bit words from MC7 starting at word start, using backdoor
145  *  accesses.
146  */
t3_mc7_bd_read(struct mc7 * mc7,unsigned int start,unsigned int n,u64 * buf)147 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
148                    u64 *buf)
149 {
150     static int shift[] = { 0, 0, 16, 24 };
151     static int step[]  = { 0, 32, 16, 8 };
152 
153     unsigned int size64 = mc7->size / 8;  /* # of 64-bit words */
154     adapter_t *adap = mc7->adapter;
155 
156     if (start >= size64 || start + n > size64)
157         return -EINVAL;
158 
159     start *= (8 << mc7->width);
160     while (n--) {
161         int i;
162         u64 val64 = 0;
163 
164         for (i = (1 << mc7->width) - 1; i >= 0; --i) {
165             int attempts = 10;
166             u32 val;
167 
168             t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
169                        start);
170             t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
171             val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
172             while ((val & F_BUSY) && attempts--)
173                 val = t3_read_reg(adap,
174                           mc7->offset + A_MC7_BD_OP);
175             if (val & F_BUSY)
176                 return -EIO;
177 
178             val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
179             if (mc7->width == 0) {
180                 val64 = t3_read_reg(adap,
181                         mc7->offset + A_MC7_BD_DATA0);
182                 val64 |= (u64)val << 32;
183             } else {
184                 if (mc7->width > 1)
185                     val >>= shift[mc7->width];
186                 val64 |= (u64)val << (step[mc7->width] * i);
187             }
188             start += 8;
189         }
190         *buf++ = val64;
191     }
192     return 0;
193 }
194 
195 /*
196  * Initialize MI1.
197  */
mi1_init(adapter_t * adap,const struct adapter_info * ai)198 static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
199 {
200         u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
201         u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
202           V_CLKDIV(clkdiv);
203 
204     if (!(ai->caps & SUPPORTED_10000baseT_Full))
205         val |= V_ST(1);
206         t3_write_reg(adap, A_MI1_CFG, val);
207 }
208 
209 #define MDIO_ATTEMPTS 20
210 
211 /*
212  * MI1 read/write operations for direct-addressed PHYs.
213  */
mi1_read(adapter_t * adapter,int phy_addr,int mmd_addr,int reg_addr,unsigned int * valp)214 static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
215             int reg_addr, unsigned int *valp)
216 {
217     int ret;
218     u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
219 
220     if (mmd_addr)
221         return -EINVAL;
222 
223     MDIO_LOCK(adapter);
224     t3_write_reg(adapter, A_MI1_ADDR, addr);
225     t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
226     ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
227     if (!ret)
228         *valp = t3_read_reg(adapter, A_MI1_DATA);
229     MDIO_UNLOCK(adapter);
230     return ret;
231 }
232 
/*
 * MI1 write operation for direct-addressed PHYs.
 */
static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
             int reg_addr, unsigned int val)
{
    u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
    int ret;

    /* Direct-addressed PHYs take no MMD address. */
    if (mmd_addr)
        return -EINVAL;

    MDIO_LOCK(adapter);
    t3_write_reg(adapter, A_MI1_ADDR, addr);
    t3_write_reg(adapter, A_MI1_DATA, val);
    t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
    ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
    MDIO_UNLOCK(adapter);
    return ret;
}
250 
251 static struct mdio_ops mi1_mdio_ops = {
252     mi1_read,
253     mi1_write
254 };
255 
256 /*
257  * MI1 read/write operations for indirect-addressed PHYs.
258  */
mi1_ext_read(adapter_t * adapter,int phy_addr,int mmd_addr,int reg_addr,unsigned int * valp)259 static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
260             int reg_addr, unsigned int *valp)
261 {
262     int ret;
263     u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
264 
265     MDIO_LOCK(adapter);
266     t3_write_reg(adapter, A_MI1_ADDR, addr);
267     t3_write_reg(adapter, A_MI1_DATA, reg_addr);
268     t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
269     ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
270     if (!ret) {
271         t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
272         ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
273                       MDIO_ATTEMPTS, 10);
274         if (!ret)
275             *valp = t3_read_reg(adapter, A_MI1_DATA);
276     }
277     MDIO_UNLOCK(adapter);
278     return ret;
279 }
280 
/*
 * MI1 write operation for indirect-addressed PHYs: latch the register
 * address first, then issue the data write.
 */
static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
             int reg_addr, unsigned int val)
{
    u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
    int ret;

    MDIO_LOCK(adapter);
    t3_write_reg(adapter, A_MI1_ADDR, addr);
    t3_write_reg(adapter, A_MI1_DATA, reg_addr);
    t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
    ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
    if (ret == 0) {
        t3_write_reg(adapter, A_MI1_DATA, val);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                      MDIO_ATTEMPTS, 10);
    }
    MDIO_UNLOCK(adapter);
    return ret;
}
301 
302 static struct mdio_ops mi1_mdio_ext_ops = {
303     mi1_ext_read,
304     mi1_ext_write
305 };
306 
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Read-modify-writes a PHY register: the current value is masked with
 *	~@clear and ORed with @set.  Returns 0 on success or the error from
 *	the underlying MDIO access.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
            unsigned int set)
{
    unsigned int val;
    int ret = mdio_read(phy, mmd, reg, &val);

    if (ret == 0)
        ret = mdio_write(phy, mmd, reg, (val & ~clear) | set);
    return ret;
}
331 
332 /**
333  *  t3_phy_reset - reset a PHY block
334  *  @phy: the PHY to operate on
335  *  @mmd: the device address of the PHY block to reset
336  *  @wait: how long to wait for the reset to complete in 1ms increments
337  *
338  *  Resets a PHY block and optionally waits for the reset to complete.
339  *  @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
340  *  for 10G PHYs.
341  */
t3_phy_reset(struct cphy * phy,int mmd,int wait)342 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
343 {
344     int err;
345     unsigned int ctl;
346 
347     err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
348     if (err || !wait)
349         return err;
350 
351     do {
352         err = mdio_read(phy, mmd, MII_BMCR, &ctl);
353         if (err)
354             return err;
355         ctl &= BMCR_RESET;
356         if (ctl)
357             msleep(1);
358     } while (ctl && --wait);
359 
360     return ctl ? -1 : 0;
361 }
362 
363 /**
364  *  t3_phy_advertise - set the PHY advertisement registers for autoneg
365  *  @phy: the PHY to operate on
366  *  @advert: bitmap of capabilities the PHY should advertise
367  *
368  *  Sets a 10/100/1000 PHY's advertisement registers to advertise the
369  *  requested capabilities.
370  */
t3_phy_advertise(struct cphy * phy,unsigned int advert)371 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
372 {
373     int err;
374     unsigned int val = 0;
375 
376     err = mdio_read(phy, 0, MII_CTRL1000, &val);
377     if (err)
378         return err;
379 
380     val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
381     if (advert & ADVERTISED_1000baseT_Half)
382         val |= ADVERTISE_1000HALF;
383     if (advert & ADVERTISED_1000baseT_Full)
384         val |= ADVERTISE_1000FULL;
385 
386     err = mdio_write(phy, 0, MII_CTRL1000, val);
387     if (err)
388         return err;
389 
390     val = 1;
391     if (advert & ADVERTISED_10baseT_Half)
392         val |= ADVERTISE_10HALF;
393     if (advert & ADVERTISED_10baseT_Full)
394         val |= ADVERTISE_10FULL;
395     if (advert & ADVERTISED_100baseT_Half)
396         val |= ADVERTISE_100HALF;
397     if (advert & ADVERTISED_100baseT_Full)
398         val |= ADVERTISE_100FULL;
399     if (advert & ADVERTISED_Pause)
400         val |= ADVERTISE_PAUSE_CAP;
401     if (advert & ADVERTISED_Asym_Pause)
402         val |= ADVERTISE_PAUSE_ASYM;
403     return mdio_write(phy, 0, MII_ADVERTISE, val);
404 }
405 
406 /**
407  *  t3_set_phy_speed_duplex - force PHY speed and duplex
408  *  @phy: the PHY to operate on
409  *  @speed: requested PHY speed
410  *  @duplex: requested PHY duplex
411  *
412  *  Force a 10/100/1000 PHY's speed and duplex.  This also disables
413  *  auto-negotiation except for GigE, where auto-negotiation is mandatory.
414  */
t3_set_phy_speed_duplex(struct cphy * phy,int speed,int duplex)415 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
416 {
417     int err;
418     unsigned int ctl;
419 
420     err = mdio_read(phy, 0, MII_BMCR, &ctl);
421     if (err)
422         return err;
423 
424     if (speed >= 0) {
425         ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
426         if (speed == SPEED_100)
427             ctl |= BMCR_SPEED100;
428         else if (speed == SPEED_1000)
429             ctl |= BMCR_SPEED1000;
430     }
431     if (duplex >= 0) {
432         ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
433         if (duplex == DUPLEX_FULL)
434             ctl |= BMCR_FULLDPLX;
435     }
436     if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for GigE */
437         ctl |= BMCR_ANENABLE;
438     return mdio_write(phy, 0, MII_BMCR, ctl);
439 }
440 
441 static struct adapter_info t3_adap_info[] = {
442     { 1, 1, 0, 0, 0,
443       F_GPIO2_OEN | F_GPIO4_OEN |
444       F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
445       0,
446       &mi1_mdio_ops, "Chelsio PE9000" },
447     { 1, 1, 0, 0, 0,
448       F_GPIO2_OEN | F_GPIO4_OEN |
449       F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
450       0,
451       &mi1_mdio_ops, "Chelsio T302" },
452     { 1, 0, 0, 0, 0,
453       F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
454       F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
455       SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
456       &mi1_mdio_ext_ops, "Chelsio T310" },
457     { 1, 1, 0, 0, 0,
458       F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
459       F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
460       F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
461       SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
462       &mi1_mdio_ext_ops, "Chelsio T320" },
463     { 4, 0, 0, 0, 0,
464       F_GPIO5_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO5_OUT_VAL |
465       F_GPIO6_OUT_VAL | F_GPIO7_OUT_VAL,
466       F_GPIO1 | F_GPIO2 | F_GPIO3 | F_GPIO4, SUPPORTED_AUI,
467       &mi1_mdio_ops, "Chelsio T304" },
468 };
469 
470 /*
471  * Return the adapter_info structure with a given index.  Out-of-range indices
472  * return NULL.
473  */
t3_get_adapter_info(unsigned int id)474 const struct adapter_info *t3_get_adapter_info(unsigned int id)
475 {
476     return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
477 }
478 
479 #define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
480          SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
481 #define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
482 
483 static struct port_type_info port_types[] = {
484     { NULL, 0, NULL },
485     { t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
486       "10GBASE-XR" },
487     { t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
488       "10/100/1000BASE-T" },
489     { t3_mv88e1xxx_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
490       "10/100/1000BASE-T" },
491     { t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
492     { NULL, CAPS_10G, "10GBASE-KX4" },
493     { t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
494     { t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
495       "10GBASE-SR" },
496     { NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
497 };
498 
499 #undef CAPS_1G
500 #undef CAPS_10G
501 
502 #define VPD_ENTRY(name, len) \
503     u8 name##_kword[2]; u8 name##_len; char name##_data[len]
504 
505 /*
506  * Partial EEPROM Vital Product Data structure.  Includes only the ID and
507  * VPD-R sections.
508  */
509 struct t3_vpd {
510     u8  id_tag;
511     u8  id_len[2];
512     u8  id_data[16];
513     u8  vpdr_tag;
514     u8  vpdr_len[2];
515     VPD_ENTRY(pn, 16);                     /* part number */
516     VPD_ENTRY(ec, 16);                     /* EC level */
517     VPD_ENTRY(sn, SERNUM_LEN);             /* serial number */
518     VPD_ENTRY(na, 12);                     /* MAC address base */
519     VPD_ENTRY(cclk, 6);                    /* core clock */
520     VPD_ENTRY(mclk, 6);                    /* mem clock */
521     VPD_ENTRY(uclk, 6);                    /* uP clk */
522     VPD_ENTRY(mdc, 6);                     /* MDIO clk */
523     VPD_ENTRY(mt, 2);                      /* mem timing */
524     VPD_ENTRY(xaui0cfg, 6);                /* XAUI0 config */
525     VPD_ENTRY(xaui1cfg, 6);                /* XAUI1 config */
526     VPD_ENTRY(port0, 2);                   /* PHY0 complex */
527     VPD_ENTRY(port1, 2);                   /* PHY1 complex */
528     VPD_ENTRY(port2, 2);                   /* PHY2 complex */
529     VPD_ENTRY(port3, 2);                   /* PHY3 complex */
530     VPD_ENTRY(rv, 1);                      /* csum */
531     u32 pad;                  /* for multiple-of-4 sizing and alignment */
532 };
533 
534 #define EEPROM_MAX_POLL   4
535 #define EEPROM_STAT_ADDR  0x4000
536 #define VPD_BASE          0xc00
537 
538 /**
539  *  t3_seeprom_read - read a VPD EEPROM location
540  *  @adapter: adapter to read
541  *  @addr: EEPROM address
542  *  @data: where to store the read data
543  *
544  *  Read a 32-bit word from a location in VPD EEPROM using the card's PCI
545  *  VPD ROM capability.  A zero is written to the flag bit when the
546  *  address is written to the control register.  The hardware device will
547  *  set the flag to 1 when 4 bytes have been read into the data register.
548  */
t3_seeprom_read(adapter_t * adapter,u32 addr,u32 * data)549 int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
550 {
551     u16 val;
552     int attempts = EEPROM_MAX_POLL;
553     unsigned int base = adapter->params.pci.vpd_cap_addr;
554 
555     if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
556         return -EINVAL;
557 
558     t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
559     do {
560         udelay(10);
561         t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
562     } while (!(val & PCI_VPD_ADDR_F) && --attempts);
563 
564     if (!(val & PCI_VPD_ADDR_F)) {
565         CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
566         return -EIO;
567     }
568     t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
569     *data = le32_to_cpu(*data);
570     return 0;
571 }
572 
573 /**
574  *  t3_seeprom_write - write a VPD EEPROM location
575  *  @adapter: adapter to write
576  *  @addr: EEPROM address
577  *  @data: value to write
578  *
579  *  Write a 32-bit word to a location in VPD EEPROM using the card's PCI
580  *  VPD ROM capability.
581  */
t3_seeprom_write(adapter_t * adapter,u32 addr,u32 data)582 int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
583 {
584     u16 val;
585     int attempts = EEPROM_MAX_POLL;
586     unsigned int base = adapter->params.pci.vpd_cap_addr;
587 
588     if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
589         return -EINVAL;
590 
591     t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
592                  cpu_to_le32(data));
593     t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
594                  (u16)addr | PCI_VPD_ADDR_F);
595     do {
596         msleep(1);
597         t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
598     } while ((val & PCI_VPD_ADDR_F) && --attempts);
599 
600     if (val & PCI_VPD_ADDR_F) {
601         CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
602         return -EIO;
603     }
604     return 0;
605 }
606 
607 /**
608  *  t3_seeprom_wp - enable/disable EEPROM write protection
609  *  @adapter: the adapter
610  *  @enable: 1 to enable write protection, 0 to disable it
611  *
612  *  Enables or disables write protection on the serial EEPROM.
613  */
t3_seeprom_wp(adapter_t * adapter,int enable)614 int t3_seeprom_wp(adapter_t *adapter, int enable)
615 {
616     return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
617 }
618 
/*
 * Convert a character holding a hex digit ('0'-'9', 'a'-'f', 'A'-'F')
 * to its numeric value.
 */
static unsigned int hex2int(unsigned char c)
{
    if (isdigit(c))
        return c - '0';
    return toupper(c) - 'A' + 10;
}
626 
627 /**
628  *  get_vpd_params - read VPD parameters from VPD EEPROM
629  *  @adapter: adapter to read
630  *  @p: where to store the parameters
631  *
632  *  Reads card parameters stored in VPD EEPROM.
633  */
get_vpd_params(adapter_t * adapter,struct vpd_params * p)634 static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
635 {
636     int i, addr, ret;
637     struct t3_vpd vpd;
638 
639     /*
640      * Card information is normally at VPD_BASE but some early cards had
641      * it at 0.
642      */
643     ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
644     if (ret)
645         return ret;
646     addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
647 
648     for (i = 0; i < sizeof(vpd); i += 4) {
649         ret = t3_seeprom_read(adapter, addr + i,
650                       (u32 *)((u8 *)&vpd + i));
651         if (ret)
652             return ret;
653     }
654 
655     p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
656     p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
657     p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
658     p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
659     p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
660     memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
661 
662     /* Old eeproms didn't have port information */
663     if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
664         p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
665         p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
666     } else {
667         p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
668         p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
669         p->port_type[2] = (u8)hex2int(vpd.port2_data[0]);
670         p->port_type[3] = (u8)hex2int(vpd.port3_data[0]);
671         p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
672         p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
673     }
674 
675     for (i = 0; i < 6; i++)
676         p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
677                  hex2int(vpd.na_data[2 * i + 1]);
678     return 0;
679 }
680 
/* serial flash and firmware constants */
enum {
    SF_ATTEMPTS = 5,           /* max retries for SF1 operations */
    SF_SEC_SIZE = 64 * 1024,   /* serial flash sector size */
    SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */

    /* flash command opcodes */
    SF_PROG_PAGE    = 2,       /* program page */
    SF_WR_DISABLE   = 4,       /* disable writes */
    SF_RD_STATUS    = 5,       /* read status register */
    SF_WR_ENABLE    = 6,       /* enable writes */
    SF_RD_DATA_FAST = 0xb,     /* read flash */
    SF_ERASE_SECTOR = 0xd8,    /* erase sector */

    FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
    FW_VERS_ADDR = 0x77ffc,    /* flash address holding FW version */
    FW_MIN_SIZE = 8            /* at least version and csum */
};
699 
700 /**
701  *  sf1_read - read data from the serial flash
702  *  @adapter: the adapter
703  *  @byte_cnt: number of bytes to read
704  *  @cont: whether another operation will be chained
705  *  @valp: where to store the read data
706  *
707  *  Reads up to 4 bytes of data from the serial flash.  The location of
708  *  the read needs to be specified prior to calling this by issuing the
709  *  appropriate commands to the serial flash.
710  */
sf1_read(adapter_t * adapter,unsigned int byte_cnt,int cont,u32 * valp)711 static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
712             u32 *valp)
713 {
714     int ret;
715 
716     if (!byte_cnt || byte_cnt > 4)
717         return -EINVAL;
718     if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
719         return -EBUSY;
720     t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
721     ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
722     if (!ret)
723         *valp = t3_read_reg(adapter, A_SF_DATA);
724     return ret;
725 }
726 
727 /**
728  *  sf1_write - write data to the serial flash
729  *  @adapter: the adapter
730  *  @byte_cnt: number of bytes to write
731  *  @cont: whether another operation will be chained
732  *  @val: value to write
733  *
734  *  Writes up to 4 bytes of data to the serial flash.  The location of
735  *  the write needs to be specified prior to calling this by issuing the
736  *  appropriate commands to the serial flash.
737  */
sf1_write(adapter_t * adapter,unsigned int byte_cnt,int cont,u32 val)738 static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
739              u32 val)
740 {
741     if (!byte_cnt || byte_cnt > 4)
742         return -EINVAL;
743     if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
744         return -EBUSY;
745     t3_write_reg(adapter, A_SF_DATA, val);
746     t3_write_reg(adapter, A_SF_OP,
747              V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
748     return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
749 }
750 
751 /**
752  *  flash_wait_op - wait for a flash operation to complete
753  *  @adapter: the adapter
754  *  @attempts: max number of polls of the status register
755  *  @delay: delay between polls in ms
756  *
757  *  Wait for a flash operation to complete by polling the status register.
758  */
flash_wait_op(adapter_t * adapter,int attempts,int delay)759 static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
760 {
761     int ret;
762     u32 status;
763 
764     while (1) {
765         if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
766             (ret = sf1_read(adapter, 1, 0, &status)) != 0)
767             return ret;
768         if (!(status & 1))
769             return 0;
770         if (--attempts == 0)
771             return -EAGAIN;
772         if (delay)
773             msleep(delay);
774     }
775 }
776 
777 /**
778  *  t3_read_flash - read words from serial flash
779  *  @adapter: the adapter
780  *  @addr: the start address for the read
781  *  @nwords: how many 32-bit words to read
782  *  @data: where to store the read data
783  *  @byte_oriented: whether to store data as bytes or as words
784  *
785  *  Read the specified number of 32-bit words from the serial flash.
786  *  If @byte_oriented is set the read data is stored as a byte array
787  *  (i.e., big-endian), otherwise as 32-bit words in the platform's
788  *  natural endianness.
789  */
t3_read_flash(adapter_t * adapter,unsigned int addr,unsigned int nwords,u32 * data,int byte_oriented)790 int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
791           u32 *data, int byte_oriented)
792 {
793     int ret;
794 
795     if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
796         return -EINVAL;
797 
798     addr = swab32(addr) | SF_RD_DATA_FAST;
799 
800     if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
801         (ret = sf1_read(adapter, 1, 1, data)) != 0)
802         return ret;
803 
804     for ( ; nwords; nwords--, data++) {
805         ret = sf1_read(adapter, 4, nwords > 1, data);
806         if (ret)
807             return ret;
808         if (byte_oriented)
809             *data = htonl(*data);
810     }
811     return 0;
812 }
813 
814 /**
815  *  t3_write_flash - write up to a page of data to the serial flash
816  *  @adapter: the adapter
817  *  @addr: the start address to write
818  *  @n: length of data to write
819  *  @data: the data to write
820  *
821  *  Writes up to a page of data (256 bytes) to the serial flash starting
822  *  at the given address.
823  */
static int t3_write_flash(adapter_t *adapter, unsigned int addr,
              unsigned int n, const u8 *data)
{
    int ret;
    u32 buf[64];    /* holds one full 256-byte flash page for verify */
    unsigned int i, c, left, val, offset = addr & 0xff;

    /* Must stay inside the flash and inside a single 256-byte page. */
    if (addr + n > SF_SIZE || offset + n > 256)
        return -EINVAL;

    /* Page-program command with the byte-swapped (big-endian) address. */
    val = swab32(addr) | SF_PROG_PAGE;

    if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
        (ret = sf1_write(adapter, 4, 1, val)) != 0)
        return ret;

    /*
     * Stream the payload out up to 4 bytes at a time, MSB first,
     * keeping the chip selected until the final chunk (c == left).
     */
    for (left = n; left; left -= c) {
        c = uimin(left, 4U);
        for (val = 0, i = 0; i < c; ++i)
            val = (val << 8) + *data++;

        ret = sf1_write(adapter, c, c != left, val);
        if (ret)
            return ret;
    }
    /* Poll until the flash reports the program cycle finished. */
    if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
        return ret;

    /* Read the page to verify the write succeeded */
    ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
    if (ret)
        return ret;

    /* data was advanced past the payload above; rewind to compare. */
    if (memcmp(data - n, (u8 *)buf + offset, n))
        return -EIO;
    return 0;
}
861 
862 /**
863  *  t3_get_tp_version - read the tp sram version
864  *  @adapter: the adapter
865  *  @vers: where to place the version
866  *
867  *  Reads the protocol sram version from sram.
868  */
int t3_get_tp_version(adapter_t *adapter, u32 *vers)
{
    int rc;

    /* Trigger the SRAM version read and poll for its completion. */
    t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
    rc = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1);
    if (rc != 0)
        return rc;

    /* The version word is now available in the result register. */
    *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
    return 0;
}
884 
885 /**
886  *  t3_check_tpsram_version - read the tp sram version
887  *  @adapter: the adapter
888  *
889  */
int t3_check_tpsram_version(adapter_t *adapter)
{
    int ret;
    u32 vers;
    unsigned int major, minor;

    /*
     * Reuse t3_get_tp_version() instead of duplicating its
     * register-access sequence inline (the previous copy here was
     * byte-for-byte identical to that function's body).
     */
    ret = t3_get_tp_version(adapter, &vers);
    if (ret)
        return ret;

    major = G_TP_VERSION_MAJOR(vers);
    minor = G_TP_VERSION_MINOR(vers);

    /* Accept only an exact major.minor match with the driver. */
    if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
        return 0;

    CH_WARN(adapter, "found wrong TP version (%u.%u), "
           "driver needs version %d.%d\n", major, minor,
           TP_VERSION_MAJOR, TP_VERSION_MINOR);
    return -EINVAL;
}
916 
917 /**
918  *  t3_check_tpsram - check if provided protocol SRAM
919  *            is compatible with this driver
920  *  @adapter: the adapter
921  *  @tp_sram: the firmware image to write
922  *  @size: image size
923  *
924  *  Checks if an adapter's tp sram is compatible with the driver.
925  *  Returns 0 if the versions are compatible, a negative error otherwise.
926  */
int t3_check_tpsram(adapter_t *adapter, const u8 *tp_sram, unsigned int size)
{
    const u32 *words = (const u32 *)tp_sram;
    unsigned int idx, nwords = size / sizeof(u32);
    u32 sum = 0;

    /*
     * The image carries a 1's-complement checksum: summing every
     * big-endian word of the image must yield 0xffffffff.
     */
    for (idx = 0; idx < nwords; idx++)
        sum += ntohl(words[idx]);

    if (sum != 0xffffffff) {
        CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
               sum);
        return -EINVAL;
    }
    return 0;
}
944 
/* Firmware flavor encoded in the version word's type field. */
enum fw_version_type {
    FW_VERSION_N3,
    FW_VERSION_T3
};
949 
950 /**
951  *  t3_get_fw_version - read the firmware version
952  *  @adapter: the adapter
953  *  @vers: where to place the version
954  *
955  *  Reads the FW version from flash.
956  */
int t3_get_fw_version(adapter_t *adapter, u32 *vers)
{
    /* The 32-bit FW version word sits at a fixed flash address. */
    return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}
961 
962 /**
963  *  t3_check_fw_version - check if the FW is compatible with this driver
964  *  @adapter: the adapter
965  *
966  *  Checks if an adapter's FW is compatible with the driver.  Returns 0
967  *  if the versions are compatible, a negative error otherwise.
968  */
int t3_check_fw_version(adapter_t *adapter)
{
    u32 vers;
    unsigned int type, major, minor;
    int rc;

    rc = t3_get_fw_version(adapter, &vers);
    if (rc)
        return rc;

    /* Decode the type/major/minor fields of the version word. */
    type = G_FW_VERSION_TYPE(vers);
    major = G_FW_VERSION_MAJOR(vers);
    minor = G_FW_VERSION_MINOR(vers);

    /* Anything other than an exact T3 major.minor match is rejected. */
    if (type != FW_VERSION_T3 || major != FW_VERSION_MAJOR ||
        minor != FW_VERSION_MINOR) {
        CH_WARN(adapter, "found wrong FW version (%u.%u), "
               "driver needs version %d.%d\n", major, minor,
               FW_VERSION_MAJOR, FW_VERSION_MINOR);
        return -EINVAL;
    }
    return 0;
}
992 
993 /**
994  *  t3_flash_erase_sectors - erase a range of flash sectors
995  *  @adapter: the adapter
996  *  @start: the first sector to erase
997  *  @end: the last sector to erase
998  *
999  *  Erases the sectors in the given range.
1000  */
static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
{
    int sector, ret;

    /*
     * For each sector in [start, end]: enable writes, issue the erase
     * command with the sector number in the address field, then poll
     * until the flash reports the erase has finished.
     */
    for (sector = start; sector <= end; sector++) {
        ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE);
        if (ret == 0)
            ret = sf1_write(adapter, 4, 0,
                    SF_ERASE_SECTOR | (sector << 8));
        if (ret == 0)
            ret = flash_wait_op(adapter, 5, 500);
        if (ret != 0)
            return ret;
    }
    return 0;
}
1015 
1016 /*
1017  *  t3_load_fw - download firmware
1018  *  @adapter: the adapter
1019  *  @fw_data: the firmware image to write
1020  *  @size: image size
1021  *
1022  *  Write the supplied firmware image to the card's serial flash.
1023  *  The FW image has the following sections: @size - 8 bytes of code and
1024  *  data, followed by 4 bytes of FW version, followed by the 32-bit
1025  *  1's complement checksum of the whole image.
1026  */
int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
{
    u32 csum;
    unsigned int i;
    const u32 *p = (const u32 *)fw_data;
    int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

    /* Image must be whole 32-bit words and meet the minimum size. */
    if ((size & 3) || size < FW_MIN_SIZE)
        return -EINVAL;
    /* Image must fit in the flash region reserved for FW. */
    if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
        return -EFBIG;

    /* Verify the 1's-complement checksum over the whole image. */
    for (csum = 0, i = 0; i < size / sizeof(csum); i++)
        csum += ntohl(p[i]);
    if (csum != 0xffffffff) {
        CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
               csum);
        return -EINVAL;
    }

    /* Erase the flash sector that holds the FW image. */
    ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
    if (ret)
        goto out;

    size -= 8;  /* trim off version and checksum */
    /* Program the body of the image one 256-byte flash page at a time. */
    for (addr = FW_FLASH_BOOT_ADDR; size; ) {
        unsigned int chunk_size = uimin(size, 256U);

        ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
        if (ret)
            goto out;

        addr += chunk_size;
        fw_data += chunk_size;
        size -= chunk_size;
    }

    /* Finally write the 4-byte version word (fw_data now points at it). */
    ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
    if (ret)
        CH_ERR(adapter, "firmware download failed, error %d\n", ret);
    return ret;
}
1070 
1071 #define CIM_CTL_BASE 0x2000
1072 
1073 /**
1074  *  t3_cim_ctl_blk_read - read a block from CIM control region
1075  *  @adap: the adapter
1076  *  @addr: the start address within the CIM control region
1077  *  @n: number of words to read
1078  *  @valp: where to store the result
1079  *
1080  *  Reads a block of 4-byte words from the CIM control region.
1081  */
int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
            unsigned int *valp)
{
    int ret = 0;

    /* Bail out if a previous host access is still in flight. */
    if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
        return -EBUSY;

    for ( ; !ret && n--; addr += 4) {
        /*
         * Post the target address, wait for HOSTBUSY to clear, then
         * collect the data word from the access-data register.
         */
        t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
        ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
                      0, 5, 2);
        if (!ret)
            *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
    }
    return ret;
}
1099 
1100 /**
1101  *  t3_link_changed - handle interface link changes
1102  *  @adapter: the adapter
1103  *  @port_id: the port index that changed link state
1104  *
1105  *  Called when a port's link settings change to propagate the new values
1106  *  to the associated PHY and MAC.  After performing the common tasks it
1107  *  invokes an OS-specific handler.
1108  */
void t3_link_changed(adapter_t *adapter, int port_id)
{
    int link_ok, speed, duplex, fc;
    struct port_info *pi = adap2pinfo(adapter, port_id);
    struct cphy *phy = &pi->phy;
    struct cmac *mac = &pi->mac;
    struct link_config *lc = &pi->link_config;

    phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

    /*
     * On rev > 0 XAUI-based adapters, when the link state flips:
     * reset the PCS on link-up and gate the XAUI TX/RX enables on the
     * new state.
     */
    if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
        uses_xaui(adapter)) {
        if (link_ok)
            t3b_pcs_reset(mac);
        t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
                 link_ok ? F_TXACTENABLE | F_RXEN : 0);
    }
    /* Record the new state; negative speed/duplex mean "unknown". */
    lc->link_ok = (unsigned char)link_ok;
    lc->speed = speed < 0 ? SPEED_INVALID : speed;
    lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
    /* Apply the requested pause policy to what the PHY reported. */
    if (lc->requested_fc & PAUSE_AUTONEG)
        fc &= lc->requested_fc;
    else
        fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

    if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
        /* Set MAC speed, duplex, and flow control to match PHY. */
        t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
        lc->fc = (unsigned char)fc;
    }

    /* Hand off to the OS-specific link-change handler. */
    t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
1142 
1143 /**
1144  *  t3_link_start - apply link configuration to MAC/PHY
1145  *  @phy: the PHY to setup
1146  *  @mac: the MAC to setup
1147  *  @lc: the requested link configuration
1148  *
1149  *  Set up a port's MAC and PHY according to a desired link configuration.
1150  *  - If the PHY can auto-negotiate first decide what to advertise, then
1151  *    enable/disable auto-negotiation as desired, and reset.
1152  *  - If the PHY does not auto-negotiate just reset it.
1153  *  - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1154  *    otherwise do it later based on the outcome of auto-negotiation.
1155  */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
    unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

    lc->link_ok = 0;
    if (lc->supported & SUPPORTED_Autoneg) {
        /* Rebuild the pause advertisement from the requested FC. */
        lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
        if (fc) {
            lc->advertising |= ADVERTISED_Asym_Pause;
            if (fc & PAUSE_RX)
                lc->advertising |= ADVERTISED_Pause;
        }
        phy->ops->advertise(phy, lc->advertising);

        if (lc->autoneg == AUTONEG_DISABLE) {
            /* Force the requested settings on MAC and PHY. */
            lc->speed = lc->requested_speed;
            lc->duplex = lc->requested_duplex;
            lc->fc = (unsigned char)fc;
            t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
                           fc);
            /* Also disables autoneg */
            phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
            phy->ops->reset(phy, 0);
        } else
            phy->ops->autoneg_enable(phy);
    } else {
        /* PHY cannot autoneg: program the MAC and just reset the PHY. */
        t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
        lc->fc = (unsigned char)fc;
        phy->ops->reset(phy, 0);
    }
    return 0;
}
1188 
1189 /**
1190  *  t3_set_vlan_accel - control HW VLAN extraction
1191  *  @adapter: the adapter
1192  *  @ports: bitmap of adapter ports to operate on
1193  *  @on: enable (1) or disable (0) HW VLAN extraction
1194  *
1195  *  Enables or disables HW extraction of VLAN tags for the given port.
1196  */
void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
{
    unsigned int mask = ports << S_VLANEXTRACTIONENABLE;

    /* Set or clear the per-port VLAN extraction enable bits. */
    t3_set_reg_field(adapter, A_TP_OUT_CONFIG, mask, on ? mask : 0);
}
1203 
/* One entry of an interrupt-cause decode table; see t3_handle_intr_status. */
struct intr_info {
    unsigned int mask;       /* bits to check in interrupt status */
    const char *msg;         /* message to print or NULL */
    short stat_idx;          /* stat counter to increment or -1 */
    unsigned short fatal:1;  /* whether the condition reported is fatal */
};
1210 
1211 /**
1212  *  t3_handle_intr_status - table driven interrupt handler
1213  *  @adapter: the adapter that generated the interrupt
1214  *  @reg: the interrupt status register to process
1215  *  @mask: a mask to apply to the interrupt status
1216  *  @acts: table of interrupt actions
1217  *  @stats: statistics counters tracking interrupt occurrences
1218  *
1219  *  A table driven interrupt handler that applies a set of masks to an
1220  *  interrupt status word and performs the corresponding actions if the
1221  *  interrupts described by the mask have occurred.  The actions include
1222  *  optionally printing a warning or alert message, and optionally
1223  *  incrementing a stat counter.  The table is terminated by an entry
1224  *  specifying mask 0.  Returns the number of fatal interrupt conditions.
1225  */
static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
                 unsigned int mask,
                 const struct intr_info *acts,
                 unsigned long *stats)
{
    unsigned int status = t3_read_reg(adapter, reg) & mask;
    const struct intr_info *a;
    int fatal = 0;

    /* Walk the decode table (terminated by a zero mask). */
    for (a = acts; a->mask != 0; a++) {
        unsigned int hit = status & a->mask;

        if (hit == 0)
            continue;
        if (a->fatal) {
            fatal++;
            CH_ALERT(adapter, "%s (0x%x)\n", a->msg, hit);
        } else if (a->msg != NULL)
            CH_WARN(adapter, "%s (0x%x)\n", a->msg, hit);
        if (a->stat_idx >= 0)
            stats[a->stat_idx]++;
    }
    if (status)                           /* clear processed interrupts */
        t3_write_reg(adapter, reg, status);
    return fatal;
}
1250 
/*
 * Per-module interrupt cause masks.  Each selects the cause bits a
 * module's interrupt handler/enable logic cares about; bits inside
 * comments are deliberately excluded.
 */
#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
               F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
               F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
               V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
               F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
            F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
            F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
            F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
            V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
            V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
            F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
            /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
            V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
             F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
             F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
               F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
               F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
               F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
            V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
            V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
            V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
            V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
               V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
               V_RXTPPARERRENB(M_RXTPPARERRENB) | \
               V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
              F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
              F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
              F_MPS0 | F_CPL_SWITCH)
1292 
1293 /*
1294  * Interrupt handler for the PCIX1 module.
1295  */
static void pci_intr_handler(adapter_t *adapter)
{
    /*
     * PCI-X cause decode table.  It is never written (and
     * t3_handle_intr_status takes a const pointer), so declare it
     * const to keep it in read-only storage.
     */
    static const struct intr_info pcix1_intr_info[] = {
        { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
        { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
        { F_RCVTARABT, "PCI received target abort", -1, 1 },
        { F_RCVMSTABT, "PCI received master abort", -1, 1 },
        { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
        { F_DETPARERR, "PCI detected parity error", -1, 1 },
        { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
        { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
        { F_RCVSPLCMPERR, "PCI received split completion error", -1,
          1 },
        { F_DETCORECCERR, "PCI correctable ECC error",
          STAT_PCI_CORR_ECC, 0 },
        { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
        { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
        { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
          1 },
        { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
          1 },
        { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
          1 },
        { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
          "error", -1, 1 },
        { 0, NULL, 0, 0 }
    };

    /* Any fatal condition takes the adapter down. */
    if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
                  pcix1_intr_info, adapter->irq_stats))
        t3_fatal_err(adapter);
}
1328 
1329 /*
1330  * Interrupt handler for the PCIE module.
1331  */
static void pcie_intr_handler(adapter_t *adapter)
{
    /* Read-only decode table; const so it lives in rodata. */
    static const struct intr_info pcie_intr_info[] = {
        { F_PEXERR, "PCI PEX error", -1, 1 },
        { F_UNXSPLCPLERRR,
          "PCI unexpected split completion DMA read error", -1, 1 },
        { F_UNXSPLCPLERRC,
          "PCI unexpected split completion DMA command error", -1, 1 },
        { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
        { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
        { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
        { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
        { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
          "PCI MSI-X table/PBA parity error", -1, 1 },
        { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
        { 0, NULL, 0, 0 }
    };

    /* On a PEX error also report the detailed error code register. */
    if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
        CH_ALERT(adapter, "PEX error code 0x%x\n",
             t3_read_reg(adapter, A_PCIE_PEX_ERR));

    if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
                  pcie_intr_info, adapter->irq_stats))
        t3_fatal_err(adapter);
}
1358 
1359 /*
1360  * TP interrupt handler.
1361  */
static void tp_intr_handler(adapter_t *adapter)
{
    /* Read-only decode table; const so it lives in rodata. */
    static const struct intr_info tp_intr_info[] = {
        { 0xffffff,  "TP parity error", -1, 1 },
        { 0x1000000, "TP out of Rx pages", -1, 1 },
        { 0x2000000, "TP out of Tx pages", -1, 1 },
        { 0, NULL, 0, 0 }
    };

    if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
                  tp_intr_info, NULL))
        t3_fatal_err(adapter);
}
1375 
1376 /*
1377  * CIM interrupt handler.
1378  */
static void cim_intr_handler(adapter_t *adapter)
{
    /* Read-only decode table; const so it lives in rodata. */
    static const struct intr_info cim_intr_info[] = {
        { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
        { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
        { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
        { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
        { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
        { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
        { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
        { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
        { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
        { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
        { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
        { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
        { 0, NULL, 0, 0 }
    };

    if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
                  cim_intr_info, NULL))
        t3_fatal_err(adapter);
}
1401 
1402 /*
1403  * ULP RX interrupt handler.
1404  */
static void ulprx_intr_handler(adapter_t *adapter)
{
    /* Read-only decode table; const so it lives in rodata. */
    static const struct intr_info ulprx_intr_info[] = {
        { F_PARERR, "ULP RX parity error", -1, 1 },
        { 0, NULL, 0, 0 }
    };

    if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
                  ulprx_intr_info, NULL))
        t3_fatal_err(adapter);
}
1416 
1417 /*
1418  * ULP TX interrupt handler.
1419  */
static void ulptx_intr_handler(adapter_t *adapter)
{
    /* Read-only decode table; const so it lives in rodata. */
    static const struct intr_info ulptx_intr_info[] = {
        { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
          STAT_ULP_CH0_PBL_OOB, 0 },
        { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
          STAT_ULP_CH1_PBL_OOB, 0 },
        { 0, NULL, 0, 0 }
    };

    if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
                  ulptx_intr_info, adapter->irq_stats))
        t3_fatal_err(adapter);
}
1434 
/* Aggregate PM TX ispi/ospi framing-error bits (used by PMTX_INTR_MASK). */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
    F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
    F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
    F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
    F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
    F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
    F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1443 
1444 /*
1445  * PM TX interrupt handler.
1446  */
static void pmtx_intr_handler(adapter_t *adapter)
{
    /* Read-only decode table; const so it lives in rodata. */
    static const struct intr_info pmtx_intr_info[] = {
        { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
        { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
        { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
        { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
          "PMTX ispi parity error", -1, 1 },
        { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
          "PMTX ospi parity error", -1, 1 },
        { 0, NULL, 0, 0 }
    };

    if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
                  pmtx_intr_info, NULL))
        t3_fatal_err(adapter);
}
1464 
/* Aggregate PM RX ispi/ospi framing-error bits (used by PMRX_INTR_MASK). */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
    F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
    F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
    F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
    F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
    F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
    F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1473 
1474 /*
1475  * PM RX interrupt handler.
1476  */
static void pmrx_intr_handler(adapter_t *adapter)
{
    /* Read-only decode table; const so it lives in rodata. */
    static const struct intr_info pmrx_intr_info[] = {
        { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
        { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
        { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
        { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
          "PMRX ispi parity error", -1, 1 },
        { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
          "PMRX ospi parity error", -1, 1 },
        { 0, NULL, 0, 0 }
    };

    if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
                  pmrx_intr_info, NULL))
        t3_fatal_err(adapter);
}
1494 
1495 /*
1496  * CPL switch interrupt handler.
1497  */
static void cplsw_intr_handler(adapter_t *adapter)
{
    /* Read-only decode table; const so it lives in rodata.
     * The CIM overflow entry is deliberately disabled. */
    static const struct intr_info cplsw_intr_info[] = {
/*      { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
        { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
        { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
        { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
        { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
        { 0, NULL, 0, 0 }
    };

    if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
                  cplsw_intr_info, NULL))
        t3_fatal_err(adapter);
}
1513 
1514 /*
1515  * MPS interrupt handler.
1516  */
static void mps_intr_handler(adapter_t *adapter)
{
    /* Read-only decode table; const so it lives in rodata. */
    static const struct intr_info mps_intr_info[] = {
        { 0x1ff, "MPS parity error", -1, 1 },
        { 0, NULL, 0, 0 }
    };

    if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
                  mps_intr_info, NULL))
        t3_fatal_err(adapter);
}
1528 
1529 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1530 
1531 /*
1532  * MC7 interrupt handler.
1533  */
static void mc7_intr_handler(struct mc7 *mc7)
{
    adapter_t *adapter = mc7->adapter;
    u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

    /* Correctable ECC error: count and warn, not fatal. */
    if (cause & F_CE) {
        mc7->stats.corr_err++;
        CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
            "data 0x%x 0x%x 0x%x\n", mc7->name,
            t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
            t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
            t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
            t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
    }

    /* Uncorrectable ECC error (fatal via MC7_INTR_FATAL below). */
    if (cause & F_UE) {
        mc7->stats.uncorr_err++;
        CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
             "data 0x%x 0x%x 0x%x\n", mc7->name,
             t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
             t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
             t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
             t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
    }

    /* Parity error. */
    if (G_PE(cause)) {
        mc7->stats.parity_err++;
        CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
             mc7->name, G_PE(cause));
    }

    /* Address error; the error address is only read on rev > 0 parts. */
    if (cause & F_AE) {
        u32 addr = 0;

        if (adapter->params.rev > 0)
            addr = t3_read_reg(adapter,
                       mc7->offset + A_MC7_ERR_ADDR);
        mc7->stats.addr_err++;
        CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
             mc7->name, addr);
    }

    if (cause & MC7_INTR_FATAL)
        t3_fatal_err(adapter);

    /* Acknowledge every cause bit we observed. */
    t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1581 
/* XGMAC cause bits that are treated as fatal by mac_intr_handler(). */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
            V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1584 /*
1585  * XGMAC interrupt handler.
1586  */
static int mac_intr_handler(adapter_t *adap, unsigned int idx)
{
    u32 cause;
    struct cmac *mac;

    idx = idx == 0 ? 0 : adapter_info(adap)->nports0; /* MAC idx -> port */
    mac = &adap2pinfo(adap, idx)->mac;
    cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

    /* FIFO parity errors are logged here and are fatal (see below). */
    if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
        mac->stats.tx_fifo_parity_err++;
        CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
    }
    if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
        mac->stats.rx_fifo_parity_err++;
        CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
    }
    /* The remaining conditions are only counted. */
    if (cause & F_TXFIFO_UNDERRUN)
        mac->stats.tx_fifo_urun++;
    if (cause & F_RXFIFO_OVERFLOW)
        mac->stats.rx_fifo_ovfl++;
    if (cause & V_SERDES_LOS(M_SERDES_LOS))
        mac->stats.serdes_signal_loss++;
    if (cause & F_XAUIPCSCTCERR)
        mac->stats.xaui_pcs_ctc_err++;
    if (cause & F_XAUIPCSALIGNCHANGE)
        mac->stats.xaui_pcs_align_change++;

    /* Acknowledge the causes before possibly declaring a fatal error. */
    t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
    if (cause & XGM_INTR_FATAL)
        t3_fatal_err(adap);
    return cause != 0;  /* non-zero if any interrupt was pending */
}
1620 
1621 /*
1622  * Interrupt handler for PHY events.
1623  */
int t3_phy_intr_handler(adapter_t *adapter)
{
    u32 mask, gpi = adapter_info(adapter)->gpio_intr;
    u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

    for_each_port(adapter, i) {
        struct port_info *p = adap2pinfo(adapter, i);

        /* Peel off the lowest set GPIO bit; each port consumes one. */
        mask = gpi - (gpi & (gpi - 1));
        gpi -= mask;

        /* Skip PHYs that do not signal interrupts. */
        if (!(p->port_type->caps & SUPPORTED_IRQ))
            continue;

        if (cause & mask) {
            int phy_cause = p->phy.ops->intr_handler(&p->phy);

            if (phy_cause & cphy_cause_link_change)
                t3_link_changed(adapter, i);
            if (phy_cause & cphy_cause_fifo_error)
                p->phy.fifo_errors++;
        }
    }

    /* Acknowledge all observed GPIO interrupt causes. */
    t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
    return 0;
}
1651 
1652 /**
1653  *  t3_slow_intr_handler - control path interrupt handler
1654  *  @adapter: the adapter
1655  *
1656  *  T3 interrupt handler for non-data interrupt events, e.g., errors.
1657  *  The designation 'slow' is because it involves register reads, while
1658  *  data interrupts typically don't involve any MMIOs.
1659  */
int t3_slow_intr_handler(adapter_t *adapter)
{
    u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

    /* Consider only the sources we enabled; bail early if none fired. */
    cause &= adapter->slow_intr_mask;
    if (!cause)
        return 0;

    /* Dispatch to the per-module handlers for each pending source. */
    if (cause & F_PCIM0) {
        if (is_pcie(adapter))
            pcie_intr_handler(adapter);
        else
            pci_intr_handler(adapter);
    }
    if (cause & F_SGE3)
        t3_sge_err_intr_handler(adapter);
    if (cause & F_MC7_PMRX)
        mc7_intr_handler(&adapter->pmrx);
    if (cause & F_MC7_PMTX)
        mc7_intr_handler(&adapter->pmtx);
    if (cause & F_MC7_CM)
        mc7_intr_handler(&adapter->cm);
    if (cause & F_CIM)
        cim_intr_handler(adapter);
    if (cause & F_TP1)
        tp_intr_handler(adapter);
    if (cause & F_ULP2_RX)
        ulprx_intr_handler(adapter);
    if (cause & F_ULP2_TX)
        ulptx_intr_handler(adapter);
    if (cause & F_PM1_RX)
        pmrx_intr_handler(adapter);
    if (cause & F_PM1_TX)
        pmtx_intr_handler(adapter);
    if (cause & F_CPL_SWITCH)
        cplsw_intr_handler(adapter);
    if (cause & F_MPS0)
        mps_intr_handler(adapter);
    if (cause & F_MC5A)
        t3_mc5_intr_handler(&adapter->mc5);
    if (cause & F_XGMAC0_0)
        mac_intr_handler(adapter, 0);
    if (cause & F_XGMAC0_1)
        mac_intr_handler(adapter, 1);
    if (cause & F_T3DBG)
        t3_os_ext_intr_handler(adapter);

    /* Clear the interrupts just processed. */
    t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
    (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
    return 1;
}
1711 
1712 /**
1713  *  t3_intr_enable - enable interrupts
1714  *  @adapter: the adapter whose interrupts should be enabled
1715  *
1716  *  Enable interrupts by setting the interrupt enable registers of the
1717  *  various HW modules and then enabling the top-level interrupt
1718  *  concentrator.
1719  */
void t3_intr_enable(adapter_t *adapter)
{
    /* Per-module interrupt-enable registers and their masks, written
     * in one batch below. */
    static struct addr_val_pair intr_en_avp[] = {
        { A_SG_INT_ENABLE, SGE_INTR_MASK },
        { A_MC7_INT_ENABLE, MC7_INTR_MASK },
        { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
            MC7_INTR_MASK },
        { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
            MC7_INTR_MASK },
        { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
        { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
        { A_TP_INT_ENABLE, 0x3bfffff },
        { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
        { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
        { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
        { A_MPS_INT_ENABLE, MPS_INTR_MASK },
    };

    adapter->slow_intr_mask = PL_INTR_MASK;

    t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);

    /* Post-rev-0 silicon supports a few additional error interrupts. */
    if (adapter->params.rev > 0) {
        t3_write_reg(adapter, A_CPL_INTR_ENABLE,
                 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
        t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
                 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
                 F_PBL_BOUND_ERR_CH1);
    } else {
        t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
        t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
    }

    /* GPIO interrupts (used for PHY interrupts) are active low. */
    t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
             adapter_info(adapter)->gpio_intr);
    t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
             adapter_info(adapter)->gpio_intr);
    if (is_pcie(adapter))
        t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
    else
        t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
    /* Finally open the top-level interrupt concentrator. */
    t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
    (void) t3_read_reg(adapter, A_PL_INT_ENABLE0);          /* flush */
}
1764 
1765 /**
1766  *  t3_intr_disable - disable a card's interrupts
1767  *  @adapter: the adapter whose interrupts should be disabled
1768  *
1769  *  Disable interrupts.  We only disable the top-level interrupt
1770  *  concentrator and the SGE data interrupts.
1771  */
void t3_intr_disable(adapter_t *adapter)
{
    /* Close the top-level concentrator; per-module enables are left
     * alone, per the contract documented above. */
    t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
    (void) t3_read_reg(adapter, A_PL_INT_ENABLE0);  /* flush */
    adapter->slow_intr_mask = 0;
}
1778 
1779 /**
1780  *  t3_intr_clear - clear all interrupts
1781  *  @adapter: the adapter whose interrupts should be cleared
1782  *
1783  *  Clears all interrupts.
1784  */
void t3_intr_clear(adapter_t *adapter)
{
    /* Cause registers of every module; each is cleared by writing
     * all-ones below. */
    static const unsigned int cause_reg_addr[] = {
        A_SG_INT_CAUSE,
        A_SG_RSPQ_FL_STATUS,
        A_PCIX_INT_CAUSE,
        A_MC7_INT_CAUSE,
        A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
        A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
        A_CIM_HOST_INT_CAUSE,
        A_TP_INT_CAUSE,
        A_MC5_DB_INT_CAUSE,
        A_ULPRX_INT_CAUSE,
        A_ULPTX_INT_CAUSE,
        A_CPL_INTR_CAUSE,
        A_PM1_TX_INT_CAUSE,
        A_PM1_RX_INT_CAUSE,
        A_MPS_INT_CAUSE,
        A_T3DBG_INT_CAUSE,
    };
    unsigned int i;

    /* Clear PHY and MAC interrupts for each port. */
    for_each_port(adapter, i)
        t3_port_intr_clear(adapter, i);

    for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
        t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

    if (is_pcie(adapter))
        t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
    /* Clear the top-level cause last, after all sub-causes are gone. */
    t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
    (void) t3_read_reg(adapter, A_PL_INT_CAUSE0);          /* flush */
}
1819 
1820 /**
1821  *  t3_port_intr_enable - enable port-specific interrupts
1822  *  @adapter: associated adapter
1823  *  @idx: index of port whose interrupts should be enabled
1824  *
1825  *  Enable port-specific (i.e., MAC and PHY) interrupts for the given
1826  *  adapter port.
1827  */
void t3_port_intr_enable(adapter_t *adapter, int idx)
{
    struct port_info *pi = adap2pinfo(adapter, idx);

    /* Enable the port's MAC interrupt sources, then its PHY's. */
    t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, XGM_INTR_MASK);
    pi->phy.ops->intr_enable(&pi->phy);
}
1835 
1836 /**
1837  *  t3_port_intr_disable - disable port-specific interrupts
1838  *  @adapter: associated adapter
1839  *  @idx: index of port whose interrupts should be disabled
1840  *
1841  *  Disable port-specific (i.e., MAC and PHY) interrupts for the given
1842  *  adapter port.
1843  */
void t3_port_intr_disable(adapter_t *adapter, int idx)
{
    struct port_info *pi = adap2pinfo(adapter, idx);

    /* Mask all MAC interrupt sources for the port, then its PHY's. */
    t3_write_reg(adapter, A_XGM_INT_ENABLE + pi->mac.offset, 0);
    pi->phy.ops->intr_disable(&pi->phy);
}
1851 
1852 /**
1853  *  t3_port_intr_clear - clear port-specific interrupts
1854  *  @adapter: associated adapter
1855  *  @idx: index of port whose interrupts to clear
1856  *
1857  *  Clear port-specific (i.e., MAC and PHY) interrupts for the given
1858  *  adapter port.
1859  */
void t3_port_intr_clear(adapter_t *adapter, int idx)
{
    struct port_info *pi = adap2pinfo(adapter, idx);

    /* Writing all-ones clears every pending MAC cause bit. */
    t3_write_reg(adapter, A_XGM_INT_CAUSE + pi->mac.offset, 0xffffffff);
    pi->phy.ops->intr_clear(&pi->phy);
}
1867 
1868 #define SG_CONTEXT_CMD_ATTEMPTS 100
1869 
1870 /**
1871  *  t3_sge_write_context - write an SGE context
1872  *  @adapter: the adapter
1873  *  @id: the context id
1874  *  @type: the context type
1875  *
1876  *  Program an SGE context with the values already loaded in the
1877  *  CONTEXT_DATA? registers.
1878  */
static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
                unsigned int type)
{
    /* Set all mask bits so every CONTEXT_DATA word is written in full. */
    t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
    t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
    t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
    t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
    /* Opcode 1 = write context; then poll until BUSY deasserts. */
    t3_write_reg(adapter, A_SG_CONTEXT_CMD,
             V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
    return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                   0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
1891 
1892 /**
1893  *  t3_sge_init_ecntxt - initialize an SGE egress context
1894  *  @adapter: the adapter to configure
1895  *  @id: the context id
1896  *  @gts_enable: whether to enable GTS for the context
1897  *  @type: the egress context type
1898  *  @respq: associated response queue
1899  *  @base_addr: base address of queue
1900  *  @size: number of queue entries
1901  *  @token: uP token
1902  *  @gen: initial generation value for the context
1903  *  @cidx: consumer pointer
1904  *
1905  *  Initialize an SGE egress context and make it ready for use.  If the
1906  *  platform allows concurrent context operations, the caller is
1907  *  responsible for appropriate locking.
1908  */
int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
               enum sge_context_type type, int respq, u64 base_addr,
               unsigned int size, unsigned int token, int gen,
               unsigned int cidx)
{
    /* Offload contexts start without credits; others get FW_WR_NUM. */
    unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

    if (base_addr & 0xfff)     /* must be 4K aligned */
        return -EINVAL;
    if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
        return -EBUSY;

    /* The HW takes the base address in 4K units, split across the
     * DATA1 (low 16 bits), DATA2 (next 32), and DATA3 (top 4) words. */
    base_addr >>= 12;
    t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
             V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
    t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
             V_EC_BASE_LO((u32)base_addr & 0xffff));
    base_addr >>= 16;
    t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
    base_addr >>= 32;
    t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
             V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
             V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
             F_EC_VALID);
    return t3_sge_write_context(adapter, id, F_EGRESS);
}
1935 
1936 /**
1937  *  t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1938  *  @adapter: the adapter to configure
1939  *  @id: the context id
1940  *  @gts_enable: whether to enable GTS for the context
1941  *  @base_addr: base address of queue
1942  *  @size: number of queue entries
1943  *  @bsize: size of each buffer for this queue
1944  *  @cong_thres: threshold to signal congestion to upstream producers
1945  *  @gen: initial generation value for the context
1946  *  @cidx: consumer pointer
1947  *
1948  *  Initialize an SGE free list context and make it ready for use.  The
1949  *  caller is responsible for ensuring only one context operation occurs
1950  *  at a time.
1951  */
int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
            u64 base_addr, unsigned int size, unsigned int bsize,
            unsigned int cong_thres, int gen, unsigned int cidx)
{
    if (base_addr & 0xfff)     /* must be 4K aligned */
        return -EINVAL;
    if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
        return -EBUSY;

    /* Base address is programmed in 4K units: low 32 bits in DATA0,
     * the remainder in DATA1. */
    base_addr >>= 12;
    t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
    base_addr >>= 32;
    t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
             V_FL_BASE_HI((u32)base_addr) |
             V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
    /* cidx and bsize are each split across two fields. */
    t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
             V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
             V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
    t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
             V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
             V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
    return t3_sge_write_context(adapter, id, F_FREELIST);
}
1975 
1976 /**
1977  *  t3_sge_init_rspcntxt - initialize an SGE response queue context
1978  *  @adapter: the adapter to configure
1979  *  @id: the context id
1980  *  @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1981  *  @base_addr: base address of queue
1982  *  @size: number of queue entries
1983  *  @fl_thres: threshold for selecting the normal or jumbo free list
1984  *  @gen: initial generation value for the context
1985  *  @cidx: consumer pointer
1986  *
1987  *  Initialize an SGE response queue context and make it ready for use.
1988  *  The caller is responsible for ensuring only one context operation
1989  *  occurs at a time.
1990  */
int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
             u64 base_addr, unsigned int size,
             unsigned int fl_thres, int gen, unsigned int cidx)
{
    unsigned int intr = 0;

    if (base_addr & 0xfff)     /* must be 4K aligned */
        return -EINVAL;
    if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
        return -EBUSY;

    /* Base address in 4K units: low 32 bits in DATA1, rest in DATA2. */
    base_addr >>= 12;
    t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
             V_CQ_INDEX(cidx));
    t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
    base_addr >>= 32;
    /* irq_vec_idx < 0 means no IRQ for this queue (intr stays 0). */
    if (irq_vec_idx >= 0)
        intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
    t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
             V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
    t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
    return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
2014 
2015 /**
2016  *  t3_sge_init_cqcntxt - initialize an SGE completion queue context
2017  *  @adapter: the adapter to configure
2018  *  @id: the context id
2019  *  @base_addr: base address of queue
2020  *  @size: number of queue entries
2021  *  @rspq: response queue for async notifications
2022  *  @ovfl_mode: CQ overflow mode
2023  *  @credits: completion queue credits
2024  *  @credit_thres: the credit threshold
2025  *
2026  *  Initialize an SGE completion queue context and make it ready for use.
2027  *  The caller is responsible for ensuring only one context operation
2028  *  occurs at a time.
2029  */
int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
            unsigned int size, int rspq, int ovfl_mode,
            unsigned int credits, unsigned int credit_thres)
{
    if (base_addr & 0xfff)     /* must be 4K aligned */
        return -EINVAL;
    if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
        return -EBUSY;

    /* Base address in 4K units: low 32 bits in DATA1, rest in DATA2. */
    base_addr >>= 12;
    t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
    t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
    base_addr >>= 32;
    /* Note: ovfl_mode is written to both the overflow-mode and the
     * error field; generation starts at 1. */
    t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
             V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
             V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
             V_CQ_ERR(ovfl_mode));
    t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
             V_CQ_CREDIT_THRES(credit_thres));
    return t3_sge_write_context(adapter, id, F_CQ);
}
2051 
2052 /**
2053  *  t3_sge_enable_ecntxt - enable/disable an SGE egress context
2054  *  @adapter: the adapter
2055  *  @id: the egress context id
2056  *  @enable: enable (1) or disable (0) the context
2057  *
2058  *  Enable or disable an SGE egress context.  The caller is responsible for
2059  *  ensuring only one context operation occurs at a time.
2060  */
int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
{
    if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
        return -EBUSY;

    /* Mask off everything except the EC_VALID bit so the write
     * touches only that field of the context. */
    t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
    t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
    t3_write_reg(adapter, A_SG_CONTEXT_CMD,
             V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
    return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                   0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2076 
2077 /**
2078  *  t3_sge_disable_fl - disable an SGE free-buffer list
2079  *  @adapter: the adapter
2080  *  @id: the free list context id
2081  *
2082  *  Disable an SGE free-buffer list.  The caller is responsible for
2083  *  ensuring only one context operation occurs at a time.
2084  */
int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
{
    if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
        return -EBUSY;

    /* Mask so only the FL_SIZE field is written; zeroing the size
     * disables the free list. */
    t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
    t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_CMD,
             V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
    return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                   0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2100 
2101 /**
2102  *  t3_sge_disable_rspcntxt - disable an SGE response queue
2103  *  @adapter: the adapter
2104  *  @id: the response queue context id
2105  *
2106  *  Disable an SGE response queue.  The caller is responsible for
2107  *  ensuring only one context operation occurs at a time.
2108  */
int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
{
    if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
        return -EBUSY;

    /* Mask so only the CQ_SIZE field is written; zeroing the size
     * disables the response queue. */
    t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
    t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_CMD,
             V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
    return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                   0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2124 
2125 /**
2126  *  t3_sge_disable_cqcntxt - disable an SGE completion queue
2127  *  @adapter: the adapter
2128  *  @id: the completion queue context id
2129  *
2130  *  Disable an SGE completion queue.  The caller is responsible for
2131  *  ensuring only one context operation occurs at a time.
2132  */
int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
{
    if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
        return -EBUSY;

    /* Mask so only the CQ_SIZE field is written; zeroing the size
     * disables the completion queue. */
    t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
    t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
    t3_write_reg(adapter, A_SG_CONTEXT_CMD,
             V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
    return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                   0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2148 
2149 /**
2150  *  t3_sge_cqcntxt_op - perform an operation on a completion queue context
2151  *  @adapter: the adapter
2152  *  @id: the context id
2153  *  @op: the operation to perform
2154  *  @credits: credits to return to the CQ
2155  *
2156  *  Perform the selected operation on an SGE completion queue context.
2157  *  The caller is responsible for ensuring only one context operation
2158  *  occurs at a time.
2159  *
2160  *  For most operations the function returns the current HW position in
2161  *  the completion queue.
2162  */
int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
              unsigned int credits)
{
    u32 val;

    if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
        return -EBUSY;

    /* Credits go in the upper half-word of DATA0. */
    t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
    t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
             V_CONTEXT(id) | F_CQ);
    if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
        return -EIO;

    /* Ops 2..6 report the HW position in the CQ. */
    if (op >= 2 && op < 7) {
        if (adapter->params.rev > 0)
            return G_CQ_INDEX(val);

        /* Rev-0 silicon does not return the index with the op
         * itself; issue an explicit context read (opcode 0) and
         * extract the index from DATA0. */
        t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
        if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
                    F_CONTEXT_CMD_BUSY, 0,
                    SG_CONTEXT_CMD_ATTEMPTS, 1))
            return -EIO;
        return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
    }
    return 0;
}
2192 
2193 /**
2194  *  t3_sge_read_context - read an SGE context
2195  *  @type: the context type
2196  *  @adapter: the adapter
2197  *  @id: the context id
2198  *  @data: holds the retrieved context
2199  *
2200  *  Read an SGE egress context.  The caller is responsible for ensuring
2201  *  only one context operation occurs at a time.
2202  */
static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
                   unsigned int id, u32 data[4])
{
    if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
        return -EBUSY;

    /* Opcode 0 = read context; wait for completion, then collect the
     * four CONTEXT_DATA words. */
    t3_write_reg(adapter, A_SG_CONTEXT_CMD,
             V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
    if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
                SG_CONTEXT_CMD_ATTEMPTS, 1))
        return -EIO;
    data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
    data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
    data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
    data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
    return 0;
}
2220 
2221 /**
2222  *  t3_sge_read_ecntxt - read an SGE egress context
2223  *  @adapter: the adapter
2224  *  @id: the context id
2225  *  @data: holds the retrieved context
2226  *
2227  *  Read an SGE egress context.  The caller is responsible for ensuring
2228  *  only one context operation occurs at a time.
2229  */
int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
{
    /* Egress context ids are limited to 16 bits. */
    if (id > 65535U)
        return -EINVAL;

    return t3_sge_read_context(F_EGRESS, adapter, id, data);
}
2236 
2237 /**
2238  *  t3_sge_read_cq - read an SGE CQ context
2239  *  @adapter: the adapter
2240  *  @id: the context id
2241  *  @data: holds the retrieved context
2242  *
2243  *  Read an SGE CQ context.  The caller is responsible for ensuring
2244  *  only one context operation occurs at a time.
2245  */
int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
{
    /* CQ context ids are limited to 16 bits. */
    if (id > 65535U)
        return -EINVAL;

    return t3_sge_read_context(F_CQ, adapter, id, data);
}
2252 
2253 /**
2254  *  t3_sge_read_fl - read an SGE free-list context
2255  *  @adapter: the adapter
2256  *  @id: the context id
2257  *  @data: holds the retrieved context
2258  *
2259  *  Read an SGE free-list context.  The caller is responsible for ensuring
2260  *  only one context operation occurs at a time.
2261  */
int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
{
    /* Two free lists (regular and jumbo) exist per queue set. */
    return (id < 2 * SGE_QSETS) ?
        t3_sge_read_context(F_FREELIST, adapter, id, data) : -EINVAL;
}
2268 
2269 /**
2270  *  t3_sge_read_rspq - read an SGE response queue context
2271  *  @adapter: the adapter
2272  *  @id: the context id
2273  *  @data: holds the retrieved context
2274  *
2275  *  Read an SGE response queue context.  The caller is responsible for
2276  *  ensuring only one context operation occurs at a time.
2277  */
int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
{
    /* One response queue exists per queue set. */
    return (id < SGE_QSETS) ?
        t3_sge_read_context(F_RESPONSEQ, adapter, id, data) : -EINVAL;
}
2284 
2285 /**
2286  *  t3_config_rss - configure Rx packet steering
2287  *  @adapter: the adapter
2288  *  @rss_config: RSS settings (written to TP_RSS_CONFIG)
2289  *  @cpus: values for the CPU lookup table (0xff terminated)
2290  *  @rspq: values for the response queue lookup table (0xffff terminated)
2291  *
2292  *  Programs the receive packet steering logic.  @cpus and @rspq provide
2293  *  the values for the CPU and response queue lookup tables.  If they
2294  *  provide fewer values than the size of the tables the supplied values
2295  *  are used repeatedly until the tables are fully populated.
2296  */
void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
           const u16 *rspq)
{
    int i, j, cpu_idx = 0, q_idx = 0;

    if (cpus)
        for (i = 0; i < RSS_TABLE_SIZE; ++i) {
            u32 val = i << 16;      /* table index in bits 31:16 */

            /* Two 6-bit CPU values per table entry. */
            for (j = 0; j < 2; ++j) {
                val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
                /* A 0xff sentinel terminates @cpus; wrap and
                 * reuse the supplied values. */
                if (cpus[cpu_idx] == 0xff)
                    cpu_idx = 0;
            }
            t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
        }

    if (rspq)
        for (i = 0; i < RSS_TABLE_SIZE; ++i) {
            t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
                     (i << 16) | rspq[q_idx++]);
            /* A 0xffff sentinel terminates @rspq; wrap around. */
            if (rspq[q_idx] == 0xffff)
                q_idx = 0;
        }

    t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
2324 
2325 /**
2326  *  t3_read_rss - read the contents of the RSS tables
2327  *  @adapter: the adapter
2328  *  @lkup: holds the contents of the RSS lookup table
2329  *  @map: holds the contents of the RSS map table
2330  *
2331  *  Reads the contents of the receive packet steering tables.
2332  */
int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
{
    int i;
    u32 val;

    if (lkup)
        for (i = 0; i < RSS_TABLE_SIZE; ++i) {
            /* Writing the index with the upper half-word set
             * requests a read-back of that entry. */
            t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
                     0xffff0000 | i);
            val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
            /* NOTE(review): bit 31 appears to flag a completed
             * read; retry later (-EAGAIN) when it is clear. */
            if (!(val & 0x80000000))
                return -EAGAIN;
            /* Each entry holds two 8-bit CPU values. */
            *lkup++ = (u8)val;
            *lkup++ = (u8)(val >> 8);
        }

    if (map)
        for (i = 0; i < RSS_TABLE_SIZE; ++i) {
            t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
                     0xffff0000 | i);
            val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
            if (!(val & 0x80000000))
                return -EAGAIN;
            *map++ = (u16)val;
        }
    return 0;
}
2360 
2361 /**
2362  *  t3_tp_set_offload_mode - put TP in NIC/offload mode
2363  *  @adap: the adapter
2364  *  @enable: 1 to select offload mode, 0 for regular NIC
2365  *
2366  *  Switches TP to NIC/offload mode.
2367  */
void t3_tp_set_offload_mode(adapter_t *adap, int enable)
{
    /* NICMODE is the inverse of offload mode.  Only leave NIC mode
     * (enable != 0) when the adapter actually supports offload. */
    if (is_offload(adap) || !enable)
        t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
                 V_NICMODE(!enable));
}
2374 
2375 /**
2376  *  tp_wr_bits_indirect - set/clear bits in an indirect TP register
2377  *  @adap: the adapter
2378  *  @addr: the indirect TP register address
2379  *  @mask: specifies the field within the register to modify
2380  *  @val: new value for the field
2381  *
2382  *  Sets a field of an indirect TP register to the given value.
2383  */
static void tp_wr_bits_indirect(adapter_t *adap, unsigned int addr,
                unsigned int mask, unsigned int val)
{
    /* Read-modify-write through the TP PIO window: select the
     * indirect address, merge @val into the unmasked bits of the
     * current value, then write it back. */
    t3_write_reg(adap, A_TP_PIO_ADDR, addr);
    val |= t3_read_reg(adap, A_TP_PIO_DATA) & ~mask;
    t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2391 
2392 /**
2393  *  t3_enable_filters - enable the HW filters
2394  *  @adap: the adapter
2395  *
2396  *  Enables the HW filters for NIC traffic.
2397  */
void t3_enable_filters(adapter_t *adap)
{
    /* Filters require TP to leave NIC mode. */
    t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, 0);
    /* Turn on MC5 filtering, 5-tuple lookups, and per-packet lookup. */
    t3_set_reg_field(adap, A_MC5_DB_CONFIG, 0, F_FILTEREN);
    t3_set_reg_field(adap, A_TP_GLOBAL_CONFIG, 0, V_FIVETUPLELOOKUP(3));
    tp_wr_bits_indirect(adap, A_TP_INGRESS_CONFIG, 0, F_LOOKUPEVERYPKT);
}
2405 
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
                    unsigned int pg_size)
{
    unsigned int pages = mem_size / pg_size;

    /* Round down to the nearest multiple of 24 as the HW requires. */
    return (pages / 24) * 24;
}
2422 
2423 #define mem_region(adap, start, size, reg) \
2424     t3_write_reg((adap), A_ ## reg, (start)); \
2425     start += size
2426 
/*
 * fls: find last (most significant) bit set.
 *
 * Returns the 1-based index of the highest set bit of @x, or 0 when
 * @x is 0; e.g. fls(1) == 1 and fls(0x80000000) == 32.
 *
 * The binary search is performed on an unsigned copy of the argument:
 * the previous version left-shifted a signed int, which is undefined
 * behavior in C once a set bit reaches the sign position.
 */
static __inline int fls(int x)
{
    unsigned int v = (unsigned int)x;
    int r = 32;

    if (v == 0)
        return 0;
    if (!(v & 0xffff0000u)) {
        v <<= 16;
        r -= 16;
    }
    if (!(v & 0xff000000u)) {
        v <<= 8;
        r -= 8;
    }
    if (!(v & 0xf0000000u)) {
        v <<= 4;
        r -= 4;
    }
    if (!(v & 0xc0000000u)) {
        v <<= 2;
        r -= 2;
    }
    if (!(v & 0x80000000u)) {
        v <<= 1;
        r -= 1;
    }
    return r;
}
2458 
/**
 *	partition_mem - partition memory and configure TP memory settings
 *	@adap: the adapter
 *	@p: the TP parameters
 *
 *	Partitions context and payload memory and configures TP's memory
 *	registers.
 */
static void partition_mem(adapter_t *adap, const struct tp_params *p)
{
    unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
    unsigned int timers = 0, timers_shift = 22;

    /*
     * Scale the timer-queue count (and the shift that sizes each timer
     * region) with the TID count; rev-0 parts keep the defaults.
     */
    if (adap->params.rev > 0) {
        if (tids <= 16 * 1024) {
            timers = 1;
            timers_shift = 16;
        } else if (tids <= 64 * 1024) {
            timers = 2;
            timers_shift = 18;
        } else if (tids <= 256 * 1024) {
            timers = 3;
            timers_shift = 20;
        }
    }

    /* Per-channel payload-memory sizes (Tx size packed into high half). */
    t3_write_reg(adap, A_TP_PMM_SIZE,
             p->chan_rx_size | (p->chan_tx_size >> 16));

    t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
    t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
    t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
    /* fls(pg_size) - 12 encodes the page size as a power of two >= 4K. */
    t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
             V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

    t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
    t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
    t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

    pstructs = p->rx_num_pgs + p->tx_num_pgs;
    /* Add a bit of headroom and make multiple of 24 */
    pstructs += 48;
    pstructs -= pstructs % 24;
    t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

    /*
     * Lay out the CM regions back to back after the TCBs; each
     * mem_region() invocation programs a base register and advances m.
     */
    m = tids * TCB_SIZE;
    mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
    mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
    t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
    m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
    mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
    mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
    mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
    mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

    /* Round up to a 4KB boundary; the remainder of CM goes to CIM. */
    m = (m + 4095) & ~0xfff;
    t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
    t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

    /*
     * If the remaining CM space supports fewer connections than the
     * MC5 partitioning assumed, grow the server region to absorb the
     * excess TCAM entries.
     */
    tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
    m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
        adap->params.mc5.nfilters - adap->params.mc5.nroutes;
    if (tids < m)
        adap->params.mc5.nservers += m - tids;
}
2524 
/*
 * tp_wr_indirect - write TP register @addr indirectly through the
 * TP_PIO_ADDR / TP_PIO_DATA register pair.
 */
static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
{
    t3_write_reg(adap, A_TP_PIO_ADDR, addr);
    t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2530 
/*
 * tp_config - static TP configuration.
 *
 * Programs TP's global options, TCP options, delayed-ACK behavior and
 * revision- and port-count-specific tweaks.  NOTE(review): the @p
 * parameter is not referenced in this body.
 */
static void tp_config(adapter_t *adap, const struct tp_params *p)
{
    /* Global options: Tx pacing, path MTU and checksum offloads on,
     * default IP TTL 64. */
    t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
             F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
             F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
    /* TCP options: default MTU 576, window scaling and SACK on,
     * timestamps off. */
    t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
             F_MTUENABLE | V_WINDOWSCALEMODE(1) |
             V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
    /* Delayed-ACK thresholds and automatic mode. */
    t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
             V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
             V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
             F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
    t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
             F_IPV6ENABLE | F_NICMODE);
    t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
    t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
    /* ESND enable bit moved between rev 0 (T3A) and later revisions. */
    t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
             adap->params.rev > 0 ? F_ENABLEESND :
                        F_T3A_ENABLEESND);
    t3_set_reg_field(adap, A_TP_PC_CONFIG,
             F_ENABLEEPCMDAFULL,
             F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
             F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
    t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
    /* NOTE(review): two back-to-back writes to the same register; only
     * the second (1000) persists -- confirm the first is intentional. */
    t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
    t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

    if (adap->params.rev > 0) {
        tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
        t3_set_reg_field(adap, A_TP_PARA_REG3, 0,
                 F_TXPACEAUTO | F_TXPACEAUTOSTRICT);
        t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
        tp_wr_indirect(adap, A_TP_VLAN_PRI_MAP, 0xfa50);
        tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP0, 0xfac688);
        tp_wr_indirect(adap, A_TP_MAC_MATCH_MAP1, 0xfac688);
    } else
        t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

    /* Clear the Tx modulation queue weights and rate limit. */
    t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
    t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
    t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
    t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);

    /* Extra setup for adapters with more than two ports. */
    if (adap->params.nports > 2) {
        t3_set_reg_field(adap, A_TP_PC_CONFIG2, 0,
                 F_ENABLETXPORTFROMDA | F_ENABLERXPORTFROMADDR);
        tp_wr_bits_indirect(adap, A_TP_QOS_RX_MAP_MODE,
                    V_RXMAPMODE(M_RXMAPMODE), 0);
        tp_wr_indirect(adap, A_TP_INGRESS_CONFIG, V_BITPOS0(48) |
                   V_BITPOS1(49) | V_BITPOS2(50) | V_BITPOS3(51) |
                   F_ENABLEEXTRACT | F_ENABLEEXTRACTIONSFD |
                   F_ENABLEINSERTION | F_ENABLEINSERTIONSFD);
        tp_wr_indirect(adap, A_TP_PREAMBLE_MSB, 0xfb000000);
        tp_wr_indirect(adap, A_TP_PREAMBLE_LSB, 0xd5);
        tp_wr_indirect(adap, A_TP_INTF_FROM_TX_PKT, F_INTFFROMTXPKT);
    }
}
2588 
2589 /* TCP timer values in ms */
2590 #define TP_DACK_TIMER 50
2591 #define TP_RTO_MIN    250
2592 
/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, such as the various timer resolutions and
 *	the TCP timer values.
 */
static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
{
    unsigned int tre = adap->params.tp.tre;
    unsigned int dack_re = adap->params.tp.dack_re;
    unsigned int tstamp_re = fls(core_clk / 1000);     /* 1ms, at least */
    unsigned int tps = core_clk >> tre;     /* timer ticks per second */

    t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
             V_DELAYEDACKRESOLUTION(dack_re) |
             V_TIMESTAMPRESOLUTION(tstamp_re));
    /* Delayed-ACK timer in dack_re ticks for TP_DACK_TIMER ms. */
    t3_write_reg(adap, A_TP_DACK_TIMER,
             (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
    /* Retransmit backoff table, four byte-wide entries per register. */
    t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
    t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
    t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
    t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
    t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
             V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
             V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
             V_KEEPALIVEMAX(9));

/* Expresses a duration in seconds as a count of timer ticks. */
#define SECONDS * tps

    t3_write_reg(adap, A_TP_MSL,
             adap->params.rev > 0 ? 0 : 2 SECONDS);
    t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
    t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
    t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
    t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
    t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
    t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
    t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
    t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
2637 
2638 #ifdef CONFIG_CHELSIO_T3_CORE
2639 /**
2640  *  t3_tp_set_coalescing_size - set receive coalescing size
2641  *  @adap: the adapter
2642  *  @size: the receive coalescing size
2643  *  @psh: whether a set PSH bit should deliver coalesced data
2644  *
2645  *  Set the receive coalescing size and PSH bit handling.
2646  */
t3_tp_set_coalescing_size(adapter_t * adap,unsigned int size,int psh)2647 int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
2648 {
2649     u32 val;
2650 
2651     if (size > MAX_RX_COALESCING_LEN)
2652         return -EINVAL;
2653 
2654     val = t3_read_reg(adap, A_TP_PARA_REG3);
2655     val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2656 
2657     if (size) {
2658         val |= F_RXCOALESCEENABLE;
2659         if (psh)
2660             val |= F_RXCOALESCEPSHEN;
2661         size = uimin(MAX_RX_COALESCING_LEN, size);
2662         t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2663                  V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2664     }
2665     t3_write_reg(adap, A_TP_PARA_REG3, val);
2666     return 0;
2667 }
2668 
2669 /**
2670  *  t3_tp_set_max_rxsize - set the max receive size
2671  *  @adap: the adapter
2672  *  @size: the max receive size
2673  *
2674  *  Set TP's max receive size.  This is the limit that applies when
2675  *  receive coalescing is disabled.
2676  */
t3_tp_set_max_rxsize(adapter_t * adap,unsigned int size)2677 void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
2678 {
2679     t3_write_reg(adap, A_TP_PARA_REG7,
2680              V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2681 }
2682 
init_mtus(unsigned short mtus[])2683 static void __devinit init_mtus(unsigned short mtus[])
2684 {
2685     /*
2686      * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
2687      * it can accommodate max size TCP/IP headers when SACK and timestamps
2688      * are enabled and still have at least 8 bytes of payload.
2689      */
2690     mtus[0] = 88;
2691     mtus[1] = 88;
2692     mtus[2] = 256;
2693     mtus[3] = 512;
2694     mtus[4] = 576;
2695     mtus[5] = 1024;
2696     mtus[6] = 1280;
2697     mtus[7] = 1492;
2698     mtus[8] = 1500;
2699     mtus[9] = 2002;
2700     mtus[10] = 2048;
2701     mtus[11] = 4096;
2702     mtus[12] = 4352;
2703     mtus[13] = 8192;
2704     mtus[14] = 9000;
2705     mtus[15] = 9600;
2706 }
2707 
2708 /**
2709  *  init_cong_ctrl - initialize congestion control parameters
2710  *  @a: the alpha values for congestion control
2711  *  @b: the beta values for congestion control
2712  *
2713  *  Initialize the congestion control parameters.
2714  */
init_cong_ctrl(unsigned short * a,unsigned short * b)2715 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2716 {
2717     a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2718     a[9] = 2;
2719     a[10] = 3;
2720     a[11] = 4;
2721     a[12] = 5;
2722     a[13] = 6;
2723     a[14] = 7;
2724     a[15] = 8;
2725     a[16] = 9;
2726     a[17] = 10;
2727     a[18] = 14;
2728     a[19] = 17;
2729     a[20] = 21;
2730     a[21] = 25;
2731     a[22] = 30;
2732     a[23] = 35;
2733     a[24] = 45;
2734     a[25] = 60;
2735     a[26] = 80;
2736     a[27] = 100;
2737     a[28] = 200;
2738     a[29] = 300;
2739     a[30] = 400;
2740     a[31] = 500;
2741 
2742     b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2743     b[9] = b[10] = 1;
2744     b[11] = b[12] = 2;
2745     b[13] = b[14] = b[15] = b[16] = 3;
2746     b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2747     b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2748     b[28] = b[29] = 6;
2749     b[30] = b[31] = 7;
2750 }
2751 
2752 /* The minimum additive increment value for the congestion control table */
2753 #define CC_MIN_INCR 2U
2754 
/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
 *	Update the high-speed congestion control table with the supplied alpha,
 *	beta, and MTUs.
 */
void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
          unsigned short alpha[NCCTRL_WIN],
          unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
    /* Per-window packet counts used to scale the additive increment;
     * presumably the expected packets per congestion window -- TODO
     * confirm against the TP documentation. */
    static const unsigned int avg_pkts[NCCTRL_WIN] = {
        2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
        896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
        28672, 40960, 57344, 81920, 114688, 163840, 229376 };

    unsigned int i, w;

    for (i = 0; i < NMTUS; ++i) {
        unsigned int mtu = uimin(mtus[i], mtu_cap);
        unsigned int log2 = fls(mtu);

        /* Decrement log2 when the bit two below the MSB is clear,
         * i.e. mtu is in the lower quarter of its power-of-2 range. */
        if (!(mtu & ((1 << log2) >> 2)))     /* round */
            log2--;
        /* MTU table entry: index in bits 31:24, log2 in 23:16. */
        t3_write_reg(adap, A_TP_MTU_TABLE,
                 (i << 24) | (log2 << 16) | mtu);

        for (w = 0; w < NCCTRL_WIN; ++w) {
            unsigned int inc;

            /* Additive increment, never below CC_MIN_INCR. */
            inc = uimax(((mtu - 40) * alpha[w]) / avg_pkts[w],
                  CC_MIN_INCR);

            t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
                     (w << 16) | (beta[w] << 13) | inc);
        }
    }
}
2798 
2799 /**
2800  *  t3_read_hw_mtus - returns the values in the HW MTU table
2801  *  @adap: the adapter
2802  *  @mtus: where to store the HW MTU values
2803  *
2804  *  Reads the HW MTU table.
2805  */
t3_read_hw_mtus(adapter_t * adap,unsigned short mtus[NMTUS])2806 void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
2807 {
2808     int i;
2809 
2810     for (i = 0; i < NMTUS; ++i) {
2811         unsigned int val;
2812 
2813         t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2814         val = t3_read_reg(adap, A_TP_MTU_TABLE);
2815         mtus[i] = val & 0x3fff;
2816     }
2817 }
2818 
2819 /**
2820  *  t3_get_cong_cntl_tab - reads the congestion control table
2821  *  @adap: the adapter
2822  *  @incr: where to store the alpha values
2823  *
2824  *  Reads the additive increments programmed into the HW congestion
2825  *  control table.
2826  */
t3_get_cong_cntl_tab(adapter_t * adap,unsigned short incr[NMTUS][NCCTRL_WIN])2827 void t3_get_cong_cntl_tab(adapter_t *adap,
2828               unsigned short incr[NMTUS][NCCTRL_WIN])
2829 {
2830     unsigned int mtu, w;
2831 
2832     for (mtu = 0; mtu < NMTUS; ++mtu)
2833         for (w = 0; w < NCCTRL_WIN; ++w) {
2834             t3_write_reg(adap, A_TP_CCTRL_TABLE,
2835                      0xffff0000 | (mtu << 5) | w);
2836             incr[mtu][w] = (unsigned short)t3_read_reg(adap,
2837                         A_TP_CCTRL_TABLE) & 0x1fff;
2838         }
2839 }
2840 
/**
 *	t3_tp_get_mib_stats - read TP's MIB counters
 *	@adap: the adapter
 *	@tps: holds the returned counter values
 *
 *	Returns the values of TP's MIB counters.  The whole structure is
 *	filled as consecutive 32-bit words read through the
 *	TP_MIB_INDEX/TP_MIB_RDATA indirect register pair, so @tps must
 *	consist solely of u32-sized fields.
 */
void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
{
    t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
             sizeof(*tps) / sizeof(u32), 0);
}
2853 
2854 /**
2855  *  t3_read_pace_tbl - read the pace table
2856  *  @adap: the adapter
2857  *  @pace_vals: holds the returned values
2858  *
2859  *  Returns the values of TP's pace table in nanoseconds.
2860  */
t3_read_pace_tbl(adapter_t * adap,unsigned int pace_vals[NTX_SCHED])2861 void t3_read_pace_tbl(adapter_t *adap, unsigned int pace_vals[NTX_SCHED])
2862 {
2863     unsigned int i, tick_ns = dack_ticks_to_usec(adap, 1000);
2864 
2865     for (i = 0; i < NTX_SCHED; i++) {
2866         t3_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
2867         pace_vals[i] = t3_read_reg(adap, A_TP_PACE_TABLE) * tick_ns;
2868     }
2869 }
2870 
2871 /**
2872  *  t3_set_pace_tbl - set the pace table
2873  *  @adap: the adapter
2874  *  @pace_vals: the pace values in nanoseconds
2875  *  @start: index of the first entry in the HW pace table to set
2876  *  @n: how many entries to set
2877  *
2878  *  Sets (a subset of the) HW pace table.
2879  */
t3_set_pace_tbl(adapter_t * adap,unsigned int * pace_vals,unsigned int start,unsigned int n)2880 void t3_set_pace_tbl(adapter_t *adap, unsigned int *pace_vals,
2881              unsigned int start, unsigned int n)
2882 {
2883     unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
2884 
2885     for ( ; n; n--, start++, pace_vals++)
2886         t3_write_reg(adap, A_TP_PACE_TABLE, (start << 16) |
2887                  ((*pace_vals + tick_ns / 2) / tick_ns));
2888 }
2889 
2890 #define ulp_region(adap, name, start, len) \
2891     t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2892     t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2893              (start) + (len) - 1); \
2894     start += len
2895 
2896 #define ulptx_region(adap, name, start, len) \
2897     t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2898     t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2899              (start) + (len) - 1)
2900 
/*
 * ulp_config - partition the per-channel Rx payload memory among the
 * ULP sub-blocks (iSCSI, TDDP, TPT, STAG, RQ, PBL).
 *
 * ulp_region() programs a ULP-RX region's limit registers and advances
 * @m; ulptx_region() programs the corresponding ULP-TX limits over the
 * current span without advancing, so TPT/STAG and the two PBL regions
 * each share an address range.
 */
static void ulp_config(adapter_t *adap, const struct tp_params *p)
{
    unsigned int m = p->chan_rx_size;

    ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
    ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
    ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
    ulp_region(adap, STAG, m, p->chan_rx_size / 4);
    ulp_region(adap, RQ, m, p->chan_rx_size / 4);
    ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
    ulp_region(adap, PBL, m, p->chan_rx_size / 4);
    /* Match all TDDP tag bits. */
    t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
2914 
2915 
2916 /**
2917  *  t3_set_proto_sram - set the contents of the protocol sram
2918  *  @adapter: the adapter
2919  *  @data: the protocol image
2920  *
2921  *  Write the contents of the protocol SRAM.
2922  */
t3_set_proto_sram(adapter_t * adap,const u8 * data)2923 int t3_set_proto_sram(adapter_t *adap, const u8 *data)
2924 {
2925     int i;
2926     const u32 *buf = (const u32 *)data;
2927 
2928     for (i = 0; i < PROTO_SRAM_LINES; i++) {
2929         t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
2930         t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
2931         t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
2932         t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
2933         t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
2934 
2935         t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2936         if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2937             return -EIO;
2938     }
2939     return 0;
2940 }
2941 #endif
2942 
2943 /**
2944  *  t3_config_trace_filter - configure one of the tracing filters
2945  *  @adapter: the adapter
2946  *  @tp: the desired trace filter parameters
2947  *  @filter_index: which filter to configure
2948  *  @invert: if set non-matching packets are traced instead of matching ones
2949  *  @enable: whether to enable or disable the filter
2950  *
2951  *  Configures one of the tracing filters available in HW.
2952  */
t3_config_trace_filter(adapter_t * adapter,const struct trace_params * tp,int filter_index,int invert,int enable)2953 void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
2954                 int filter_index, int invert, int enable)
2955 {
2956     u32 addr, key[4], mask[4];
2957 
2958     key[0] = tp->sport | (tp->sip << 16);
2959     key[1] = (tp->sip >> 16) | (tp->dport << 16);
2960     key[2] = tp->dip;
2961     key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2962 
2963     mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2964     mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2965     mask[2] = tp->dip_mask;
2966     mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2967 
2968     if (invert)
2969         key[3] |= (1 << 29);
2970     if (enable)
2971         key[3] |= (1 << 28);
2972 
2973     addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2974     tp_wr_indirect(adapter, addr++, key[0]);
2975     tp_wr_indirect(adapter, addr++, mask[0]);
2976     tp_wr_indirect(adapter, addr++, key[1]);
2977     tp_wr_indirect(adapter, addr++, mask[1]);
2978     tp_wr_indirect(adapter, addr++, key[2]);
2979     tp_wr_indirect(adapter, addr++, mask[2]);
2980     tp_wr_indirect(adapter, addr++, key[3]);
2981     tp_wr_indirect(adapter, addr,   mask[3]);
2982     (void) t3_read_reg(adapter, A_TP_PIO_DATA);
2983 }
2984 
/**
 *	t3_config_sched - configure a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps
 *	@sched: the scheduler index
 *
 *	Configure a Tx HW scheduler for the target rate.
 */
int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
{
    unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
    unsigned int clk = adap->params.vpd.cclk * 1000;    /* core clock, Hz */
    unsigned int selected_cpt = 0, selected_bpt = 0;

    if (kbps > 0) {
        kbps *= 125;     /* -> bytes */
        /*
         * Search for the clocks-per-tick / bytes-per-tick pair (both
         * limited to 8 bits) whose resulting rate is closest to the
         * requested one.
         */
        for (cpt = 1; cpt <= 255; cpt++) {
            tps = clk / cpt;     /* ticks per second */
            bpt = (kbps + tps / 2) / tps;
            if (bpt > 0 && bpt <= 255) {
                v = bpt * tps;
                delta = v >= kbps ? v - kbps : kbps - v;
                if (delta <= mindelta) {
                    mindelta = delta;
                    selected_cpt = cpt;
                    selected_bpt = bpt;
                }
            } else if (selected_cpt)
                /* bpt out of range and we already have a
                 * candidate; larger cpt won't help. */
                break;
        }
        if (!selected_cpt)
            return -EINVAL;
    }
    /* Each TM register holds the settings for a pair of schedulers:
     * the odd scheduler occupies the high 16 bits. */
    t3_write_reg(adap, A_TP_TM_PIO_ADDR,
             A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
    v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
    if (sched & 1)
        v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
    else
        v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
    t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
    return 0;
}
3028 
3029 /**
3030  *  t3_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3031  *  @adap: the adapter
3032  *  @sched: the scheduler index
3033  *  @ipg: the interpacket delay in tenths of nanoseconds
3034  *
3035  *  Set the interpacket delay for a HW packet rate scheduler.
3036  */
t3_set_sched_ipg(adapter_t * adap,int sched,unsigned int ipg)3037 int t3_set_sched_ipg(adapter_t *adap, int sched, unsigned int ipg)
3038 {
3039     unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3040 
3041     /* convert ipg to nearest number of core clocks */
3042     ipg *= core_ticks_per_usec(adap);
3043     ipg = (ipg + 5000) / 10000;
3044     if (ipg > 0xffff)
3045         return -EINVAL;
3046 
3047     t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3048     v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3049     if (sched & 1)
3050         v = (v & 0xffff) | (ipg << 16);
3051     else
3052         v = (v & 0xffff0000) | ipg;
3053     t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3054     t3_read_reg(adap, A_TP_TM_PIO_DATA);
3055     return 0;
3056 }
3057 
3058 /**
3059  *  t3_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3060  *  @adap: the adapter
3061  *  @sched: the scheduler index
3062  *  @kbps: the byte rate in Kbps
3063  *  @ipg: the interpacket delay in tenths of nanoseconds
3064  *
3065  *  Return the current configuration of a HW Tx scheduler.
3066  */
t3_get_tx_sched(adapter_t * adap,unsigned int sched,unsigned int * kbps,unsigned int * ipg)3067 void t3_get_tx_sched(adapter_t *adap, unsigned int sched, unsigned int *kbps,
3068              unsigned int *ipg)
3069 {
3070     unsigned int v, addr, bpt, cpt;
3071 
3072     if (kbps) {
3073         addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3074         t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3075         v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3076         if (sched & 1)
3077             v >>= 16;
3078         bpt = (v >> 8) & 0xff;
3079         cpt = v & 0xff;
3080         if (!cpt)
3081             *kbps = 0;        /* scheduler disabled */
3082         else {
3083             v = (adap->params.vpd.cclk * 1000) / cpt;
3084             *kbps = (v * bpt) / 125;
3085         }
3086     }
3087     if (ipg) {
3088         addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3089         t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3090         v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3091         if (sched & 1)
3092             v >>= 16;
3093         v &= 0xffff;
3094         *ipg = (10000 * v) / core_ticks_per_usec(adap);
3095     }
3096 }
3097 
3098 /**
3099  *  tp_init - configure TP
3100  *  @adap: the adapter
3101  *  @p: TP configuration parameters
3102  *
3103  *  Initializes the TP HW module.
3104  */
tp_init(adapter_t * adap,const struct tp_params * p)3105 static int tp_init(adapter_t *adap, const struct tp_params *p)
3106 {
3107     int busy = 0;
3108 
3109     tp_config(adap, p);
3110     t3_set_vlan_accel(adap, 3, 0);
3111 
3112     if (is_offload(adap)) {
3113         tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3114         t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3115         busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3116                        0, 1000, 5);
3117         if (busy)
3118             CH_ERR(adap, "TP initialization timed out\n");
3119     }
3120 
3121     if (!busy)
3122         t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3123     return busy;
3124 }
3125 
3126 /**
3127  *  t3_mps_set_active_ports - configure port failover
3128  *  @adap: the adapter
3129  *  @port_mask: bitmap of active ports
3130  *
3131  *  Sets the active ports according to the supplied bitmap.
3132  */
t3_mps_set_active_ports(adapter_t * adap,unsigned int port_mask)3133 int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
3134 {
3135     if (port_mask & ~((1 << adap->params.nports) - 1))
3136         return -EINVAL;
3137     t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3138              port_mask << S_PORT0ACTIVE);
3139     return 0;
3140 }
3141 
/**
 *	chan_init_hw - channel-dependent HW initialization
 *	@adap: the adapter
 *	@chan_map: bitmap of Tx channels being used
 *
 *	Perform the bits of HW initialization that are dependent on the Tx
 *	channels being used.
 */
static void chan_init_hw(adapter_t *adap, unsigned int chan_map)
{
    int i;

    if (chan_map != 3) {                                 /* one channel */
        /* Single channel: no round-robin arbitration needed. */
        t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
        t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
        t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
                 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
                          F_TPTXPORT1EN | F_PORT1ACTIVE));
        t3_write_reg(adap, A_PM1_TX_CFG,
                 chan_map == 1 ? 0xffffffff : 0);
        if (chan_map == 2)
            t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
                     V_TX_MOD_QUEUE_REQ_MAP(0xff));
        /* Entries 12 and 13 of the Tx modulation queue table. */
        t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xd9c8);
        t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfbea);
    } else {                                             /* two channels */
        /* Both channels: round-robin between them with equal weights. */
        t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
        t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
        t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
                 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
        t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
                 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
                 F_ENFORCEPKT);
        t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
        t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
        t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
                 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
        for (i = 0; i < 16; i++)
            t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
                     (i << 16) | 0x1010);
        t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (12 << 16) | 0xba98);
        t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, (13 << 16) | 0xfedc);
    }
}
3186 
/*
 * calibrate_xgm - run impedance calibration for the XGMAC.
 *
 * Returns 0 on success, -1 if XAUI calibration never completes cleanly.
 */
static int calibrate_xgm(adapter_t *adapter)
{
    if (uses_xaui(adapter)) {
        unsigned int v, i;

        /* Retry the XAUI impedance calibration up to 5 times. */
        for (i = 0; i < 5; ++i) {
            t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
            (void) t3_read_reg(adapter, A_XGM_XAUI_IMP); /* flush */
            msleep(1);
            v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
            /* Done when neither fault nor busy is reported. */
            if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
                t3_write_reg(adapter, A_XGM_XAUI_IMP,
                         V_XAUIIMP(G_CALIMP(v) >> 2));
                return 0;
            }
        }
        CH_ERR(adapter, "MAC calibration failed\n");
        return -1;
    } else {
        /* RGMII: program fixed pull-up/pull-down impedance values. */
        t3_write_reg(adapter, A_XGM_RGMII_IMP,
                 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
        t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
                 F_XGM_IMPSETUPDATE);
    }
    return 0;
}
3213 
/*
 * T3B-revision MAC impedance calibration.  Only RGMII boards need work
 * here: pulse the calibration reset and the two update strobes in the
 * order the hardware requires.  XAUI boards need no action.
 */
static void calibrate_xgm_t3b(adapter_t *adapter)
{
    if (uses_xaui(adapter))
        return;

    t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
             F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
    t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
    t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_XGM_IMPSETUPDATE);
    t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE, 0);
    t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
    t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
}
3228 
/*
 * MC7 memory-controller timing parameters, programmed into A_MC7_PARM
 * by mc7_init().  RefCyc is indexed by the density code read from the
 * controller's CFG register.
 */
struct mc7_timing_params {
    unsigned char ActToPreDly;      /* active to precharge delay */
    unsigned char ActToRdWrDly;     /* active to read/write delay */
    unsigned char PreCyc;           /* precharge cycle time */
    unsigned char RefCyc[5];        /* refresh cycle time, per density */
    unsigned char BkCyc;            /* bank cycle time */
    unsigned char WrToRdDly;        /* write to read delay */
    unsigned char RdToWrDly;        /* read to write delay */
};
3238 
3239 /*
3240  * Write a value to a register and check that the write completed.  These
3241  * writes normally complete in a cycle or two, so one read should suffice.
3242  * The very first read exists to flush the posted write to the device.
3243  */
wrreg_wait(adapter_t * adapter,unsigned int addr,u32 val)3244 static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
3245 {
3246     t3_write_reg(adapter,   addr, val);
3247     (void) t3_read_reg(adapter, addr);                   /* flush */
3248     if (!(t3_read_reg(adapter, addr) & F_BUSY))
3249         return 0;
3250     CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3251     return -EIO;
3252 }
3253 
/*
 * Bring up one MC7 memory controller and its attached memory.
 * Calibrates the interface (fast parts only), programs the timing
 * parameters for @mem_type, walks the DRAM initialization command
 * sequence, enables periodic refresh and ECC, then runs a full BIST
 * pass before opening the controller for normal accesses.
 *
 * @mc7_clock is the memory clock in kHz; @mem_type indexes both
 * mc7_mode[] and mc7_timings[].  Returns 0 on success, -1 on failure.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
    /* DRAM mode-register values, one per supported memory type. */
    static const unsigned int mc7_mode[] = {
        0x632, 0x642, 0x652, 0x432, 0x442
    };
    static const struct mc7_timing_params mc7_timings[] = {
        { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
        { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
        { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
        { 9,  3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
        { 9,  4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
    };

    u32 val;
    unsigned int width, density, slow, attempts;
    adapter_t *adapter = mc7->adapter;
    const struct mc7_timing_params *p = &mc7_timings[mem_type];

    if (!mc7->size)         /* no memory attached to this controller */
        return 0;

    val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
    slow = val & F_SLOW;
    width = G_WIDTH(val);
    density = G_DEN(val);

    /* Enable the memory interface and let it settle. */
    t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
    val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);  /* flush */
    msleep(1);

    if (!slow) {
        /* Run a single calibration cycle; fault/busy after 1 ms = fail. */
        t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
        (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
        msleep(1);
        if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
            (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
            CH_ERR(adapter, "%s MC7 calibration timed out\n",
                   mc7->name);
            goto out_fail;
        }
    }

    /* Program the timing parameters for this memory/density. */
    t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
             V_ACTTOPREDLY(p->ActToPreDly) |
             V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
             V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
             V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

    t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
             val | F_CLKEN | F_TERM150);
    (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */

    if (!slow)
        t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
                 F_DLLENB);
    udelay(1);

    /* DRAM init sequence: precharge, extended mode registers, mode reg. */
    val = slow ? 3 : 6;
    if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
        wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
        wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
        wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
        goto out_fail;

    if (!slow) {
        /* Reset the DLL, then release it before continuing. */
        t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
        t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
                 F_DLLRST, 0);
        udelay(5);
    }

    /* Precharge, two refreshes, then final mode-register programming. */
    if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
        wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
        wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
        wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
               mc7_mode[mem_type]) ||
        wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
        wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
        goto out_fail;

    /* clock value is in KHz */
    mc7_clock = mc7_clock * 7812 + mc7_clock / 2;  /* ns */
    mc7_clock /= 1000000;                          /* KHz->MHz, ns->us */

    /* Enable periodic refresh at the derived divider. */
    t3_write_reg(adapter, mc7->offset + A_MC7_REF,
             F_PERREFEN | V_PREREFDIV(mc7_clock));
    (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */

    /* Turn on ECC generation/checking and BIST the entire memory. */
    t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
             F_ECCGENEN | F_ECCCHKEN);
    t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
    t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
    t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
             (mc7->size << width) - 1);
    t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
    (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */

    /* Poll for BIST completion, up to 50 * 250 ms. */
    attempts = 50;
    do {
        msleep(250);
        val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
    } while ((val & F_BUSY) && --attempts);
    if (val & F_BUSY) {
        CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
        goto out_fail;
    }

    /* Enable normal memory accesses. */
    t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
    return 0;

 out_fail:
    return -1;
}
3368 
/*
 * Tune the PCIe link: program the ACK latency timer and replay-timer
 * limit from lookup tables indexed by [log2(link width)][payload size],
 * clear any stale PEX errors, and enable correctable-error logging.
 */
static void config_pcie(adapter_t *adap)
{
    /* ACK latency values, indexed [log2 link width][max payload code]. */
    static const u16 ack_lat[4][6] = {
        { 237, 416, 559, 1071, 2095, 4143 },
        { 128, 217, 289, 545, 1057, 2081 },
        { 73, 118, 154, 282, 538, 1050 },
        { 67, 107, 86, 150, 278, 534 }
    };
    /* Replay-timer limits, same indexing as ack_lat. */
    static const u16 rpl_tmr[4][6] = {
        { 711, 1248, 1677, 3213, 6285, 12429 },
        { 384, 651, 867, 1635, 3171, 6243 },
        { 219, 354, 462, 846, 1614, 3150 },
        { 201, 321, 258, 450, 834, 1602 }
    };

    u16 val;
    unsigned int log2_width, pldsize;
    unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

    /* Max payload size from the PCIe capability's Device Control reg. */
    t3_os_pci_read_config_2(adap,
                adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
                &val);
    pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

    t3_os_pci_read_config_2(adap,
                adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
                    &val);

    fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
    /* Rev-0 silicon has no separate RX fast-training count; reuse TX. */
    fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
            G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
    log2_width = fls(adap->params.pci.width) - 1;
    acklat = ack_lat[log2_width][pldsize];
    if (val & 1)                            /* check LOsEnable */
        acklat += fst_trn_tx * 4;
    rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

    /* The ACK latency field has a different layout on rev-0 (T3A). */
    if (adap->params.rev == 0)
        t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
                 V_T3A_ACKLAT(M_T3A_ACKLAT),
                 V_T3A_ACKLAT(acklat));
    else
        t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
                 V_ACKLAT(acklat));

    t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
             V_REPLAYLMT(rpllmt));

    /* Clear pending PEX errors and enable correctable-error logging. */
    t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
    t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
}
3420 
3421 /**
3422  *  t3_init_hw - initialize and configure T3 HW modules
3423  *  @adapter: the adapter
3424  *  @fw_params: initial parameters to pass to firmware (optional)
3425  *
3426  *  Initialize and configure T3 HW modules.  This performs the
3427  *  initialization steps that need to be done once after a card is reset.
3428  *  MAC and PHY initialization is handled separarely whenever a port is
3429  *  enabled.
3430  *
3431  *  @fw_params are passed to FW and their value is platform dependent.
3432  *  Only the top 8 bits are available for use, the rest must be 0.
3433  */
t3_init_hw(adapter_t * adapter,u32 fw_params)3434 int t3_init_hw(adapter_t *adapter, u32 fw_params)
3435 {
3436     int err = -EIO, attempts = 100;
3437     const struct vpd_params *vpd = &adapter->params.vpd;
3438 
3439     if (adapter->params.rev > 0)
3440         calibrate_xgm_t3b(adapter);
3441     else if (calibrate_xgm(adapter))
3442         goto out_err;
3443 
3444     if (adapter->params.nports > 2)
3445         t3_mac_reset(&adap2pinfo(adapter, 0)->mac);
3446 
3447     if (vpd->mclk) {
3448         partition_mem(adapter, &adapter->params.tp);
3449 
3450         if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3451             mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3452             mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3453             t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3454                     adapter->params.mc5.nfilters,
3455                     adapter->params.mc5.nroutes))
3456             goto out_err;
3457     }
3458 
3459     if (tp_init(adapter, &adapter->params.tp))
3460         goto out_err;
3461 
3462 #ifdef CONFIG_CHELSIO_T3_CORE
3463     t3_tp_set_coalescing_size(adapter,
3464                   uimin(adapter->params.sge.max_pkt_size,
3465                       MAX_RX_COALESCING_LEN), 1);
3466     t3_tp_set_max_rxsize(adapter,
3467                  uimin(adapter->params.sge.max_pkt_size, 16384U));
3468     ulp_config(adapter, &adapter->params.tp);
3469 #endif
3470     if (is_pcie(adapter))
3471         config_pcie(adapter);
3472     else
3473         t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3474 
3475     t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3476     t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3477     t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3478     chan_init_hw(adapter, adapter->params.chan_map);
3479     t3_sge_init(adapter, &adapter->params.sge);
3480 
3481     t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3482     t3_write_reg(adapter, A_CIM_BOOT_CFG,
3483              V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3484     (void) t3_read_reg(adapter, A_CIM_BOOT_CFG);    /* flush */
3485 
3486     do {                          /* wait for uP to initialize */
3487         msleep(20);
3488     } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3489     if (!attempts) {
3490         CH_ERR(adapter, "uP initialization timed out\n");
3491         goto out_err;
3492     }
3493 
3494     err = 0;
3495  out_err:
3496     return err;
3497 }
3498 
3499 /**
3500  *  get_pci_mode - determine a card's PCI mode
3501  *  @adapter: the adapter
3502  *  @p: where to store the PCI settings
3503  *
3504  *  Determines a card's PCI mode and associated parameters, such as speed
3505  *  and width.
3506  */
get_pci_mode(adapter_t * adapter,struct pci_params * p)3507 static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
3508 {
3509     static unsigned short speed_map[] = { 33, 66, 100, 133 };
3510     u32 pcie_mode, pcie_cap;
3511 
3512     pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
3513     if (pcie_cap) {
3514         u16 val;
3515 
3516         p->variant = PCI_VARIANT_PCIE;
3517         p->pcie_cap_addr = pcie_cap;
3518         t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
3519                     &val);
3520         p->width = (val >> 4) & 0x3f;
3521         return;
3522     }
3523 
3524     pcie_mode = t3_read_reg(adapter, A_PCIX_MODE);
3525     p->speed = speed_map[G_PCLKRANGE(pcie_mode)];
3526     p->width = (pcie_mode & F_64BIT) ? 64 : 32;
3527     pcie_mode = G_PCIXINITPAT(pcie_mode);
3528     if (pcie_mode == 0)
3529         p->variant = PCI_VARIANT_PCI;
3530     else if (pcie_mode < 4)
3531         p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3532     else if (pcie_mode < 8)
3533         p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3534     else
3535         p->variant = PCI_VARIANT_PCIX_266_MODE2;
3536 }
3537 
3538 /**
3539  *  init_link_config - initialize a link's SW state
3540  *  @lc: structure holding the link state
3541  *  @caps: link capabilities
3542  *
3543  *  Initializes the SW state maintained for each link, including the link's
3544  *  capabilities and default speed/duplex/flow-control/autonegotiation
3545  *  settings.
3546  */
init_link_config(struct link_config * lc,unsigned int caps)3547 static void __devinit init_link_config(struct link_config *lc,
3548                        unsigned int caps)
3549 {
3550     lc->supported = caps;
3551     lc->requested_speed = lc->speed = SPEED_INVALID;
3552     lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3553     lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3554     if (lc->supported & SUPPORTED_Autoneg) {
3555         lc->advertising = lc->supported;
3556         lc->autoneg = AUTONEG_ENABLE;
3557         lc->requested_fc |= PAUSE_AUTONEG;
3558     } else {
3559         lc->advertising = 0;
3560         lc->autoneg = AUTONEG_DISABLE;
3561     }
3562 }
3563 
3564 /**
3565  *  mc7_calc_size - calculate MC7 memory size
3566  *  @cfg: the MC7 configuration
3567  *
3568  *  Calculates the size of an MC7 memory in bytes from the value of its
3569  *  configuration register.
3570  */
mc7_calc_size(u32 cfg)3571 static unsigned int __devinit mc7_calc_size(u32 cfg)
3572 {
3573     unsigned int width = G_WIDTH(cfg);
3574     unsigned int banks = !!(cfg & F_BKS) + 1;
3575     unsigned int org = !!(cfg & F_ORG) + 1;
3576     unsigned int density = G_DEN(cfg);
3577     unsigned int MBs = ((256 << density) * banks) / (org << width);
3578 
3579     return MBs << 20;
3580 }
3581 
/*
 * Initialize the SW state for one MC7 controller: record its name and
 * register-block offset, and derive the attached memory's size and width
 * from the controller's CFG register.
 */
static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
                   unsigned int base_addr, const char *name)
{
    u32 cfg;

    mc7->adapter = adapter;
    mc7->name = name;
    /* All MC7 blocks share one layout; offsets are relative to PMRX. */
    mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
    cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
    /* A density field of M_DEN means no memory is attached. */
    mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
    mc7->width = G_WIDTH(cfg);
}
3594 
/*
 * Initialize the SW state for one MAC.  On cards with more than two
 * ports (multiport), all ports share MAC 0 and the original index is
 * kept as the external port number.  Rev-0 XAUI parts additionally need
 * their SERDES programmed and RGMII disabled here.
 */
void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
{
    mac->adapter = adapter;
    mac->multiport = adapter->params.nports > 2;
    if (mac->multiport) {
        mac->ext_port = (unsigned char)index;
        mac->nucast = 8;
        index = 0;              /* all ports funnel through MAC 0 */
    } else
        mac->nucast = 1;

    mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;

    if (adapter->params.rev == 0 && uses_xaui(adapter)) {
        /* SERDES control value differs for 10G vs 1G parts. */
        t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
                 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
        t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
                 F_ENRGMII, 0);
    }
}
3615 
3616 /**
3617  *  early_hw_init - HW initialization done at card detection time
3618  *  @adapter: the adapter
3619  *  @ai: contains information about the adapter type and properties
3620  *
3621  *  Perfoms the part of HW initialization that is done early on when the
3622  *  driver first detecs the card.  Most of the HW state is initialized
3623  *  lazily later on when a port or an offload function are first used.
3624  */
early_hw_init(adapter_t * adapter,const struct adapter_info * ai)3625 void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
3626 {
3627     u32 val = V_PORTSPEED(is_10G(adapter) || adapter->params.nports > 2 ?
3628                   3 : 2);
3629 
3630     mi1_init(adapter, ai);
3631     t3_write_reg(adapter, A_I2C_CFG,                  /* set for 80KHz */
3632              V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3633     t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3634              ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3635     t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3636 
3637     if (adapter->params.rev == 0 || !uses_xaui(adapter))
3638         val |= F_ENRGMII;
3639 
3640     /* Enable MAC clocks so we can access the registers */
3641     t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3642     (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3643 
3644     val |= F_CLKDIVRESET_;
3645     t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3646     (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3647     t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3648     (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
3649 }
3650 
3651 /**
3652  *  t3_reset_adapter - reset the adapter
3653  *  @adapter: the adapter
3654  *
3655  *  Reset the adapter.
3656  */
t3_reset_adapter(adapter_t * adapter)3657 static int t3_reset_adapter(adapter_t *adapter)
3658 {
3659     int i, save_and_restore_pcie =
3660         adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3661     uint16_t devid = 0;
3662 
3663     if (save_and_restore_pcie)
3664         t3_os_pci_save_state(adapter);
3665     t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3666 
3667     /*
3668      * Delay. Give Some time to device to reset fully.
3669      * XXX The delay time should be modified.
3670      */
3671     for (i = 0; i < 10; i++) {
3672         msleep(50);
3673         t3_os_pci_read_config_2(adapter, 0x00, &devid);
3674         if (devid == 0x1425)
3675             break;
3676     }
3677 
3678     if (devid != 0x1425)
3679         return -1;
3680 
3681     if (save_and_restore_pcie)
3682         t3_os_pci_restore_state(adapter);
3683     return 0;
3684 }
3685 
3686 /**
3687  *  t3_prep_adapter - prepare SW and HW for operation
3688  *  @adapter: the adapter
3689  *  @ai: contains information about the adapter type and properties
3690  *
3691  *  Initialize adapter SW state for the various HW modules, set initial
3692  *  values for some adapter tunables, take PHYs out of reset, and
3693  *  initialize the MDIO interface.
3694  */
t3_prep_adapter(adapter_t * adapter,const struct adapter_info * ai,int reset)3695 int __devinit t3_prep_adapter(adapter_t *adapter,
3696                   const struct adapter_info *ai, int reset)
3697 {
3698     int ret;
3699     unsigned int i, j = 0;
3700 
3701     get_pci_mode(adapter, &adapter->params.pci);
3702 
3703     adapter->params.info = ai;
3704     adapter->params.nports = ai->nports0 + ai->nports1;
3705     adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
3706     adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3707     adapter->params.linkpoll_period = 0;
3708     if (adapter->params.nports > 2)
3709         adapter->params.stats_update_period = VSC_STATS_ACCUM_SECS;
3710     else
3711         adapter->params.stats_update_period = is_10G(adapter) ?
3712             MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3713     adapter->params.pci.vpd_cap_addr =
3714         t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
3715 
3716     ret = get_vpd_params(adapter, &adapter->params.vpd);
3717     if (ret < 0)
3718         return ret;
3719 
3720     if (reset && t3_reset_adapter(adapter))
3721         return -1;
3722 
3723     t3_sge_prep(adapter, &adapter->params.sge);
3724 
3725     if (adapter->params.vpd.mclk) {
3726         struct tp_params *p = &adapter->params.tp;
3727 
3728         mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3729         mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3730         mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3731 
3732         p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3733         p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3734         p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3735         p->cm_size = t3_mc7_size(&adapter->cm);
3736         p->chan_rx_size = p->pmrx_size / 2;     /* only 1 Rx channel */
3737         p->chan_tx_size = p->pmtx_size / p->nchan;
3738         p->rx_pg_size = 64 * 1024;
3739         p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3740         p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3741         p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3742         p->ntimer_qs = p->cm_size >= (128 << 20) ||
3743                    adapter->params.rev > 0 ? 12 : 6;
3744         p->tre = fls(adapter->params.vpd.cclk / (1000 / TP_TMR_RES)) -
3745              1;
3746         p->dack_re = fls(adapter->params.vpd.cclk / 10) - 1; /* 100us */
3747     }
3748 
3749     adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3750                   t3_mc7_size(&adapter->pmtx) &&
3751                   t3_mc7_size(&adapter->cm);
3752 
3753     if (is_offload(adapter)) {
3754         adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3755         adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3756                            DEFAULT_NFILTERS : 0;
3757         adapter->params.mc5.nroutes = 0;
3758         t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3759 
3760 #ifdef CONFIG_CHELSIO_T3_CORE
3761         init_mtus(adapter->params.mtus);
3762         init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3763 #endif
3764     }
3765 
3766     early_hw_init(adapter, ai);
3767 
3768     if (adapter->params.nports > 2 &&
3769         (ret = t3_vsc7323_init(adapter, adapter->params.nports)))
3770         return ret;
3771 
3772     for_each_port(adapter, i) {
3773         u8 hw_addr[6];
3774         struct port_info *p = adap2pinfo(adapter, i);
3775 
3776         while (!adapter->params.vpd.port_type[j])
3777             ++j;
3778 
3779         p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3780         p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3781                        ai->mdio_ops);
3782         mac_prep(&p->mac, adapter, j);
3783         ++j;
3784 
3785         /*
3786          * The VPD EEPROM stores the base Ethernet address for the
3787          * card.  A port's address is derived from the base by adding
3788          * the port's index to the base's low octet.
3789          */
3790         memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3791         hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3792 
3793         t3_os_set_hw_addr(adapter, i, hw_addr);
3794         init_link_config(&p->link_config, p->port_type->caps);
3795         p->phy.ops->power_down(&p->phy, 1);
3796         if (!(p->port_type->caps & SUPPORTED_IRQ))
3797             adapter->params.linkpoll_period = 10;
3798     }
3799 
3800     return 0;
3801 }
3802 
/* Drive GPIO0 high to indicate the adapter is ready. */
void t3_led_ready(adapter_t *adapter)
{
    u32 led = F_GPIO0_OUT_VAL;

    t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, led, led);
}
3808 
/* Fail over to a single port: mark only @port active in the MPS. */
void t3_port_failover(adapter_t *adapter, int port)
{
    u32 active;

    if (port)
        active = F_PORT1ACTIVE;
    else
        active = F_PORT0ACTIVE;
    t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
             active);
}
3817 
/* Failover complete: re-activate both ports in the MPS. */
void t3_failover_done(adapter_t *adapter, int port)
{
    u32 both = F_PORT0ACTIVE | F_PORT1ACTIVE;

    t3_set_reg_field(adapter, A_MPS_CFG, both, both);
}
3823 
/* Clear any failover state: re-activate both ports in the MPS. */
void t3_failover_clear(adapter_t *adapter)
{
    u32 both = F_PORT0ACTIVE | F_PORT1ACTIVE;

    t3_set_reg_field(adapter, A_MPS_CFG, both, both);
}
3829