/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.

2. Neither the name of the Chelsio Corporation nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cxgb_xgmac.c,v 1.1 2010/03/21 21:11:13 jklos Exp $");

#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include "cxgb_include.h"
#endif

#undef msleep
#define msleep t3_os_sleep

/*
 * # of exact address filters. The first one is used for the station address,
 * the rest are available for multicast addresses.
 */
#define EXACT_ADDR_FILTERS 8

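/*
 * Map a MAC back to its port index (0 or 1) from the offset of its register
 * block relative to XGMAC0.
 */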
static inline int macidx(const struct cmac *mac)
{
	return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
}

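/*
 * Run the XAUI SERDES through a full reset: assert all of the lane reset,
 * PLL reset and power-down bits together with the VPD-supplied XAUI
 * configuration, then release them in stages with a short delay after each
 * step.
 */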
static void xaui_serdes_reset(struct cmac *mac)
{
	static const unsigned int clear[] = {
		F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
		F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
	};

	int i;
	adapter_t *adap = mac->adapter;
	u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;

	t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
		     F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
		     F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
		     F_RESETPLL23 | F_RESETPLL01);
	(void)t3_read_reg(adap, ctrl);
	udelay(15);

	for (i = 0; i < ARRAY_SIZE(clear); i++) {
		t3_set_reg_field(adap, ctrl, clear[i], 0);
		udelay(15);
	}
}

void t3b_pcs_reset(struct cmac *mac)
{
	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
			 F_PCS_RESET_, 0);
	udelay(20);
	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
			 F_PCS_RESET_);
}

int t3_mac_reset(struct cmac *mac)
{
	static struct addr_val_pair mac_reset_avp[] = {
		{ A_XGM_TX_CTRL, 0 },
		{ A_XGM_RX_CTRL, 0 },
		{ A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
				F_RMFCS | F_ENJUMBO | F_ENHASHMCAST },
		{ A_XGM_RX_HASH_LOW, 0 },
		{ A_XGM_RX_HASH_HIGH, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_1, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_2, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_3, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_4, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_5, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_6, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_7, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_8, 0 },
		{ A_XGM_STAT_CTRL, F_CLRSTATS }
	};
	u32 val;
	adapter_t *adap = mac->adapter;
	unsigned int oft = mac->offset;

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */

	t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
	t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
			 F_RXSTRFRWRD | F_DISERRFRAMES,
			 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);

	if (uses_xaui(adap)) {
		if (adap->params.rev == 0) {
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_RXENABLE | F_TXENABLE);
			if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
					    F_CMULOCK, 1, 5, 2)) {
				CH_ERR(adap,
				       "MAC %d XAUI SERDES CMU lock failed\n",
				       macidx(mac));
				return -1;
			}
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_SERDESRESET_);
		} else
			xaui_serdes_reset(mac);
	}

	if (mac->multiport) {
		t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
			     MAX_FRAME_SIZE - 4);
		t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0,
				 F_DISPREAMBLE);
		t3_set_reg_field(adap, A_XGM_RX_CFG + oft, 0, F_COPYPREAMBLE |
				 F_ENNON802_3PREAMBLE);
		t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft,
				 V_TXFIFOTHRESH(M_TXFIFOTHRESH),
				 V_TXFIFOTHRESH(64));
		t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
		t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
	}

	val = F_MAC_RESET_;
	if (is_10G(adap) || mac->multiport)
		val |= F_PCS_RESET_;
	else if (uses_xaui(adap))
		val |= F_PCS_RESET_ | F_XG2G_RESET_;
	else
		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
	(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */
	if ((val & F_PCS_RESET_) && adap->params.rev) {
		msleep(1);
		t3b_pcs_reset(mac);
	}

	memset(&mac->stats, 0, sizeof(mac->stats));
	return 0;
}

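/*
 * T3B2 variant of the MAC reset: quiesce egress to this MAC at the MPS,
 * let the XGMAC Rx FIFO drain, pulse the MAC and PCS resets, restore the
 * Rx configuration, and finally re-activate the port at the MPS.
 */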
static int t3b2_mac_reset(struct cmac *mac)
{
	u32 val;
	adapter_t *adap = mac->adapter;
	unsigned int oft = mac->offset;

	/* Stop egress traffic to xgm */
	if (!macidx(mac))
		t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
	else
		t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);

	/* PCS in reset */
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */

	msleep(10);

	/* Check for xgm Rx fifo empty */
	if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
			    0x80000000, 1, 5, 2)) {
		CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
		       macidx(mac));
		return -1;
	}

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);    /* MAC in reset */
	(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */

	val = F_MAC_RESET_;
	if (is_10G(adap))
		val |= F_PCS_RESET_;
	else if (uses_xaui(adap))
		val |= F_PCS_RESET_ | F_XG2G_RESET_;
	else
		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
	(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */
	if ((val & F_PCS_RESET_) && adap->params.rev) {
		msleep(1);
		t3b_pcs_reset(mac);
	}
	t3_write_reg(adap, A_XGM_RX_CFG + oft,
		     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
		     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);

	/* Resume egress traffic to xgm */
	if (!macidx(mac))
		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
	else
		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);

	return 0;
}

/*
 * Set the exact match register 'idx' to recognize the given Ethernet address.
 */
static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
{
	u32 addr_lo, addr_hi;
	unsigned int oft = mac->offset + idx * 8;

	addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	addr_hi = (addr[5] << 8) | addr[4];
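	/*
	 * Example byte layout (follows from the shifts above): for the
	 * address 00:07:43:ab:cd:ef, addr_lo ends up as 0xab430700 and
	 * addr_hi as 0x0000efcd, i.e. octet 0 sits in the low byte of the
	 * LOW register and octet 5 in bits 15:8 of the HIGH register.
	 */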

	t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
	t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
}

/* Set one of the station's unicast MAC addresses. */
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
{
	if (mac->multiport)
		idx = mac->ext_port + idx * mac->adapter->params.nports;
	if (idx >= mac->nucast)
		return -EINVAL;
	set_addr_filter(mac, idx, addr);
	if (mac->multiport && idx < mac->adapter->params.nports)
		t3_vsc7323_set_addr(mac->adapter, addr, idx);
	return 0;
}

/*
 * Specify the number of exact address filters that should be reserved for
 * unicast addresses. Caller should reload the unicast and multicast addresses
 * after calling this.
 */
int t3_mac_set_num_ucast(struct cmac *mac, unsigned char n)
{
	if (n > EXACT_ADDR_FILTERS)
		return -EINVAL;
	mac->nucast = n;
	return 0;
}

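/*
 * The two helpers below rewrite the exact-match registers with the values
 * they already contain.  The point, presumably, is that the hardware only
 * arms a filter when its HIGH register is written: rewriting just the LOW
 * registers takes the filters out of service, and rewriting the HIGH
 * registers re-arms them with the addresses they still hold.
 */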
static void disable_exact_filters(struct cmac *mac)
{
	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;

	for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
		u32 v = t3_read_reg(mac->adapter, reg);
		t3_write_reg(mac->adapter, reg, v);
	}
	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);    /* flush */
}

static void enable_exact_filters(struct cmac *mac)
{
	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;

	for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
		u32 v = t3_read_reg(mac->adapter, reg);
		t3_write_reg(mac->adapter, reg, v);
	}
	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);    /* flush */
}

/* Calculate the RX hash filter index of an Ethernet address */
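/*
 * The 48 address bits are XOR-folded into a 6-bit value: bit N of the
 * address toggles bit (N mod 6) of the hash, giving an index in 0..63 that
 * selects one bit of the RX_HASH_LOW/RX_HASH_HIGH register pair.
 */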
static int hash_hw_addr(const u8 *addr)
{
	int hash = 0, octet, bit, i = 0, c;

	for (octet = 0; octet < 6; ++octet)
		for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
			hash ^= (c & 1) << i;
			if (++i == 6)
				i = 0;
		}
	return hash;
}

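/*
 * Program the Rx address filters for the requested Rx mode: set or clear
 * promiscuous copying, then load multicast addresses into whatever
 * exact-match filters are not reserved for unicast use, falling back to the
 * 64-bit hash table once those run out (or opening the hash completely for
 * all-multicast and multiport MACs).
 */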
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
{
	u32 hash_lo, hash_hi;
	adapter_t *adap = mac->adapter;
	unsigned int oft = mac->offset;

	if (promisc_rx_mode(rm))
		mac->promisc_map |= 1 << mac->ext_port;
	else
		mac->promisc_map &= ~(1 << mac->ext_port);
	t3_set_reg_field(adap, A_XGM_RX_CFG + oft, F_COPYALLFRAMES,
			 mac->promisc_map ? F_COPYALLFRAMES : 0);

	if (allmulti_rx_mode(rm) || mac->multiport)
		hash_lo = hash_hi = 0xffffffff;
	else {
		u8 *addr;
		int exact_addr_idx = mac->nucast;

		hash_lo = hash_hi = 0;
		while ((addr = t3_get_next_mcaddr(rm)))
			if (exact_addr_idx < EXACT_ADDR_FILTERS)
				set_addr_filter(mac, exact_addr_idx++, addr);
			else {
				int hash = hash_hw_addr(addr);

				if (hash < 32)
					hash_lo |= (1 << hash);
				else
					hash_hi |= (1 << (hash - 32));
			}
	}

	t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
	t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
	return 0;
}

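/*
 * Choose the PAUSE high watermark for the given MTU: nominally the FIFO
 * size less three maximum-size frames, but never below 38% of the FIFO and
 * always leaving at least 8KB of headroom.
 */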
static int rx_fifo_hwm(int mtu)
{
	int hwm;

	hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
	return min(hwm, MAC_RXFIFO_SIZE - 8192);
}

int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
	int hwm, lwm;
	unsigned int thres, v;
	adapter_t *adap = mac->adapter;

	/*
	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't. The HW max
	 * packet size register includes header, but not FCS.
	 */
	mtu += 14;
	if (mac->multiport)
		mtu += 8;                        /* for preamble */
	if (mtu > MAX_FRAME_SIZE - 4)
		return -EINVAL;
	if (mac->multiport)
		return t3_vsc7323_set_mtu(adap, mtu - 4, mac->ext_port);

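	/*
	 * On T3 rev B2, the max packet size is apparently not safe to change
	 * while the Rx FIFO holds data: receive traffic is blocked first
	 * (exact filters disarmed, hash-multicast and copy-all cleared,
	 * broadcasts discarded) and the FIFO drained before the new size is
	 * written, after which the original Rx configuration is restored.
	 */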
	if (adap->params.rev == T3_REV_B2 &&
	    (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
		disable_exact_filters(mac);
		v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
		t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
				 F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);

		/* drain rx FIFO */
		if (t3_wait_op_done(adap,
				    A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + mac->offset,
				    1 << 31, 1, 20, 5)) {
			t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
			enable_exact_filters(mac);
			return -EIO;
		}
		t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
		t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
		enable_exact_filters(mac);
	} else
		t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);

	/*
	 * Adjust the PAUSE frame watermarks. We always set the LWM, and the
	 * HWM only if flow-control is enabled.
	 */
	hwm = rx_fifo_hwm(mtu);
	lwm = min(3 * (int) mtu, MAC_RXFIFO_SIZE / 4);
	v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
	v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
	v |= V_RXFIFOPAUSELWM(lwm / 8);
	if (G_RXFIFOPAUSEHWM(v))
		v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
		    V_RXFIFOPAUSEHWM(hwm / 8);

	t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);

	/* Adjust the TX FIFO threshold based on the MTU */
	thres = (adap->params.vpd.cclk * 1000) / 15625;
	thres = (thres * mtu) / 1000;
	if (is_10G(adap))
		thres /= 10;
	thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
	thres = max(thres, 8U);                  /* need at least 8 */
	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
			 V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
			 V_TXFIFOTHRESH(thres) | V_TXIPG(1));

	/* Assuming a minimum drain rate of 2.5Gbps... */
	if (adap->params.rev > 0)
		t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
			     (hwm - lwm) * 4 / 8);
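	/*
	 * The pause quanta written below work out to the time needed to
	 * drain the whole Rx FIFO at that assumed 2.5 Gb/s rate, expressed
	 * in standard 512-bit-time quanta: MAC_RXFIFO_SIZE bytes * 8 bits,
	 * times 4 for the quarter-of-10G drain rate, divided by 512.  (This
	 * is an interpretation of the arithmetic, not documented here.)
	 */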
	t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
		     MAC_RXFIFO_SIZE * 4 * 8 / 512);
	return 0;
}

int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
{
	u32 val;
	adapter_t *adap = mac->adapter;
	unsigned int oft = mac->offset;

	if (duplex >= 0 && duplex != DUPLEX_FULL)
		return -EINVAL;
	if (mac->multiport) {
		val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
		val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
		val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(t3_read_reg(adap,
					A_XGM_RX_MAX_PKT_SIZE + oft)) / 8);
		t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);

		t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
				 F_TXPAUSEEN);
		return t3_vsc7323_set_speed_fc(adap, speed, fc, mac->ext_port);
	}
	if (speed >= 0) {
		if (speed == SPEED_10)
			val = V_PORTSPEED(0);
		else if (speed == SPEED_100)
			val = V_PORTSPEED(1);
		else if (speed == SPEED_1000)
			val = V_PORTSPEED(2);
		else if (speed == SPEED_10000)
			val = V_PORTSPEED(3);
		else
			return -EINVAL;

		t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
				 V_PORTSPEED(M_PORTSPEED), val);
	}

	val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
	val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
	if (fc & PAUSE_TX)
		val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(t3_read_reg(adap,
					A_XGM_RX_MAX_PKT_SIZE + oft)) / 8);
	t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);

	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
			 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
	return 0;
}

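/*
 * Enable the MAC in the given direction(s).  Enabling Tx also programs the
 * TP drop configuration for this channel and snapshots the frame, drop and
 * SPI-4 SOP/EOP counters that t3b2_mac_watchdog_task later compares against
 * to decide whether the MAC has stalled.
 */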
int t3_mac_enable(struct cmac *mac, int which)
{
	int idx = macidx(mac);
	adapter_t *adap = mac->adapter;
	unsigned int oft = mac->offset;
	struct mac_stats *s = &mac->stats;

	if (mac->multiport)
		return t3_vsc7323_enable(adap, mac->ext_port, which);

	if (which & MAC_DIRECTION_TX) {
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
		t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401);
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);

		t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);

		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
		mac->tx_mcnt = s->tx_frames;
		mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
						A_TP_PIO_DATA)));
		mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_TX_SPI4_SOP_EOP_CNT +
						oft)));
		mac->rx_mcnt = s->rx_frames;
		mac->rx_pause = s->rx_pause;
		mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_RX_SPI4_SOP_EOP_CNT +
						oft)));
		mac->rx_ocnt = s->rx_fifo_ovfl;
		mac->txen = F_TXEN;
		mac->toggle_cnt = 0;
	}
	if (which & MAC_DIRECTION_RX)
		t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
	return 0;
}

int t3_mac_disable(struct cmac *mac, int which)
{
	adapter_t *adap = mac->adapter;

	if (mac->multiport)
		return t3_vsc7323_disable(adap, mac->ext_port, which);

	if (which & MAC_DIRECTION_TX) {
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
		mac->txen = 0;
	}
	if (which & MAC_DIRECTION_RX) {
		int val = F_MAC_RESET_;

		t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
				 F_PCS_RESET_, 0);
		msleep(100);
		t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
		if (is_10G(adap))
			val |= F_PCS_RESET_;
		else if (uses_xaui(adap))
			val |= F_PCS_RESET_ | F_XG2G_RESET_;
		else
			val |= F_RGMII_RESET_ | F_XG2G_RESET_;
		t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
	}
	return 0;
}

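/*
 * Periodic watchdog for T3B2 MACs.  It compares the current frame, drop and
 * SPI-4 SOP/EOP counters against the values saved on the previous run to
 * decide whether traffic is still flowing.  The return value reports what
 * was done: 0 if everything looks healthy, 1 if the transmitter appeared
 * stuck and TXEN was toggled, 2 if the problem persisted (or Rx stalled)
 * and the MAC was reset with t3b2_mac_reset().
 */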
int t3b2_mac_watchdog_task(struct cmac *mac)
{
	int status;
	unsigned int tx_tcnt, tx_xcnt;
	adapter_t *adap = mac->adapter;
	struct mac_stats *s = &mac->stats;
	unsigned int tx_mcnt = (unsigned int)s->tx_frames;
	unsigned int rx_mcnt = (unsigned int)s->rx_frames;
	unsigned int rx_xcnt;

	if (mac->multiport) {
		tx_mcnt = t3_read_reg(adap, A_XGM_STAT_TX_FRAME_LOW);
		rx_mcnt = t3_read_reg(adap, A_XGM_STAT_RX_FRAMES_LOW);
	} else {
		tx_mcnt = (unsigned int)s->tx_frames;
		rx_mcnt = (unsigned int)s->rx_frames;
	}
	status = 0;
	tx_xcnt = 1;            /* By default tx_xcnt is making progress */
	tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */
	rx_xcnt = 1;            /* By default rx_xcnt is making progress */
	if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
		tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_TX_SPI4_SOP_EOP_CNT +
						mac->offset)));
		if (tx_xcnt == 0) {
			t3_write_reg(adap, A_TP_PIO_ADDR,
				     A_TP_TX_DROP_CNT_CH0 + macidx(mac));
			tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
						A_TP_PIO_DATA)));
		} else {
			goto rxcheck;
		}
	} else {
		mac->toggle_cnt = 0;
		goto rxcheck;
	}

	if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
		if (mac->toggle_cnt > 4) {
			status = 2;
			goto out;
		} else {
			status = 1;
			goto out;
		}
	} else {
		mac->toggle_cnt = 0;
		goto rxcheck;
	}

rxcheck:
	if (rx_mcnt != mac->rx_mcnt) {
		rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_RX_SPI4_SOP_EOP_CNT +
						mac->offset))) +
			  (s->rx_fifo_ovfl - mac->rx_ocnt);
		mac->rx_ocnt = s->rx_fifo_ovfl;
	} else
		goto out;

	if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && mac->rx_xcnt == 0) {
		if (!mac->multiport)
			status = 2;
		goto out;
	}

out:
	mac->tx_tcnt = tx_tcnt;
	mac->tx_xcnt = tx_xcnt;
	mac->tx_mcnt = s->tx_frames;
	mac->rx_xcnt = rx_xcnt;
	mac->rx_mcnt = s->rx_frames;
	mac->rx_pause = s->rx_pause;
	if (status == 1) {
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
		t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);    /* flush */
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
		t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);    /* flush */
		mac->toggle_cnt++;
	} else if (status == 2) {
		t3b2_mac_reset(mac);
		mac->toggle_cnt = 0;
	}
	return status;
}

/*
 * This function is called periodically to accumulate the current values of
 * the RMON counters into the port statistics. Since the packet counters are
 * only 32 bits they can overflow in ~286 secs at 10G, so the function should
 * be called more frequently than that. The byte counters are 45 bits wide;
 * they would overflow in ~7.8 hours.
 */
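/*
 * A rough check of those figures: a minimum-size frame occupies 84 bytes on
 * the 10G wire (64-byte frame + 8-byte preamble + 12-byte IFG), i.e. about
 * 14.9M frames/s, which wraps a 32-bit frame counter in just under five
 * minutes; the 45-bit byte counters wrap after 2^45 bytes / 1.25e9 bytes/s,
 * roughly 28000 seconds, i.e. about 7.8 hours.
 */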
const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
{
#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
#define RMON_UPDATE(mac, name, reg) \
	(mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
	(mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
			     ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)

	u32 v, lo;

	if (mac->multiport)
		return t3_vsc7323_update_stats(mac);

	RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
	RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
	RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
	RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
	RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
	RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
	RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
	RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
	RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);

	RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);

	v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
	if (mac->adapter->params.rev == T3_REV_B2)
		v &= 0x7fffffff;
	mac->stats.rx_too_long += v;

	RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
	RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
	RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
	RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
	RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
	RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
	RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);

	RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
	RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
	RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
	RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
	RMON_UPDATE(mac, tx_pause, TX_PAUSE);
	/* This counts error frames in general (bad FCS, underrun, etc). */
	RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);

	RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
	RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
	RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
	RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
	RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
	RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
	RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);

	/* The next stat isn't clear-on-read. */
	t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
	v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
	lo = (u32)mac->stats.rx_cong_drops;
	mac->stats.rx_cong_drops += (u64)(v - lo);

	return &mac->stats;
}