1 /* $OpenBSD: if_sk.c,v 1.33 2003/08/12 05:23:06 nate Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-4-Clause
5 *
6 * Copyright (c) 1997, 1998, 1999, 2000
7 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Bill Paul.
20 * 4. Neither the name of the author nor the names of any co-contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
34 * THE POSSIBILITY OF SUCH DAMAGE.
35 */
36 /*-
37 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
38 *
39 * Permission to use, copy, modify, and distribute this software for any
40 * purpose with or without fee is hereby granted, provided that the above
41 * copyright notice and this permission notice appear in all copies.
42 *
43 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
44 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
46 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
47 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
48 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
49 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
50 */
51
52 #include <sys/cdefs.h>
53 /*
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
56 * References:
57 * The XaQti XMAC II datasheet,
58 * https://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 * The SysKonnect GEnesis manual, http://www.syskonnect.com
60 *
61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
64 *
65 * https://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66 *
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
70 */
71 /*
72 * The SysKonnect gigabit ethernet adapters consist of two main
73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75 * components and a PHY while the GEnesis controller provides a PCI
76 * interface with DMA support. Each card may have between 512K and
77 * 2MB of SRAM on board depending on the configuration.
78 *
79 * The SysKonnect GEnesis controller can have either one or two XMAC
80 * chips connected to it, allowing single or dual port NIC configurations.
81 * SysKonnect has the distinction of being the only vendor on the market
82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84 * XMAC registers. This driver takes advantage of these features to allow
85 * both XMACs to operate as independent interfaces.
86 */
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/bus.h>
91 #include <sys/endian.h>
92 #include <sys/mbuf.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/module.h>
96 #include <sys/socket.h>
97 #include <sys/sockio.h>
98 #include <sys/queue.h>
99 #include <sys/sysctl.h>
100
101 #include <net/bpf.h>
102 #include <net/ethernet.h>
103 #include <net/if.h>
104 #include <net/if_var.h>
105 #include <net/if_arp.h>
106 #include <net/if_dl.h>
107 #include <net/if_media.h>
108 #include <net/if_types.h>
109 #include <net/if_vlan_var.h>
110
111 #include <netinet/in.h>
112 #include <netinet/in_systm.h>
113 #include <netinet/ip.h>
114
115 #include <machine/bus.h>
116 #include <machine/in_cksum.h>
117 #include <machine/resource.h>
118 #include <sys/rman.h>
119
120 #include <dev/mii/mii.h>
121 #include <dev/mii/miivar.h>
122 #include <dev/mii/brgphyreg.h>
123
124 #include <dev/pci/pcireg.h>
125 #include <dev/pci/pcivar.h>
126
127 #if 0
128 #define SK_USEIOSPACE
129 #endif
130
131 #include <dev/sk/if_skreg.h>
132 #include <dev/sk/xmaciireg.h>
133 #include <dev/sk/yukonreg.h>
134
135 MODULE_DEPEND(sk, pci, 1, 1, 1);
136 MODULE_DEPEND(sk, ether, 1, 1, 1);
137 MODULE_DEPEND(sk, miibus, 1, 1, 1);
138
139 /* "device miibus" required. See GENERIC if you get errors here. */
140 #include "miibus_if.h"
141
142 static const struct sk_type sk_devs[] = {
143 {
144 VENDORID_SK,
145 DEVICEID_SK_V1,
146 "SysKonnect Gigabit Ethernet (V1.0)"
147 },
148 {
149 VENDORID_SK,
150 DEVICEID_SK_V2,
151 "SysKonnect Gigabit Ethernet (V2.0)"
152 },
153 {
154 VENDORID_MARVELL,
155 DEVICEID_SK_V2,
156 "Marvell Gigabit Ethernet"
157 },
158 {
159 VENDORID_MARVELL,
160 DEVICEID_BELKIN_5005,
161 "Belkin F5D5005 Gigabit Ethernet"
162 },
163 {
164 VENDORID_3COM,
165 DEVICEID_3COM_3C940,
166 "3Com 3C940 Gigabit Ethernet"
167 },
168 {
169 VENDORID_LINKSYS,
170 DEVICEID_LINKSYS_EG1032,
171 "Linksys EG1032 Gigabit Ethernet"
172 },
173 {
174 VENDORID_DLINK,
175 DEVICEID_DLINK_DGE530T_A1,
176 "D-Link DGE-530T Gigabit Ethernet"
177 },
178 {
179 VENDORID_DLINK,
180 DEVICEID_DLINK_DGE530T_B1,
181 "D-Link DGE-530T Gigabit Ethernet"
182 },
183 { 0, 0, NULL }
184 };
185
186 static int skc_probe(device_t);
187 static int skc_attach(device_t);
188 static int skc_detach(device_t);
189 static int skc_shutdown(device_t);
190 static int skc_suspend(device_t);
191 static int skc_resume(device_t);
192 static bus_dma_tag_t skc_get_dma_tag(device_t, device_t);
193 static int sk_detach(device_t);
194 static int sk_probe(device_t);
195 static int sk_attach(device_t);
196 static void sk_tick(void *);
197 static void sk_yukon_tick(void *);
198 static void sk_intr(void *);
199 static void sk_intr_xmac(struct sk_if_softc *);
200 static void sk_intr_bcom(struct sk_if_softc *);
201 static void sk_intr_yukon(struct sk_if_softc *);
202 static __inline void sk_rxcksum(if_t, struct mbuf *, u_int32_t);
203 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
204 static void sk_rxeof(struct sk_if_softc *);
205 static void sk_jumbo_rxeof(struct sk_if_softc *);
206 static void sk_txeof(struct sk_if_softc *);
207 static void sk_txcksum(if_t, struct mbuf *, struct sk_tx_desc *);
208 static int sk_encap(struct sk_if_softc *, struct mbuf **);
209 static void sk_start(if_t);
210 static void sk_start_locked(if_t);
211 static int sk_ioctl(if_t, u_long, caddr_t);
212 static void sk_init(void *);
213 static void sk_init_locked(struct sk_if_softc *);
214 static void sk_init_xmac(struct sk_if_softc *);
215 static void sk_init_yukon(struct sk_if_softc *);
216 static void sk_stop(struct sk_if_softc *);
217 static void sk_watchdog(void *);
218 static int sk_ifmedia_upd(if_t);
219 static void sk_ifmedia_sts(if_t, struct ifmediareq *);
220 static void sk_reset(struct sk_softc *);
221 static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
222 static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
223 static int sk_newbuf(struct sk_if_softc *, int);
224 static int sk_jumbo_newbuf(struct sk_if_softc *, int);
225 static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
226 static int sk_dma_alloc(struct sk_if_softc *);
227 static int sk_dma_jumbo_alloc(struct sk_if_softc *);
228 static void sk_dma_free(struct sk_if_softc *);
229 static void sk_dma_jumbo_free(struct sk_if_softc *);
230 static int sk_init_rx_ring(struct sk_if_softc *);
231 static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
232 static void sk_init_tx_ring(struct sk_if_softc *);
233 static u_int32_t sk_win_read_4(struct sk_softc *, int);
234 static u_int16_t sk_win_read_2(struct sk_softc *, int);
235 static u_int8_t sk_win_read_1(struct sk_softc *, int);
236 static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
237 static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
238 static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
239
240 static int sk_miibus_readreg(device_t, int, int);
241 static int sk_miibus_writereg(device_t, int, int, int);
242 static void sk_miibus_statchg(device_t);
243
244 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
245 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
246 int);
247 static void sk_xmac_miibus_statchg(struct sk_if_softc *);
248
249 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
250 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
251 int);
252 static void sk_marv_miibus_statchg(struct sk_if_softc *);
253
254 static uint32_t sk_xmchash(const uint8_t *);
255 static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
256 static void sk_rxfilter(struct sk_if_softc *);
257 static void sk_rxfilter_genesis(struct sk_if_softc *);
258 static void sk_rxfilter_yukon(struct sk_if_softc *);
259
260 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
261 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
262
263 /* Tunables. */
264 static int jumbo_disable = 0;
265 TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);
266
267 /*
268 * It seems that the SK-NET GENESIS supports only a very simple checksum
269 * offload capability for Tx, and I believe it can generate a 0 checksum
270 * value for UDP packets in Tx as the hardware can't differentiate UDP
271 * packets from TCP packets. A 0 checksum value for a UDP packet is
272 * invalid, as it means the sender didn't perform the checksum computation.
273 * For safety, I disabled UDP checksum offload capability for the moment.
274 */
275 #define SK_CSUM_FEATURES (CSUM_TCP)
276
277 /*
278 * Note that we have newbus methods for both the GEnesis controller
279 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
280 * the miibus code is a child of the XMACs. We need to do it this way
281 * so that the miibus drivers can access the PHY registers on the
282 * right PHY. It's not quite what I had in mind, but it's the only
283 * design that achieves the desired effect.
284 */
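/*
 * The resulting newbus hierarchy, as established by the DRIVER_MODULE()
 * declarations below, is roughly:
 *
 *   pci -> skc (GEnesis/Yukon controller) -> sk (one per MAC) -> miibus -> PHY
 */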
285 static device_method_t skc_methods[] = {
286 /* Device interface */
287 DEVMETHOD(device_probe, skc_probe),
288 DEVMETHOD(device_attach, skc_attach),
289 DEVMETHOD(device_detach, skc_detach),
290 DEVMETHOD(device_suspend, skc_suspend),
291 DEVMETHOD(device_resume, skc_resume),
292 DEVMETHOD(device_shutdown, skc_shutdown),
293
294 DEVMETHOD(bus_get_dma_tag, skc_get_dma_tag),
295
296 DEVMETHOD_END
297 };
298
299 static driver_t skc_driver = {
300 "skc",
301 skc_methods,
302 sizeof(struct sk_softc)
303 };
304
305 static device_method_t sk_methods[] = {
306 /* Device interface */
307 DEVMETHOD(device_probe, sk_probe),
308 DEVMETHOD(device_attach, sk_attach),
309 DEVMETHOD(device_detach, sk_detach),
310 DEVMETHOD(device_shutdown, bus_generic_shutdown),
311
312 /* MII interface */
313 DEVMETHOD(miibus_readreg, sk_miibus_readreg),
314 DEVMETHOD(miibus_writereg, sk_miibus_writereg),
315 DEVMETHOD(miibus_statchg, sk_miibus_statchg),
316
317 DEVMETHOD_END
318 };
319
320 static driver_t sk_driver = {
321 "sk",
322 sk_methods,
323 sizeof(struct sk_if_softc)
324 };
325
326 DRIVER_MODULE(skc, pci, skc_driver, NULL, NULL);
327 DRIVER_MODULE(sk, skc, sk_driver, NULL, NULL);
328 DRIVER_MODULE(miibus, sk, miibus_driver, NULL, NULL);
329
330 static struct resource_spec sk_res_spec_io[] = {
331 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
332 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
333 { -1, 0, 0 }
334 };
335
336 static struct resource_spec sk_res_spec_mem[] = {
337 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
338 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
339 { -1, 0, 0 }
340 };
341
342 #define SK_SETBIT(sc, reg, x) \
343 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
344
345 #define SK_CLRBIT(sc, reg, x) \
346 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
347
348 #define SK_WIN_SETBIT_4(sc, reg, x) \
349 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
350
351 #define SK_WIN_CLRBIT_4(sc, reg, x) \
352 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
353
354 #define SK_WIN_SETBIT_2(sc, reg, x) \
355 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
356
357 #define SK_WIN_CLRBIT_2(sc, reg, x) \
358 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
359
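/*
 * Register access helpers.  With SK_USEIOSPACE defined, registers are
 * reached indirectly: the RAP register selects a window of the register
 * file (SK_WIN(reg)) and the access is then made at SK_WIN_BASE plus the
 * offset within that window (SK_REG(reg)).  In the default memory-mapped
 * case the full register file is visible and the offset is used directly.
 */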
360 static u_int32_t
361 sk_win_read_4(struct sk_softc *sc, int reg)
362 {
363 #ifdef SK_USEIOSPACE
364 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
365 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
366 #else
367 return(CSR_READ_4(sc, reg));
368 #endif
369 }
370
371 static u_int16_t
372 sk_win_read_2(struct sk_softc *sc, int reg)
373 {
374 #ifdef SK_USEIOSPACE
375 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
376 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
377 #else
378 return(CSR_READ_2(sc, reg));
379 #endif
380 }
381
382 static u_int8_t
383 sk_win_read_1(struct sk_softc *sc, int reg)
384 {
385 #ifdef SK_USEIOSPACE
386 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
387 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
388 #else
389 return(CSR_READ_1(sc, reg));
390 #endif
391 }
392
393 static void
394 sk_win_write_4(struct sk_softc *sc, int reg, u_int32_t val)
395 {
396 #ifdef SK_USEIOSPACE
397 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
398 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
399 #else
400 CSR_WRITE_4(sc, reg, val);
401 #endif
402 return;
403 }
404
405 static void
406 sk_win_write_2(struct sk_softc *sc, int reg, u_int32_t val)
407 {
408 #ifdef SK_USEIOSPACE
409 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
410 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
411 #else
412 CSR_WRITE_2(sc, reg, val);
413 #endif
414 return;
415 }
416
417 static void
418 sk_win_write_1(struct sk_softc *sc, int reg, u_int32_t val)
419 {
420 #ifdef SK_USEIOSPACE
421 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
422 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
423 #else
424 CSR_WRITE_1(sc, reg, val);
425 #endif
426 return;
427 }
428
429 static int
430 sk_miibus_readreg(device_t dev, int phy, int reg)
431 {
432 struct sk_if_softc *sc_if;
433 int v;
434
435 sc_if = device_get_softc(dev);
436
437 SK_IF_MII_LOCK(sc_if);
438 switch(sc_if->sk_softc->sk_type) {
439 case SK_GENESIS:
440 v = sk_xmac_miibus_readreg(sc_if, phy, reg);
441 break;
442 case SK_YUKON:
443 case SK_YUKON_LITE:
444 case SK_YUKON_LP:
445 v = sk_marv_miibus_readreg(sc_if, phy, reg);
446 break;
447 default:
448 v = 0;
449 break;
450 }
451 SK_IF_MII_UNLOCK(sc_if);
452
453 return (v);
454 }
455
456 static int
457 sk_miibus_writereg(device_t dev, int phy, int reg, int val)
458 {
459 struct sk_if_softc *sc_if;
460 int v;
461
462 sc_if = device_get_softc(dev);
463
464 SK_IF_MII_LOCK(sc_if);
465 switch(sc_if->sk_softc->sk_type) {
466 case SK_GENESIS:
467 v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
468 break;
469 case SK_YUKON:
470 case SK_YUKON_LITE:
471 case SK_YUKON_LP:
472 v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
473 break;
474 default:
475 v = 0;
476 break;
477 }
478 SK_IF_MII_UNLOCK(sc_if);
479
480 return (v);
481 }
482
483 static void
484 sk_miibus_statchg(device_t dev)
485 {
486 struct sk_if_softc *sc_if;
487
488 sc_if = device_get_softc(dev);
489
490 SK_IF_MII_LOCK(sc_if);
491 switch(sc_if->sk_softc->sk_type) {
492 case SK_GENESIS:
493 sk_xmac_miibus_statchg(sc_if);
494 break;
495 case SK_YUKON:
496 case SK_YUKON_LITE:
497 case SK_YUKON_LP:
498 sk_marv_miibus_statchg(sc_if);
499 break;
500 }
501 SK_IF_MII_UNLOCK(sc_if);
502
503 return;
504 }
505
506 static int
507 sk_xmac_miibus_readreg(struct sk_if_softc *sc_if, int phy, int reg)
508 {
509 int i;
510
511 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
512 SK_XM_READ_2(sc_if, XM_PHY_DATA);
513 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
514 for (i = 0; i < SK_TIMEOUT; i++) {
515 DELAY(1);
516 if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
517 XM_MMUCMD_PHYDATARDY)
518 break;
519 }
520
521 if (i == SK_TIMEOUT) {
522 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
523 return(0);
524 }
525 }
526 DELAY(1);
527 i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
528
529 return(i);
530 }
531
532 static int
533 sk_xmac_miibus_writereg(struct sk_if_softc *sc_if, int phy, int reg, int val)
534 {
535 int i;
536
537 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
538 for (i = 0; i < SK_TIMEOUT; i++) {
539 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
540 break;
541 }
542
543 if (i == SK_TIMEOUT) {
544 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
545 return (ETIMEDOUT);
546 }
547
548 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
549 for (i = 0; i < SK_TIMEOUT; i++) {
550 DELAY(1);
551 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
552 break;
553 }
554 if (i == SK_TIMEOUT)
555 if_printf(sc_if->sk_ifp, "phy write timed out\n");
556
557 return(0);
558 }
559
560 static void
561 sk_xmac_miibus_statchg(struct sk_if_softc *sc_if)
562 {
563 struct mii_data *mii;
564
565 mii = device_get_softc(sc_if->sk_miibus);
566
567 /*
568 * If this is a GMII PHY, manually set the XMAC's
569 * duplex mode accordingly.
570 */
571 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
572 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
573 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
574 } else {
575 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
576 }
577 }
578 }
579
580 static int
581 sk_marv_miibus_readreg(struct sk_if_softc *sc_if, int phy, int reg)
582 {
583 u_int16_t val;
584 int i;
585
586 if (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
587 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER) {
588 return(0);
589 }
590
591 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
592 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
593
594 for (i = 0; i < SK_TIMEOUT; i++) {
595 DELAY(1);
596 val = SK_YU_READ_2(sc_if, YUKON_SMICR);
597 if (val & YU_SMICR_READ_VALID)
598 break;
599 }
600
601 if (i == SK_TIMEOUT) {
602 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
603 return(0);
604 }
605
606 val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
607
608 return(val);
609 }
610
611 static int
612 sk_marv_miibus_writereg(struct sk_if_softc *sc_if, int phy, int reg, int val)
613 {
614 int i;
615
616 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
617 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
618 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
619
620 for (i = 0; i < SK_TIMEOUT; i++) {
621 DELAY(1);
622 if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
623 break;
624 }
625 if (i == SK_TIMEOUT)
626 if_printf(sc_if->sk_ifp, "phy write timeout\n");
627
628 return(0);
629 }
630
631 static void
632 sk_marv_miibus_statchg(struct sk_if_softc *sc_if)
633 {
634 return;
635 }
636
637 #define HASH_BITS 6
638
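/*
 * Multicast hash for the XMAC II (GENESIS): the 64-bit hash table
 * (XM_MAR0/XM_MAR2) is indexed by the low HASH_BITS bits of the
 * bit-inverted little-endian CRC-32 of the multicast address.
 */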
639 static u_int32_t
640 sk_xmchash(const uint8_t *addr)
641 {
642 uint32_t crc;
643
644 /* Compute CRC for the address value. */
645 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
646
647 return (~crc & ((1 << HASH_BITS) - 1));
648 }
649
650 static void
651 sk_setfilt(struct sk_if_softc *sc_if, u_int16_t *addr, int slot)
652 {
653 int base;
654
655 base = XM_RXFILT_ENTRY(slot);
656
657 SK_XM_WRITE_2(sc_if, base, addr[0]);
658 SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
659 SK_XM_WRITE_2(sc_if, base + 4, addr[2]);
660
661 return;
662 }
663
664 static void
665 sk_rxfilter(struct sk_if_softc *sc_if)
666 {
667 struct sk_softc *sc;
668
669 SK_IF_LOCK_ASSERT(sc_if);
670
671 sc = sc_if->sk_softc;
672 if (sc->sk_type == SK_GENESIS)
673 sk_rxfilter_genesis(sc_if);
674 else
675 sk_rxfilter_yukon(sc_if);
676 }
677
678 struct sk_add_maddr_genesis_ctx {
679 struct sk_if_softc *sc_if;
680 uint32_t hashes[2];
681 uint32_t mode;
682 };
683
684 static u_int
685 sk_add_maddr_genesis(void *arg, struct sockaddr_dl *sdl, u_int cnt)
686 {
687 struct sk_add_maddr_genesis_ctx *ctx = arg;
688 int h;
689
690 /*
691 * Program the first XM_RXFILT_MAX multicast groups
692 * into the perfect filter.
693 */
694 if (cnt + 1 < XM_RXFILT_MAX) {
695 sk_setfilt(ctx->sc_if, (uint16_t *)LLADDR(sdl), cnt + 1);
696 ctx->mode |= XM_MODE_RX_USE_PERFECT;
697 return (1);
698 }
699 h = sk_xmchash((const uint8_t *)LLADDR(sdl));
700 if (h < 32)
701 ctx->hashes[0] |= (1 << h);
702 else
703 ctx->hashes[1] |= (1 << (h - 32));
704 ctx->mode |= XM_MODE_RX_USE_HASH;
705
706 return (1);
707 }
708
709 static void
710 sk_rxfilter_genesis(struct sk_if_softc *sc_if)
711 {
712 if_t ifp = sc_if->sk_ifp;
713 struct sk_add_maddr_genesis_ctx ctx = { sc_if, { 0, 0 } };
714 int i;
715 u_int16_t dummy[] = { 0, 0, 0 };
716
717 SK_IF_LOCK_ASSERT(sc_if);
718
719 ctx.mode = SK_XM_READ_4(sc_if, XM_MODE);
720 ctx.mode &= ~(XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH |
721 XM_MODE_RX_USE_PERFECT);
722 /* First, zot all the existing perfect filters. */
723 for (i = 1; i < XM_RXFILT_MAX; i++)
724 sk_setfilt(sc_if, dummy, i);
725
726 /* Now program new ones. */
727 if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
728 if (if_getflags(ifp) & IFF_ALLMULTI)
729 ctx.mode |= XM_MODE_RX_USE_HASH;
730 if (if_getflags(ifp) & IFF_PROMISC)
731 ctx.mode |= XM_MODE_RX_PROMISC;
732 ctx.hashes[0] = 0xFFFFFFFF;
733 ctx.hashes[1] = 0xFFFFFFFF;
734 } else
735 /* XXX want to maintain reverse semantics */
736 if_foreach_llmaddr(ifp, sk_add_maddr_genesis, &ctx);
737
738 SK_XM_WRITE_4(sc_if, XM_MODE, ctx.mode);
739 SK_XM_WRITE_4(sc_if, XM_MAR0, ctx.hashes[0]);
740 SK_XM_WRITE_4(sc_if, XM_MAR2, ctx.hashes[1]);
741 }
742
743 static u_int
744 sk_hash_maddr_yukon(void *arg, struct sockaddr_dl *sdl, u_int cnt)
745 {
746 uint32_t crc, *hashes = arg;
747
748 crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
749 /* Just want the 6 least significant bits. */
750 crc &= 0x3f;
751 /* Set the corresponding bit in the hash table. */
752 hashes[crc >> 5] |= 1 << (crc & 0x1f);
753
754 return (1);
755 }
756
757 static void
758 sk_rxfilter_yukon(struct sk_if_softc *sc_if)
759 {
760 if_t ifp;
761 uint32_t hashes[2] = { 0, 0 }, mode;
762
763 SK_IF_LOCK_ASSERT(sc_if);
764
765 ifp = sc_if->sk_ifp;
766 mode = SK_YU_READ_2(sc_if, YUKON_RCR);
767 if (if_getflags(ifp) & IFF_PROMISC)
768 mode &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN);
769 else if (if_getflags(ifp) & IFF_ALLMULTI) {
770 mode |= YU_RCR_UFLEN | YU_RCR_MUFLEN;
771 hashes[0] = 0xFFFFFFFF;
772 hashes[1] = 0xFFFFFFFF;
773 } else {
774 mode |= YU_RCR_UFLEN;
775 if_foreach_llmaddr(ifp, sk_hash_maddr_yukon, hashes);
776 if (hashes[0] != 0 || hashes[1] != 0)
777 mode |= YU_RCR_MUFLEN;
778 }
779
780 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
781 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
782 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
783 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
784 SK_YU_WRITE_2(sc_if, YUKON_RCR, mode);
785 }
786
787 static int
788 sk_init_rx_ring(struct sk_if_softc *sc_if)
789 {
790 struct sk_ring_data *rd;
791 bus_addr_t addr;
792 u_int32_t csum_start;
793 int i;
794
795 sc_if->sk_cdata.sk_rx_cons = 0;
796
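/*
 * The descriptor's checksum-start word packs two 16-bit byte offsets for
 * the Rx checksum unit: the start of the IP header in the low half and,
 * in the high half, the start of the transport header, assuming a plain
 * Ethernet header followed by an IPv4 header with no options.
 */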
797 csum_start = (ETHER_HDR_LEN + sizeof(struct ip)) << 16 |
798 ETHER_HDR_LEN;
799 rd = &sc_if->sk_rdata;
800 bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
801 for (i = 0; i < SK_RX_RING_CNT; i++) {
802 if (sk_newbuf(sc_if, i) != 0)
803 return (ENOBUFS);
804 if (i == (SK_RX_RING_CNT - 1))
805 addr = SK_RX_RING_ADDR(sc_if, 0);
806 else
807 addr = SK_RX_RING_ADDR(sc_if, i + 1);
808 rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
809 rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
810 }
811
812 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
813 sc_if->sk_cdata.sk_rx_ring_map,
814 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
815
816 return(0);
817 }
818
819 static int
820 sk_init_jumbo_rx_ring(struct sk_if_softc *sc_if)
821 {
822 struct sk_ring_data *rd;
823 bus_addr_t addr;
824 u_int32_t csum_start;
825 int i;
826
827 sc_if->sk_cdata.sk_jumbo_rx_cons = 0;
828
829 csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
830 ETHER_HDR_LEN;
831 rd = &sc_if->sk_rdata;
832 bzero(rd->sk_jumbo_rx_ring,
833 sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
834 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
835 if (sk_jumbo_newbuf(sc_if, i) != 0)
836 return (ENOBUFS);
837 if (i == (SK_JUMBO_RX_RING_CNT - 1))
838 addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
839 else
840 addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
841 rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
842 rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
843 }
844
845 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
846 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
847 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
848
849 return (0);
850 }
851
852 static void
853 sk_init_tx_ring(struct sk_if_softc *sc_if)
854 {
855 struct sk_ring_data *rd;
856 struct sk_txdesc *txd;
857 bus_addr_t addr;
858 int i;
859
860 STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
861 STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);
862
863 sc_if->sk_cdata.sk_tx_prod = 0;
864 sc_if->sk_cdata.sk_tx_cons = 0;
865 sc_if->sk_cdata.sk_tx_cnt = 0;
866
867 rd = &sc_if->sk_rdata;
868 bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
869 for (i = 0; i < SK_TX_RING_CNT; i++) {
870 if (i == (SK_TX_RING_CNT - 1))
871 addr = SK_TX_RING_ADDR(sc_if, 0);
872 else
873 addr = SK_TX_RING_ADDR(sc_if, i + 1);
874 rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
875 txd = &sc_if->sk_cdata.sk_txdesc[i];
876 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
877 }
878
879 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
880 sc_if->sk_cdata.sk_tx_ring_map,
881 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
882 }
883
884 static __inline void
885 sk_discard_rxbuf(struct sk_if_softc *sc_if, int idx)
886 {
887 struct sk_rx_desc *r;
888 struct sk_rxdesc *rxd;
889 struct mbuf *m;
890
891 r = &sc_if->sk_rdata.sk_rx_ring[idx];
892 rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
893 m = rxd->rx_m;
894 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
895 }
896
897 static __inline void
898 sk_discard_jumbo_rxbuf(struct sk_if_softc *sc_if, int idx)
899 {
900 struct sk_rx_desc *r;
901 struct sk_rxdesc *rxd;
902 struct mbuf *m;
903
904 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
905 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
906 m = rxd->rx_m;
907 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
908 }
909
910 static int
911 sk_newbuf(struct sk_if_softc *sc_if, int idx)
912 {
913 struct sk_rx_desc *r;
914 struct sk_rxdesc *rxd;
915 struct mbuf *m;
916 bus_dma_segment_t segs[1];
917 bus_dmamap_t map;
918 int nsegs;
919
920 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
921 if (m == NULL)
922 return (ENOBUFS);
923 m->m_len = m->m_pkthdr.len = MCLBYTES;
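/*
 * As in sk_jumbo_newbuf(), shift the start of the packet so the payload
 * begins on a longword boundary.
 */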
924 m_adj(m, ETHER_ALIGN);
925
926 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
927 sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
928 m_freem(m);
929 return (ENOBUFS);
930 }
931 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
932
933 rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
934 if (rxd->rx_m != NULL) {
935 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
936 BUS_DMASYNC_POSTREAD);
937 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
938 }
939 map = rxd->rx_dmamap;
940 rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
941 sc_if->sk_cdata.sk_rx_sparemap = map;
942 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
943 BUS_DMASYNC_PREREAD);
944 rxd->rx_m = m;
945 r = &sc_if->sk_rdata.sk_rx_ring[idx];
946 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
947 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
948 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
949
950 return (0);
951 }
952
953 static int
954 sk_jumbo_newbuf(struct sk_if_softc *sc_if, int idx)
955 {
956 struct sk_rx_desc *r;
957 struct sk_rxdesc *rxd;
958 struct mbuf *m;
959 bus_dma_segment_t segs[1];
960 bus_dmamap_t map;
961 int nsegs;
962
963 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
964 if (m == NULL)
965 return (ENOBUFS);
966 m->m_pkthdr.len = m->m_len = MJUM9BYTES;
967 /*
968 * Adjust alignment so packet payload begins on a
969 * longword boundary. Mandatory for Alpha, useful on
970 * x86 too.
971 */
972 m_adj(m, ETHER_ALIGN);
973
974 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
975 sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
976 m_freem(m);
977 return (ENOBUFS);
978 }
979 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
980
981 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
982 if (rxd->rx_m != NULL) {
983 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
984 BUS_DMASYNC_POSTREAD);
985 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
986 rxd->rx_dmamap);
987 }
988 map = rxd->rx_dmamap;
989 rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
990 sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
991 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
992 BUS_DMASYNC_PREREAD);
993 rxd->rx_m = m;
994 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
995 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
996 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
997 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
998
999 return (0);
1000 }
1001
1002 /*
1003 * Set media options.
1004 */
1005 static int
1006 sk_ifmedia_upd(if_t ifp)
1007 {
1008 struct sk_if_softc *sc_if = if_getsoftc(ifp);
1009 struct mii_data *mii;
1010
1011 mii = device_get_softc(sc_if->sk_miibus);
1012 sk_init(sc_if);
1013 mii_mediachg(mii);
1014
1015 return(0);
1016 }
1017
1018 /*
1019 * Report current media status.
1020 */
1021 static void
1022 sk_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
1023 {
1024 struct sk_if_softc *sc_if;
1025 struct mii_data *mii;
1026
1027 sc_if = if_getsoftc(ifp);
1028 mii = device_get_softc(sc_if->sk_miibus);
1029
1030 mii_pollstat(mii);
1031 ifmr->ifm_active = mii->mii_media_active;
1032 ifmr->ifm_status = mii->mii_media_status;
1033
1034 return;
1035 }
1036
1037 static int
1038 sk_ioctl(if_t ifp, u_long command, caddr_t data)
1039 {
1040 struct sk_if_softc *sc_if = if_getsoftc(ifp);
1041 struct ifreq *ifr = (struct ifreq *) data;
1042 int error, mask;
1043 struct mii_data *mii;
1044
1045 error = 0;
1046 switch(command) {
1047 case SIOCSIFMTU:
1048 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU)
1049 error = EINVAL;
1050 else if (if_getmtu(ifp) != ifr->ifr_mtu) {
1051 if (sc_if->sk_jumbo_disable != 0 &&
1052 ifr->ifr_mtu > SK_MAX_FRAMELEN)
1053 error = EINVAL;
1054 else {
1055 SK_IF_LOCK(sc_if);
1056 if_setmtu(ifp, ifr->ifr_mtu);
1057 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1058 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1059 sk_init_locked(sc_if);
1060 }
1061 SK_IF_UNLOCK(sc_if);
1062 }
1063 }
1064 break;
1065 case SIOCSIFFLAGS:
1066 SK_IF_LOCK(sc_if);
1067 if (if_getflags(ifp) & IFF_UP) {
1068 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1069 if ((if_getflags(ifp) ^ sc_if->sk_if_flags)
1070 & (IFF_PROMISC | IFF_ALLMULTI))
1071 sk_rxfilter(sc_if);
1072 } else
1073 sk_init_locked(sc_if);
1074 } else {
1075 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1076 sk_stop(sc_if);
1077 }
1078 sc_if->sk_if_flags = if_getflags(ifp);
1079 SK_IF_UNLOCK(sc_if);
1080 break;
1081 case SIOCADDMULTI:
1082 case SIOCDELMULTI:
1083 SK_IF_LOCK(sc_if);
1084 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1085 sk_rxfilter(sc_if);
1086 SK_IF_UNLOCK(sc_if);
1087 break;
1088 case SIOCGIFMEDIA:
1089 case SIOCSIFMEDIA:
1090 mii = device_get_softc(sc_if->sk_miibus);
1091 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1092 break;
1093 case SIOCSIFCAP:
1094 SK_IF_LOCK(sc_if);
1095 if (sc_if->sk_softc->sk_type == SK_GENESIS) {
1096 SK_IF_UNLOCK(sc_if);
1097 break;
1098 }
1099 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1100 if ((mask & IFCAP_TXCSUM) != 0 &&
1101 (IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
1102 if_togglecapenable(ifp, IFCAP_TXCSUM);
1103 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
1104 if_sethwassistbits(ifp, SK_CSUM_FEATURES, 0);
1105 else
1106 if_sethwassistbits(ifp, 0, SK_CSUM_FEATURES);
1107 }
1108 if ((mask & IFCAP_RXCSUM) != 0 &&
1109 (IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0)
1110 if_togglecapenable(ifp, IFCAP_RXCSUM);
1111 SK_IF_UNLOCK(sc_if);
1112 break;
1113 default:
1114 error = ether_ioctl(ifp, command, data);
1115 break;
1116 }
1117
1118 return (error);
1119 }
1120
1121 /*
1122 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1123 * IDs against our list and return a device name if we find a match.
1124 */
1125 static int
1126 skc_probe(device_t dev)
1127 {
1128 const struct sk_type *t = sk_devs;
1129
1130 while(t->sk_name != NULL) {
1131 if ((pci_get_vendor(dev) == t->sk_vid) &&
1132 (pci_get_device(dev) == t->sk_did)) {
1133 /*
1134 * Only attach to rev. 2 of the Linksys EG1032 adapter.
1135 * Rev. 3 is supported by re(4).
1136 */
1137 if ((t->sk_vid == VENDORID_LINKSYS) &&
1138 (t->sk_did == DEVICEID_LINKSYS_EG1032) &&
1139 (pci_get_subdevice(dev) !=
1140 SUBDEVICEID_LINKSYS_EG1032_REV2)) {
1141 t++;
1142 continue;
1143 }
1144 device_set_desc(dev, t->sk_name);
1145 return (BUS_PROBE_DEFAULT);
1146 }
1147 t++;
1148 }
1149
1150 return(ENXIO);
1151 }
1152
1153 /*
1154 * Force the GEnesis into reset, then bring it out of reset.
1155 */
1156 static void
1157 sk_reset(struct sk_softc *sc)
1158 {
1159
1160 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1161 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1162 if (SK_YUKON_FAMILY(sc->sk_type))
1163 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1164
1165 DELAY(1000);
1166 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1167 DELAY(2);
1168 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1169 if (SK_YUKON_FAMILY(sc->sk_type))
1170 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1171
1172 if (sc->sk_type == SK_GENESIS) {
1173 /* Configure packet arbiter */
1174 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1175 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1176 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1177 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1178 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1179 }
1180
1181 /* Enable RAM interface */
1182 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1183
1184 /*
1185 * Configure interrupt moderation. The moderation timer
1186 * defers interrupts specified in the interrupt moderation
1187 * timer mask based on the timeout specified in the interrupt
1188 * moderation timer init register. Each bit in the timer
1189 * register represents one tick, so to specify a timeout in
1190 * microseconds, we have to multiply by the correct number of
1191 * ticks-per-microsecond.
1192 */
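/*
 * For example, assuming SK_IM_USECS() is a simple multiply of a
 * microsecond value by the per-chip tick rate, a 100us moderation
 * setting on a GEnesis would program 100 * SK_IMTIMER_TICKS_GENESIS
 * ticks into SK_IMTIMERINIT below.
 */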
1193 switch (sc->sk_type) {
1194 case SK_GENESIS:
1195 sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
1196 break;
1197 default:
1198 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
1199 break;
1200 }
1201 if (bootverbose)
1202 device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
1203 sc->sk_int_mod);
1204 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
1205 sc->sk_int_ticks));
1206 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1207 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1208 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1209
1210 return;
1211 }
1212
1213 static int
1214 sk_probe(device_t dev)
1215 {
1216 struct sk_softc *sc;
1217
1218 sc = device_get_softc(device_get_parent(dev));
1219
1220 /*
1221 * Not much to do here. We always know there will be
1222 * at least one XMAC present, and if there are two,
1223 * skc_attach() will create a second device instance
1224 * for us.
1225 */
1226 switch (sc->sk_type) {
1227 case SK_GENESIS:
1228 device_set_desc(dev, "XaQti Corp. XMAC II");
1229 break;
1230 case SK_YUKON:
1231 case SK_YUKON_LITE:
1232 case SK_YUKON_LP:
1233 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1234 break;
1235 }
1236
1237 return (BUS_PROBE_DEFAULT);
1238 }
1239
1240 /*
1241 * Each XMAC chip is attached as a separate logical IP interface.
1242 * Single port cards will have only one logical interface of course.
1243 */
1244 static int
1245 sk_attach(device_t dev)
1246 {
1247 struct sk_softc *sc;
1248 struct sk_if_softc *sc_if;
1249 if_t ifp;
1250 u_int32_t r;
1251 int error, i, phy, port;
1252 u_char eaddr[6];
1253 u_char inv_mac[] = {0, 0, 0, 0, 0, 0};
1254
1255 if (dev == NULL)
1256 return(EINVAL);
1257
1258 error = 0;
1259 sc_if = device_get_softc(dev);
1260 sc = device_get_softc(device_get_parent(dev));
1261 port = *(int *)device_get_ivars(dev);
1262
1263 sc_if->sk_if_dev = dev;
1264 sc_if->sk_port = port;
1265 sc_if->sk_softc = sc;
1266 sc->sk_if[port] = sc_if;
1267 if (port == SK_PORT_A)
1268 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1269 if (port == SK_PORT_B)
1270 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1271
1272 callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
1273 callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);
1274
1275 if (sk_dma_alloc(sc_if) != 0) {
1276 error = ENOMEM;
1277 goto fail;
1278 }
1279 sk_dma_jumbo_alloc(sc_if);
1280
1281 ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
1282 if (ifp == NULL) {
1283 device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
1284 error = ENOSPC;
1285 goto fail;
1286 }
1287 if_setsoftc(ifp, sc_if);
1288 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1289 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
1290 /*
1291 * SK_GENESIS has a bug in checksum offload (noted in the Linux driver).
1292 */
1293 if (sc_if->sk_softc->sk_type != SK_GENESIS) {
1294 if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_RXCSUM);
1295 if_sethwassist(ifp, 0);
1296 } else {
1297 if_setcapabilities(ifp, 0);
1298 if_sethwassist(ifp, 0);
1299 }
1300 if_setcapenable(ifp, if_getcapabilities(ifp));
1301 /*
1302 * Some revisions of the Yukon controller generate corrupted
1303 * frames when TX checksum offloading is enabled. The
1304 * frames carry a valid checksum value, so the payload may have
1305 * been modified during the TX checksum calculation. Disable TX
1306 * checksum offloading by default, but give users a chance to
1307 * enable it when they know their controller works without
1308 * problems with TX checksum offloading.
1309 */
1310 if_setcapenablebit(ifp, 0, IFCAP_TXCSUM);
1311 if_setioctlfn(ifp, sk_ioctl);
1312 if_setstartfn(ifp, sk_start);
1313 if_setinitfn(ifp, sk_init);
1314 if_setsendqlen(ifp, SK_TX_RING_CNT - 1);
1315 if_setsendqready(ifp);
1316
1317 /*
1318 * Get station address for this interface. Note that
1319 * dual port cards actually come with three station
1320 * addresses: one for each port, plus an extra. The
1321 * extra one is used by the SysKonnect driver software
1322 * as a 'virtual' station address for when both ports
1323 * are operating in failover mode. Currently we don't
1324 * use this extra address.
1325 */
1326 SK_IF_LOCK(sc_if);
1327 for (i = 0; i < ETHER_ADDR_LEN; i++)
1328 eaddr[i] =
1329 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1330
1331 /* Verify whether the station address is invalid or not. */
1332 if (bcmp(eaddr, inv_mac, sizeof(inv_mac)) == 0) {
1333 device_printf(sc_if->sk_if_dev,
1334 "Generating random ethernet address\n");
1335 r = arc4random();
1336 /*
1337 * Set OUI to convenient locally assigned address. 'b'
1338 * is 0x62, which has the locally assigned bit set, and
1339 * the broadcast/multicast bit clear.
1340 */
1341 eaddr[0] = 'b';
1342 eaddr[1] = 's';
1343 eaddr[2] = 'd';
1344 eaddr[3] = (r >> 16) & 0xff;
1345 eaddr[4] = (r >> 8) & 0xff;
1346 eaddr[5] = (r >> 0) & 0xff;
1347 }
1348 /*
1349 * Set up RAM buffer addresses. The NIC will have a certain
1350 * amount of SRAM on it, somewhere between 512K and 2MB. We
1351 * need to divide this up a) between the transmitter and
1352 * receiver and b) between the two XMACs, if this is a
1353 * dual port NIC. Our algorithm is to divide up the memory
1354 * evenly so that everyone gets a fair share.
1355 *
1356 * Just to be contrary, Yukon2 appears to have separate memory
1357 * for each MAC.
1358 */
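/*
 * For example, a single-MAC board with 1MB of SRAM splits into a 512K
 * receive buffer and a 512K transmit buffer.  The start/end values
 * programmed below are expressed in 8-byte (u_int64_t) units, which is
 * why the byte offsets are divided by sizeof(u_int64_t).
 */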
1359 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1360 u_int32_t chunk, val;
1361
1362 chunk = sc->sk_ramsize / 2;
1363 val = sc->sk_rboff / sizeof(u_int64_t);
1364 sc_if->sk_rx_ramstart = val;
1365 val += (chunk / sizeof(u_int64_t));
1366 sc_if->sk_rx_ramend = val - 1;
1367 sc_if->sk_tx_ramstart = val;
1368 val += (chunk / sizeof(u_int64_t));
1369 sc_if->sk_tx_ramend = val - 1;
1370 } else {
1371 u_int32_t chunk, val;
1372
1373 chunk = sc->sk_ramsize / 4;
1374 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1375 sizeof(u_int64_t);
1376 sc_if->sk_rx_ramstart = val;
1377 val += (chunk / sizeof(u_int64_t));
1378 sc_if->sk_rx_ramend = val - 1;
1379 sc_if->sk_tx_ramstart = val;
1380 val += (chunk / sizeof(u_int64_t));
1381 sc_if->sk_tx_ramend = val - 1;
1382 }
1383
1384 /* Read and save PHY type and set PHY address */
1385 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1386 if (!SK_YUKON_FAMILY(sc->sk_type)) {
1387 switch(sc_if->sk_phytype) {
1388 case SK_PHYTYPE_XMAC:
1389 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1390 break;
1391 case SK_PHYTYPE_BCOM:
1392 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1393 break;
1394 default:
1395 device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
1396 sc_if->sk_phytype);
1397 error = ENODEV;
1398 SK_IF_UNLOCK(sc_if);
1399 goto fail;
1400 }
1401 } else {
1402 if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
1403 sc->sk_pmd != 'S') {
1404 /* not initialized, punt */
1405 sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
1406 sc->sk_coppertype = 1;
1407 }
1408
1409 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1410
1411 if (!(sc->sk_coppertype))
1412 sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
1413 }
1414
1415 /*
1416 * Call MI attach routine. Can't hold locks when calling into ether_*.
1417 */
1418 SK_IF_UNLOCK(sc_if);
1419 ether_ifattach(ifp, eaddr);
1420 SK_IF_LOCK(sc_if);
1421
1422 /*
1423 * The hardware should be ready for VLAN_MTU by default:
1424 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
1425 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
1426 *
1427 */
1428 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
1429 if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0);
1430 /*
1431 * Tell the upper layer(s) we support long frames.
1432 * Must appear after the call to ether_ifattach() because
1433 * ether_ifattach() sets ifi_hdrlen to the default value.
1434 */
1435 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
1436
1437 /*
1438 * Do miibus setup.
1439 */
1440 phy = MII_PHY_ANY;
1441 switch (sc->sk_type) {
1442 case SK_GENESIS:
1443 sk_init_xmac(sc_if);
1444 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
1445 phy = 0;
1446 break;
1447 case SK_YUKON:
1448 case SK_YUKON_LITE:
1449 case SK_YUKON_LP:
1450 sk_init_yukon(sc_if);
1451 phy = 0;
1452 break;
1453 }
1454
1455 SK_IF_UNLOCK(sc_if);
1456 error = mii_attach(dev, &sc_if->sk_miibus, ifp, sk_ifmedia_upd,
1457 sk_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
1458 if (error != 0) {
1459 device_printf(sc_if->sk_if_dev, "attaching PHYs failed\n");
1460 ether_ifdetach(ifp);
1461 goto fail;
1462 }
1463
1464 fail:
1465 if (error) {
1466 /* Access should be ok even though lock has been dropped */
1467 sc->sk_if[port] = NULL;
1468 sk_detach(dev);
1469 }
1470
1471 return(error);
1472 }
1473
1474 /*
1475 * Attach the interface. Allocate softc structures, do ifmedia
1476 * setup and ethernet/BPF attach.
1477 */
1478 static int
1479 skc_attach(device_t dev)
1480 {
1481 struct sk_softc *sc;
1482 int error = 0, *port;
1483 uint8_t skrs;
1484 const char *pname = NULL;
1485 char *revstr;
1486
1487 sc = device_get_softc(dev);
1488 sc->sk_dev = dev;
1489
1490 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1491 MTX_DEF);
1492 mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
1493 /*
1494 * Map control/status registers.
1495 */
1496 pci_enable_busmaster(dev);
1497
1498 /* Allocate resources */
1499 #ifdef SK_USEIOSPACE
1500 sc->sk_res_spec = sk_res_spec_io;
1501 #else
1502 sc->sk_res_spec = sk_res_spec_mem;
1503 #endif
1504 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
1505 if (error) {
1506 if (sc->sk_res_spec == sk_res_spec_mem)
1507 sc->sk_res_spec = sk_res_spec_io;
1508 else
1509 sc->sk_res_spec = sk_res_spec_mem;
1510 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
1511 if (error) {
1512 device_printf(dev, "couldn't allocate %s resources\n",
1513 sc->sk_res_spec == sk_res_spec_mem ? "memory" :
1514 "I/O");
1515 goto fail;
1516 }
1517 }
1518
1519 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1520 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;
1521
1522 /* Bail out if chip is not recognized. */
1523 if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
1524 device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
1525 sc->sk_type, sc->sk_rev);
1526 error = ENXIO;
1527 goto fail;
1528 }
1529
1530 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1531 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1532 OID_AUTO, "int_mod",
1533 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1534 &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
1535 "SK interrupt moderation");
1536
1537 /* Pull in device tunables. */
1538 sc->sk_int_mod = SK_IM_DEFAULT;
1539 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1540 "int_mod", &sc->sk_int_mod);
1541 if (error == 0) {
1542 if (sc->sk_int_mod < SK_IM_MIN ||
1543 sc->sk_int_mod > SK_IM_MAX) {
1544 device_printf(dev, "int_mod value out of range; "
1545 "using default: %d\n", SK_IM_DEFAULT);
1546 sc->sk_int_mod = SK_IM_DEFAULT;
1547 }
1548 }
1549
1550 /* Reset the adapter. */
1551 sk_reset(sc);
1552
1553 skrs = sk_win_read_1(sc, SK_EPROM0);
1554 if (sc->sk_type == SK_GENESIS) {
1555 /* Read and save RAM size and RAMbuffer offset */
1556 switch(skrs) {
1557 case SK_RAMSIZE_512K_64:
1558 sc->sk_ramsize = 0x80000;
1559 sc->sk_rboff = SK_RBOFF_0;
1560 break;
1561 case SK_RAMSIZE_1024K_64:
1562 sc->sk_ramsize = 0x100000;
1563 sc->sk_rboff = SK_RBOFF_80000;
1564 break;
1565 case SK_RAMSIZE_1024K_128:
1566 sc->sk_ramsize = 0x100000;
1567 sc->sk_rboff = SK_RBOFF_0;
1568 break;
1569 case SK_RAMSIZE_2048K_128:
1570 sc->sk_ramsize = 0x200000;
1571 sc->sk_rboff = SK_RBOFF_0;
1572 break;
1573 default:
1574 device_printf(dev, "unknown ram size: %d\n", skrs);
1575 error = ENXIO;
1576 goto fail;
1577 }
1578 } else { /* SK_YUKON_FAMILY */
1579 if (skrs == 0x00)
1580 sc->sk_ramsize = 0x20000;
1581 else
1582 sc->sk_ramsize = skrs * (1<<12);
1583 sc->sk_rboff = SK_RBOFF_0;
1584 }
1585
1586 /* Read and save physical media type */
1587 sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
1588
1589 if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
1590 sc->sk_coppertype = 1;
1591 else
1592 sc->sk_coppertype = 0;
1593
1594 /* Determine whether to name it with VPD PN or just make it up.
1595 * The Marvell Yukon VPD PN frequently seems to be bogus. */
1596 switch (pci_get_device(dev)) {
1597 case DEVICEID_SK_V1:
1598 case DEVICEID_BELKIN_5005:
1599 case DEVICEID_3COM_3C940:
1600 case DEVICEID_LINKSYS_EG1032:
1601 case DEVICEID_DLINK_DGE530T_A1:
1602 case DEVICEID_DLINK_DGE530T_B1:
1603 /* Stay with VPD PN. */
1604 (void) pci_get_vpd_ident(dev, &pname);
1605 break;
1606 case DEVICEID_SK_V2:
1607 /* YUKON VPD PN might bear no resemblance to reality. */
1608 switch (sc->sk_type) {
1609 case SK_GENESIS:
1610 /* Stay with VPD PN. */
1611 (void) pci_get_vpd_ident(dev, &pname);
1612 break;
1613 case SK_YUKON:
1614 pname = "Marvell Yukon Gigabit Ethernet";
1615 break;
1616 case SK_YUKON_LITE:
1617 pname = "Marvell Yukon Lite Gigabit Ethernet";
1618 break;
1619 case SK_YUKON_LP:
1620 pname = "Marvell Yukon LP Gigabit Ethernet";
1621 break;
1622 default:
1623 pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
1624 break;
1625 }
1626
1627 /* Yukon Lite Rev. A0 needs special test. */
1628 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1629 u_int32_t far;
1630 u_int8_t testbyte;
1631
1632 /* Save flash address register before testing. */
1633 far = sk_win_read_4(sc, SK_EP_ADDR);
1634
1635 sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
1636 testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
1637
1638 if (testbyte != 0x00) {
1639 /* Yukon Lite Rev. A0 detected. */
1640 sc->sk_type = SK_YUKON_LITE;
1641 sc->sk_rev = SK_YUKON_LITE_REV_A0;
1642 /* Restore flash address register. */
1643 sk_win_write_4(sc, SK_EP_ADDR, far);
1644 }
1645 }
1646 break;
1647 default:
1648 device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
1649 "chipver=%02x, rev=%x\n",
1650 pci_get_vendor(dev), pci_get_device(dev),
1651 sc->sk_type, sc->sk_rev);
1652 error = ENXIO;
1653 goto fail;
1654 }
1655
1656 if (sc->sk_type == SK_YUKON_LITE) {
1657 switch (sc->sk_rev) {
1658 case SK_YUKON_LITE_REV_A0:
1659 revstr = "A0";
1660 break;
1661 case SK_YUKON_LITE_REV_A1:
1662 revstr = "A1";
1663 break;
1664 case SK_YUKON_LITE_REV_A3:
1665 revstr = "A3";
1666 break;
1667 default:
1668 revstr = "";
1669 break;
1670 }
1671 } else {
1672 revstr = "";
1673 }
1674
1675 /* Announce the product name and more VPD data if present. */
1676 if (pname != NULL)
1677 device_printf(dev, "%s rev. %s(0x%x)\n",
1678 pname, revstr, sc->sk_rev);
1679
1680 if (bootverbose) {
1681 device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type);
1682 device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev);
1683 device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
1684 device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
1685 }
1686
1687 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1688 if (sc->sk_devs[SK_PORT_A] == NULL) {
1689 device_printf(dev, "failed to add child for PORT_A\n");
1690 error = ENXIO;
1691 goto fail;
1692 }
1693 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1694 if (port == NULL) {
1695 device_printf(dev, "failed to allocate memory for "
1696 "ivars of PORT_A\n");
1697 error = ENXIO;
1698 goto fail;
1699 }
1700 *port = SK_PORT_A;
1701 device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1702
1703 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1704 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1705 if (sc->sk_devs[SK_PORT_B] == NULL) {
1706 device_printf(dev, "failed to add child for PORT_B\n");
1707 error = ENXIO;
1708 goto fail;
1709 }
1710 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1711 if (port == NULL) {
1712 device_printf(dev, "failed to allocate memory for "
1713 "ivars of PORT_B\n");
1714 error = ENXIO;
1715 goto fail;
1716 }
1717 *port = SK_PORT_B;
1718 device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1719 }
1720
1721 /* Turn on the 'driver is loaded' LED. */
1722 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1723
1724 error = bus_generic_attach(dev);
1725 if (error) {
1726 device_printf(dev, "failed to attach port(s)\n");
1727 goto fail;
1728 }
1729
1730 /* Hook interrupt last to avoid having to lock softc */
1731 error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
1732 NULL, sk_intr, sc, &sc->sk_intrhand);
1733
1734 if (error) {
1735 device_printf(dev, "couldn't set up irq\n");
1736 goto fail;
1737 }
1738
1739 fail:
1740 if (error)
1741 skc_detach(dev);
1742
1743 return(error);
1744 }
1745
1746 /*
1747 * Shutdown hardware and free up resources. This can be called any
1748 * time after the mutex has been initialized. It is called in both
1749 * the error case in attach and the normal detach case so it needs
1750 * to be careful about only freeing resources that have actually been
1751 * allocated.
1752 */
1753 static int
1754 sk_detach(device_t dev)
1755 {
1756 struct sk_if_softc *sc_if;
1757 if_t ifp;
1758
1759 sc_if = device_get_softc(dev);
1760 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1761 ("sk mutex not initialized in sk_detach"));
1762 SK_IF_LOCK(sc_if);
1763
1764 ifp = sc_if->sk_ifp;
1765 /* These should only be active if sk_attach() succeeded */
1766 if (device_is_attached(dev)) {
1767 sk_stop(sc_if);
1768 /* Can't hold locks while calling detach */
1769 SK_IF_UNLOCK(sc_if);
1770 callout_drain(&sc_if->sk_tick_ch);
1771 callout_drain(&sc_if->sk_watchdog_ch);
1772 ether_ifdetach(ifp);
1773 SK_IF_LOCK(sc_if);
1774 }
1775 /*
1776 * We're generally called from skc_detach() which is using
1777 * device_delete_child() to get to here. It's already trashed
1778 * miibus for us, so don't do it here or we'll panic.
1779 */
1780 /*
1781 if (sc_if->sk_miibus != NULL)
1782 device_delete_child(dev, sc_if->sk_miibus);
1783 */
1784 bus_generic_detach(dev);
1785 sk_dma_jumbo_free(sc_if);
1786 sk_dma_free(sc_if);
1787 SK_IF_UNLOCK(sc_if);
1788 if (ifp)
1789 if_free(ifp);
1790
1791 return(0);
1792 }
1793
1794 static int
1795 skc_detach(device_t dev)
1796 {
1797 struct sk_softc *sc;
1798
1799 sc = device_get_softc(dev);
1800 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1801
1802 if (device_is_alive(dev)) {
1803 if (sc->sk_devs[SK_PORT_A] != NULL) {
1804 free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
1805 device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1806 }
1807 if (sc->sk_devs[SK_PORT_B] != NULL) {
1808 free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
1809 device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1810 }
1811 bus_generic_detach(dev);
1812 }
1813
1814 if (sc->sk_intrhand)
1815 bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
1816 bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);
1817
1818 mtx_destroy(&sc->sk_mii_mtx);
1819 mtx_destroy(&sc->sk_mtx);
1820
1821 return(0);
1822 }
1823
1824 static bus_dma_tag_t
1825 skc_get_dma_tag(device_t bus, device_t child __unused)
1826 {
1827
1828 return (bus_get_dma_tag(bus));
1829 }
1830
1831 struct sk_dmamap_arg {
1832 bus_addr_t sk_busaddr;
1833 };
1834
1835 static void
1836 sk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1837 {
1838 struct sk_dmamap_arg *ctx;
1839
1840 if (error != 0)
1841 return;
1842
1843 ctx = arg;
1844 ctx->sk_busaddr = segs[0].ds_addr;
1845 }
1846
1847 /*
1848 * Allocate jumbo buffer storage. The SysKonnect adapters support
1849 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1850 * use them in their drivers. In order for us to use them, we need
1851 * large 9K receive buffers; standard mbuf clusters are only 2048
1852 * bytes in size, so the jumbo receive path uses the system's 9K
1853 * jumbo clusters rather than a private buffer pool. Fortunately,
1854 * this does not require an excessive amount of additional code.
1855 */
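/*
 * A minimal sketch, assuming the buffer allocation lives in
 * sk_jumbo_newbuf() (not shown here), of how a 9K receive buffer would be
 * obtained from the system jumbo clusters mentioned above:
 *
 *	struct mbuf *m;
 *
 *	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
 *
 * The jumbo DMA tag created in sk_dma_jumbo_alloc() below is sized to
 * MJUM9BYTES to match buffers allocated this way.
 */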
1856 static int
1857 sk_dma_alloc(struct sk_if_softc *sc_if)
1858 {
1859 struct sk_dmamap_arg ctx;
1860 struct sk_txdesc *txd;
1861 struct sk_rxdesc *rxd;
1862 int error, i;
1863
1864 /* create parent tag */
1865 /*
1866 * XXX
1867 * This driver should use BUS_SPACE_MAXADDR for the lowaddr argument
1868 * in bus_dma_tag_create(9), since the NIC supports DAC mode.
1869 * However, bz@ reported that it does not work on amd64 with > 4GB
1870 * of RAM. Until the breakage is better understood, disable DAC mode
1871 * by limiting DMA addresses to the 32-bit address space.
1872 */
1873 error = bus_dma_tag_create(
1874 bus_get_dma_tag(sc_if->sk_if_dev),/* parent */
1875 1, 0, /* algnmnt, boundary */
1876 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1877 BUS_SPACE_MAXADDR, /* highaddr */
1878 NULL, NULL, /* filter, filterarg */
1879 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1880 0, /* nsegments */
1881 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1882 0, /* flags */
1883 NULL, NULL, /* lockfunc, lockarg */
1884 &sc_if->sk_cdata.sk_parent_tag);
1885 if (error != 0) {
1886 device_printf(sc_if->sk_if_dev,
1887 "failed to create parent DMA tag\n");
1888 goto fail;
1889 }
1890
1891 /* create tag for Tx ring */
1892 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1893 SK_RING_ALIGN, 0, /* algnmnt, boundary */
1894 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1895 BUS_SPACE_MAXADDR, /* highaddr */
1896 NULL, NULL, /* filter, filterarg */
1897 SK_TX_RING_SZ, /* maxsize */
1898 1, /* nsegments */
1899 SK_TX_RING_SZ, /* maxsegsize */
1900 0, /* flags */
1901 NULL, NULL, /* lockfunc, lockarg */
1902 &sc_if->sk_cdata.sk_tx_ring_tag);
1903 if (error != 0) {
1904 device_printf(sc_if->sk_if_dev,
1905 "failed to allocate Tx ring DMA tag\n");
1906 goto fail;
1907 }
1908
1909 /* create tag for Rx ring */
1910 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1911 SK_RING_ALIGN, 0, /* algnmnt, boundary */
1912 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1913 BUS_SPACE_MAXADDR, /* highaddr */
1914 NULL, NULL, /* filter, filterarg */
1915 SK_RX_RING_SZ, /* maxsize */
1916 1, /* nsegments */
1917 SK_RX_RING_SZ, /* maxsegsize */
1918 0, /* flags */
1919 NULL, NULL, /* lockfunc, lockarg */
1920 &sc_if->sk_cdata.sk_rx_ring_tag);
1921 if (error != 0) {
1922 device_printf(sc_if->sk_if_dev,
1923 "failed to allocate Rx ring DMA tag\n");
1924 goto fail;
1925 }
1926
1927 /* create tag for Tx buffers */
1928 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1929 1, 0, /* algnmnt, boundary */
1930 BUS_SPACE_MAXADDR, /* lowaddr */
1931 BUS_SPACE_MAXADDR, /* highaddr */
1932 NULL, NULL, /* filter, filterarg */
1933 MCLBYTES * SK_MAXTXSEGS, /* maxsize */
1934 SK_MAXTXSEGS, /* nsegments */
1935 MCLBYTES, /* maxsegsize */
1936 0, /* flags */
1937 NULL, NULL, /* lockfunc, lockarg */
1938 &sc_if->sk_cdata.sk_tx_tag);
1939 if (error != 0) {
1940 device_printf(sc_if->sk_if_dev,
1941 "failed to allocate Tx DMA tag\n");
1942 goto fail;
1943 }
1944
1945 /* create tag for Rx buffers */
1946 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1947 1, 0, /* algnmnt, boundary */
1948 BUS_SPACE_MAXADDR, /* lowaddr */
1949 BUS_SPACE_MAXADDR, /* highaddr */
1950 NULL, NULL, /* filter, filterarg */
1951 MCLBYTES, /* maxsize */
1952 1, /* nsegments */
1953 MCLBYTES, /* maxsegsize */
1954 0, /* flags */
1955 NULL, NULL, /* lockfunc, lockarg */
1956 &sc_if->sk_cdata.sk_rx_tag);
1957 if (error != 0) {
1958 device_printf(sc_if->sk_if_dev,
1959 "failed to allocate Rx DMA tag\n");
1960 goto fail;
1961 }
1962
1963 /* allocate DMA'able memory and load the DMA map for Tx ring */
1964 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
1965 (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT |
1966 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_tx_ring_map);
1967 if (error != 0) {
1968 device_printf(sc_if->sk_if_dev,
1969 "failed to allocate DMA'able memory for Tx ring\n");
1970 goto fail;
1971 }
1972
1973 ctx.sk_busaddr = 0;
1974 error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
1975 sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
1976 SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1977 if (error != 0) {
1978 device_printf(sc_if->sk_if_dev,
1979 "failed to load DMA'able memory for Tx ring\n");
1980 goto fail;
1981 }
1982 sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;
1983
1984 /* allocate DMA'able memory and load the DMA map for Rx ring */
1985 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
1986 (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT |
1987 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_rx_ring_map);
1988 if (error != 0) {
1989 device_printf(sc_if->sk_if_dev,
1990 "failed to allocate DMA'able memory for Rx ring\n");
1991 goto fail;
1992 }
1993
1994 ctx.sk_busaddr = 0;
1995 error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
1996 sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
1997 SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1998 if (error != 0) {
1999 device_printf(sc_if->sk_if_dev,
2000 "failed to load DMA'able memory for Rx ring\n");
2001 goto fail;
2002 }
2003 sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;
2004
2005 /* create DMA maps for Tx buffers */
2006 for (i = 0; i < SK_TX_RING_CNT; i++) {
2007 txd = &sc_if->sk_cdata.sk_txdesc[i];
2008 txd->tx_m = NULL;
2009 txd->tx_dmamap = NULL;
2010 error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
2011 &txd->tx_dmamap);
2012 if (error != 0) {
2013 device_printf(sc_if->sk_if_dev,
2014 "failed to create Tx dmamap\n");
2015 goto fail;
2016 }
2017 }
2018
2019 /* create DMA maps for Rx buffers */
2020 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2021 &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
2022 device_printf(sc_if->sk_if_dev,
2023 "failed to create spare Rx dmamap\n");
2024 goto fail;
2025 }
2026 for (i = 0; i < SK_RX_RING_CNT; i++) {
2027 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2028 rxd->rx_m = NULL;
2029 rxd->rx_dmamap = NULL;
2030 error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2031 &rxd->rx_dmamap);
2032 if (error != 0) {
2033 device_printf(sc_if->sk_if_dev,
2034 "failed to create Rx dmamap\n");
2035 goto fail;
2036 }
2037 }
2038
2039 fail:
2040 return (error);
2041 }
2042
2043 static int
2044 sk_dma_jumbo_alloc(struct sk_if_softc *sc_if)
2045 {
2046 struct sk_dmamap_arg ctx;
2047 struct sk_rxdesc *jrxd;
2048 int error, i;
2049
2050 if (jumbo_disable != 0) {
2051 device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n");
2052 sc_if->sk_jumbo_disable = 1;
2053 return (0);
2054 }
2055 /* create tag for jumbo Rx ring */
2056 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2057 SK_RING_ALIGN, 0, /* algnmnt, boundary */
2058 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2059 BUS_SPACE_MAXADDR, /* highaddr */
2060 NULL, NULL, /* filter, filterarg */
2061 SK_JUMBO_RX_RING_SZ, /* maxsize */
2062 1, /* nsegments */
2063 SK_JUMBO_RX_RING_SZ, /* maxsegsize */
2064 0, /* flags */
2065 NULL, NULL, /* lockfunc, lockarg */
2066 &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2067 if (error != 0) {
2068 device_printf(sc_if->sk_if_dev,
2069 "failed to allocate jumbo Rx ring DMA tag\n");
2070 goto jumbo_fail;
2071 }
2072
2073 /* create tag for jumbo Rx buffers */
2074 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2075 1, 0, /* algnmnt, boundary */
2076 BUS_SPACE_MAXADDR, /* lowaddr */
2077 BUS_SPACE_MAXADDR, /* highaddr */
2078 NULL, NULL, /* filter, filterarg */
2079 MJUM9BYTES, /* maxsize */
2080 1, /* nsegments */
2081 MJUM9BYTES, /* maxsegsize */
2082 0, /* flags */
2083 NULL, NULL, /* lockfunc, lockarg */
2084 &sc_if->sk_cdata.sk_jumbo_rx_tag);
2085 if (error != 0) {
2086 device_printf(sc_if->sk_if_dev,
2087 "failed to allocate jumbo Rx DMA tag\n");
2088 goto jumbo_fail;
2089 }
2090
2091 /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
2092 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2093 (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, BUS_DMA_NOWAIT |
2094 BUS_DMA_COHERENT | BUS_DMA_ZERO,
2095 &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2096 if (error != 0) {
2097 device_printf(sc_if->sk_if_dev,
2098 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2099 goto jumbo_fail;
2100 }
2101
2102 ctx.sk_busaddr = 0;
2103 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2104 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2105 sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
2106 &ctx, BUS_DMA_NOWAIT);
2107 if (error != 0) {
2108 device_printf(sc_if->sk_if_dev,
2109 "failed to load DMA'able memory for jumbo Rx ring\n");
2110 goto jumbo_fail;
2111 }
2112 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
2113
2114 /* create DMA maps for jumbo Rx buffers */
2115 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2116 &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
2117 device_printf(sc_if->sk_if_dev,
2118 "failed to create spare jumbo Rx dmamap\n");
2119 goto jumbo_fail;
2120 }
2121 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2122 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2123 jrxd->rx_m = NULL;
2124 jrxd->rx_dmamap = NULL;
2125 error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2126 &jrxd->rx_dmamap);
2127 if (error != 0) {
2128 device_printf(sc_if->sk_if_dev,
2129 "failed to create jumbo Rx dmamap\n");
2130 goto jumbo_fail;
2131 }
2132 }
2133
2134 return (0);
2135
2136 jumbo_fail:
2137 sk_dma_jumbo_free(sc_if);
2138 device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to "
2139 "resource shortage\n");
2140 sc_if->sk_jumbo_disable = 1;
2141 return (0);
2142 }
2143
2144 static void
2145 sk_dma_free(struct sk_if_softc *sc_if)
2146 {
2147 struct sk_txdesc *txd;
2148 struct sk_rxdesc *rxd;
2149 int i;
2150
2151 /* Tx ring */
2152 if (sc_if->sk_cdata.sk_tx_ring_tag) {
2153 if (sc_if->sk_rdata.sk_tx_ring_paddr)
2154 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
2155 sc_if->sk_cdata.sk_tx_ring_map);
2156 if (sc_if->sk_rdata.sk_tx_ring)
2157 bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
2158 sc_if->sk_rdata.sk_tx_ring,
2159 sc_if->sk_cdata.sk_tx_ring_map);
2160 sc_if->sk_rdata.sk_tx_ring = NULL;
2161 sc_if->sk_rdata.sk_tx_ring_paddr = 0;
2162 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
2163 sc_if->sk_cdata.sk_tx_ring_tag = NULL;
2164 }
2165 /* Rx ring */
2166 if (sc_if->sk_cdata.sk_rx_ring_tag) {
2167 if (sc_if->sk_rdata.sk_rx_ring_paddr)
2168 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
2169 sc_if->sk_cdata.sk_rx_ring_map);
2170 if (sc_if->sk_rdata.sk_rx_ring)
2171 bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
2172 sc_if->sk_rdata.sk_rx_ring,
2173 sc_if->sk_cdata.sk_rx_ring_map);
2174 sc_if->sk_rdata.sk_rx_ring = NULL;
2175 sc_if->sk_rdata.sk_rx_ring_paddr = 0;
2176 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
2177 sc_if->sk_cdata.sk_rx_ring_tag = NULL;
2178 }
2179 /* Tx buffers */
2180 if (sc_if->sk_cdata.sk_tx_tag) {
2181 for (i = 0; i < SK_TX_RING_CNT; i++) {
2182 txd = &sc_if->sk_cdata.sk_txdesc[i];
2183 if (txd->tx_dmamap) {
2184 bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
2185 txd->tx_dmamap);
2186 txd->tx_dmamap = NULL;
2187 }
2188 }
2189 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
2190 sc_if->sk_cdata.sk_tx_tag = NULL;
2191 }
2192 /* Rx buffers */
2193 if (sc_if->sk_cdata.sk_rx_tag) {
2194 for (i = 0; i < SK_RX_RING_CNT; i++) {
2195 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2196 if (rxd->rx_dmamap) {
2197 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2198 rxd->rx_dmamap);
2199 rxd->rx_dmamap = NULL;
2200 }
2201 }
2202 if (sc_if->sk_cdata.sk_rx_sparemap) {
2203 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2204 sc_if->sk_cdata.sk_rx_sparemap);
2205 sc_if->sk_cdata.sk_rx_sparemap = NULL;
2206 }
2207 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
2208 sc_if->sk_cdata.sk_rx_tag = NULL;
2209 }
2210
2211 if (sc_if->sk_cdata.sk_parent_tag) {
2212 bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
2213 sc_if->sk_cdata.sk_parent_tag = NULL;
2214 }
2215 }
2216
2217 static void
2218 sk_dma_jumbo_free(struct sk_if_softc *sc_if)
2219 {
2220 struct sk_rxdesc *jrxd;
2221 int i;
2222
2223 /* jumbo Rx ring */
2224 if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
2225 if (sc_if->sk_rdata.sk_jumbo_rx_ring_paddr)
2226 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2227 sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2228 if (sc_if->sk_rdata.sk_jumbo_rx_ring)
2229 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2230 sc_if->sk_rdata.sk_jumbo_rx_ring,
2231 sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2232 sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
2233 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = 0;
2234 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2235 sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
2236 }
2237
2238 /* jumbo Rx buffers */
2239 if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
2240 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2241 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2242 if (jrxd->rx_dmamap) {
2243 bus_dmamap_destroy(
2244 sc_if->sk_cdata.sk_jumbo_rx_tag,
2245 jrxd->rx_dmamap);
2246 jrxd->rx_dmamap = NULL;
2247 }
2248 }
2249 if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
2250 bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
2251 sc_if->sk_cdata.sk_jumbo_rx_sparemap);
2252 sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL;
2253 }
2254 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
2255 sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
2256 }
2257 }
2258
2259 static void
2260 sk_txcksum(if_t ifp, struct mbuf *m, struct sk_tx_desc *f)
2261 {
2262 struct ip *ip;
2263 u_int16_t offset;
2264 u_int8_t *p;
2265
2266 offset = sizeof(struct ip) + ETHER_HDR_LEN;
2267 for(; m && m->m_len == 0; m = m->m_next)
2268 ;
2269 if (m == NULL || m->m_len < ETHER_HDR_LEN) {
2270 if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
2271 /* checksum may be corrupted */
2272 goto sendit;
2273 }
2274 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
2275 if (m->m_len != ETHER_HDR_LEN) {
2276 if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
2277 __func__);
2278 /* checksum may be corrupted */
2279 goto sendit;
2280 }
2281 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
2282 ;
2283 if (m == NULL) {
2284 offset = sizeof(struct ip) + ETHER_HDR_LEN;
2285 /* checksum may be corrupted */
2286 goto sendit;
2287 }
2288 ip = mtod(m, struct ip *);
2289 } else {
2290 p = mtod(m, u_int8_t *);
2291 p += ETHER_HDR_LEN;
2292 ip = (struct ip *)p;
2293 }
2294 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
2295
2296 sendit:
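/*
 * Pack the checksum control word: the low 16 bits give the position where
 * the computed checksum is stored (start offset plus the stack-supplied
 * csum_data offset) and the high 16 bits give the offset at which the
 * hardware begins summing.
 */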
2297 f->sk_csum_startval = 0;
2298 f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
2299 (offset << 16));
2300 }
2301
2302 static int
2303 sk_encap(struct sk_if_softc *sc_if, struct mbuf **m_head)
2304 {
2305 struct sk_txdesc *txd;
2306 struct sk_tx_desc *f = NULL;
2307 struct mbuf *m;
2308 bus_dma_segment_t txsegs[SK_MAXTXSEGS];
2309 u_int32_t cflags, frag, si, sk_ctl;
2310 int error, i, nseg;
2311
2312 SK_IF_LOCK_ASSERT(sc_if);
2313
2314 if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
2315 return (ENOBUFS);
2316
2317 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2318 txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
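/*
 * If the mbuf chain needs more segments than the Tx map allows, compact
 * it with m_defrag() and retry the load once before giving up.
 */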
2319 if (error == EFBIG) {
2320 m = m_defrag(*m_head, M_NOWAIT);
2321 if (m == NULL) {
2322 m_freem(*m_head);
2323 *m_head = NULL;
2324 return (ENOMEM);
2325 }
2326 *m_head = m;
2327 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2328 txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2329 if (error != 0) {
2330 m_freem(*m_head);
2331 *m_head = NULL;
2332 return (error);
2333 }
2334 } else if (error != 0)
2335 return (error);
2336 if (nseg == 0) {
2337 m_freem(*m_head);
2338 *m_head = NULL;
2339 return (EIO);
2340 }
2341 if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
2342 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2343 return (ENOBUFS);
2344 }
2345
2346 m = *m_head;
2347 if ((m->m_pkthdr.csum_flags & if_gethwassist(sc_if->sk_ifp)) != 0)
2348 cflags = SK_OPCODE_CSUM;
2349 else
2350 cflags = SK_OPCODE_DEFAULT;
2351 si = frag = sc_if->sk_cdata.sk_tx_prod;
2352 for (i = 0; i < nseg; i++) {
2353 f = &sc_if->sk_rdata.sk_tx_ring[frag];
2354 f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
2355 f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
2356 sk_ctl = txsegs[i].ds_len | cflags;
2357 if (i == 0) {
2358 if (cflags == SK_OPCODE_CSUM)
2359 sk_txcksum(sc_if->sk_ifp, m, f);
2360 sk_ctl |= SK_TXCTL_FIRSTFRAG;
2361 } else
2362 sk_ctl |= SK_TXCTL_OWN;
2363 f->sk_ctl = htole32(sk_ctl);
2364 sc_if->sk_cdata.sk_tx_cnt++;
2365 SK_INC(frag, SK_TX_RING_CNT);
2366 }
2367 sc_if->sk_cdata.sk_tx_prod = frag;
2368
2369 /* set EOF on the last descriptor */
2370 frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
2371 f = &sc_if->sk_rdata.sk_tx_ring[frag];
2372 f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);
2373
2374 /* turn the first descriptor ownership to NIC */
2375 f = &sc_if->sk_rdata.sk_tx_ring[si];
2376 f->sk_ctl |= htole32(SK_TXCTL_OWN);
2377
2378 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
2379 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
2380 txd->tx_m = m;
2381
2382 /* sync descriptors */
2383 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2384 BUS_DMASYNC_PREWRITE);
2385 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2386 sc_if->sk_cdata.sk_tx_ring_map,
2387 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2388
2389 return (0);
2390 }
2391
2392 static void
2393 sk_start(if_t ifp)
2394 {
2395 struct sk_if_softc *sc_if;
2396
2397 sc_if = if_getsoftc(ifp);
2398
2399 SK_IF_LOCK(sc_if);
2400 sk_start_locked(ifp);
2401 SK_IF_UNLOCK(sc_if);
2402
2403 return;
2404 }
2405
2406 static void
2407 sk_start_locked(if_t ifp)
2408 {
2409 struct sk_softc *sc;
2410 struct sk_if_softc *sc_if;
2411 struct mbuf *m_head;
2412 int enq;
2413
2414 sc_if = if_getsoftc(ifp);
2415 sc = sc_if->sk_softc;
2416
2417 SK_IF_LOCK_ASSERT(sc_if);
2418
2419 for (enq = 0; !if_sendq_empty(ifp) &&
2420 sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
2421 m_head = if_dequeue(ifp);
2422 if (m_head == NULL)
2423 break;
2424
2425 /*
2426 * Pack the data into the transmit ring. If we
2427 * don't have room, set the OACTIVE flag and wait
2428 * for the NIC to drain the ring.
2429 */
2430 if (sk_encap(sc_if, &m_head)) {
2431 if (m_head == NULL)
2432 break;
2433 if_sendq_prepend(ifp, m_head);
2434 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
2435 break;
2436 }
2437
2438 enq++;
2439 /*
2440 * If there's a BPF listener, bounce a copy of this frame
2441 * to him.
2442 */
2443 BPF_MTAP(ifp, m_head);
2444 }
2445
2446 if (enq > 0) {
2447 /* Transmit */
2448 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2449
2450 /* Set a timeout in case the chip goes out to lunch. */
2451 sc_if->sk_watchdog_timer = 5;
2452 }
2453 }
2454
2455 static void
2456 sk_watchdog(void *arg)
2457 {
2458 struct sk_if_softc *sc_if;
2459 if_t ifp;
2460
2461 ifp = arg;
2462 sc_if = if_getsoftc(ifp);
2463
2464 SK_IF_LOCK_ASSERT(sc_if);
2465
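/*
 * sk_start_locked() arms sk_watchdog_timer and sk_txeof() clears it once
 * the ring drains; only when the countdown reaches zero with Tx work still
 * outstanding is this treated as a real timeout.
 */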
2466 if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
2467 goto done;
2468
2469 /*
2470 * Reclaim first as there is a possibility of losing Tx completion
2471 * interrupts.
2472 */
2473 sk_txeof(sc_if);
2474 if (sc_if->sk_cdata.sk_tx_cnt != 0) {
2475 if_printf(sc_if->sk_ifp, "watchdog timeout\n");
2476 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2477 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2478 sk_init_locked(sc_if);
2479 }
2480
2481 done:
2482 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
2483
2484 return;
2485 }
2486
2487 static int
2488 skc_shutdown(device_t dev)
2489 {
2490 struct sk_softc *sc;
2491
2492 sc = device_get_softc(dev);
2493 SK_LOCK(sc);
2494
2495 /* Turn off the 'driver is loaded' LED. */
2496 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2497
2498 /*
2499 * Reset the GEnesis controller. Doing this should also
2500 * assert the resets on the attached XMAC(s).
2501 */
2502 sk_reset(sc);
2503 SK_UNLOCK(sc);
2504
2505 return (0);
2506 }
2507
2508 static int
2509 skc_suspend(device_t dev)
2510 {
2511 struct sk_softc *sc;
2512 struct sk_if_softc *sc_if0, *sc_if1;
2513 if_t ifp0 = NULL, ifp1 = NULL;
2514
2515 sc = device_get_softc(dev);
2516
2517 SK_LOCK(sc);
2518
2519 sc_if0 = sc->sk_if[SK_PORT_A];
2520 sc_if1 = sc->sk_if[SK_PORT_B];
2521 if (sc_if0 != NULL)
2522 ifp0 = sc_if0->sk_ifp;
2523 if (sc_if1 != NULL)
2524 ifp1 = sc_if1->sk_ifp;
2525 if (ifp0 != NULL)
2526 sk_stop(sc_if0);
2527 if (ifp1 != NULL)
2528 sk_stop(sc_if1);
2529 sc->sk_suspended = 1;
2530
2531 SK_UNLOCK(sc);
2532
2533 return (0);
2534 }
2535
2536 static int
2537 skc_resume(device_t dev)
2538 {
2539 struct sk_softc *sc;
2540 struct sk_if_softc *sc_if0, *sc_if1;
2541 if_t ifp0 = NULL, ifp1 = NULL;
2542
2543 sc = device_get_softc(dev);
2544
2545 SK_LOCK(sc);
2546
2547 sc_if0 = sc->sk_if[SK_PORT_A];
2548 sc_if1 = sc->sk_if[SK_PORT_B];
2549 if (sc_if0 != NULL)
2550 ifp0 = sc_if0->sk_ifp;
2551 if (sc_if1 != NULL)
2552 ifp1 = sc_if1->sk_ifp;
2553 if (ifp0 != NULL && if_getflags(ifp0) & IFF_UP)
2554 sk_init_locked(sc_if0);
2555 if (ifp1 != NULL && if_getflags(ifp1) & IFF_UP)
2556 sk_init_locked(sc_if1);
2557 sc->sk_suspended = 0;
2558
2559 SK_UNLOCK(sc);
2560
2561 return (0);
2562 }
2563
2564 /*
2565 * According to the SK-NET GENESIS data sheet, the hardware can compute
2566 * two Rx checksums at the same time (each checksum start position is
2567 * programmed in the Rx descriptors). However, TCP/UDP checksumming does
2568 * not seem to work, at least on my Yukon hardware. I tried every way I
2569 * could think of to get a correct checksum value but never got one, so
2570 * TCP/UDP checksum offload is disabled for now and only IP checksum
2571 * offload is enabled.
2572 * Since a normal IP header is only 20 bytes, I don't expect this to give
2573 * much of an increase in throughput, but it doesn't seem to hurt
2574 * performance in my testing either. If you have more detailed information
2575 * about the checksum behavior of the hardware in question, please contact
2576 * yongari@FreeBSD.org so that TCP/UDP checksum offload support can be added.
2577 */
2578 static __inline void
2579 sk_rxcksum(if_t ifp, struct mbuf *m, u_int32_t csum)
2580 {
2581 struct ether_header *eh;
2582 struct ip *ip;
2583 int32_t hlen, len, pktlen;
2584 u_int16_t csum1, csum2, ipcsum;
2585
2586 pktlen = m->m_pkthdr.len;
2587 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
2588 return;
2589 eh = mtod(m, struct ether_header *);
2590 if (eh->ether_type != htons(ETHERTYPE_IP))
2591 return;
2592 ip = (struct ip *)(eh + 1);
2593 if (ip->ip_v != IPVERSION)
2594 return;
2595 hlen = ip->ip_hl << 2;
2596 pktlen -= sizeof(struct ether_header);
2597 if (hlen < sizeof(struct ip))
2598 return;
2599 if (ntohs(ip->ip_len) < hlen)
2600 return;
2601 if (ntohs(ip->ip_len) != pktlen)
2602 return;
2603
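/*
 * The descriptor's 32-bit checksum word packs two 16-bit hardware sums.
 * In the options-free case handled below, folding the first sum with the
 * complement of the second is intended to leave the one's complement sum
 * over the IP header alone, which is then compared against 0xffff.
 */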
2604 csum1 = htons(csum & 0xffff);
2605 csum2 = htons((csum >> 16) & 0xffff);
2606 ipcsum = in_addword(csum1, ~csum2 & 0xffff);
2607 /* checksum fixup for IP options */
2608 len = hlen - sizeof(struct ip);
2609 if (len > 0) {
2610 /*
2611 * If the second checksum value were correct we could compute
2612 * the IP checksum with simple math. Unfortunately the second
2613 * value appears to be wrong, so we cannot verify the checksum
2614 * from it (it seems some extra magic is needed to get a correct
2615 * value). If the second checksum value were correct we could
2616 * also recover the TCP/UDP checksum here, although it would
2617 * still require a pseudo-header checksum calculation due to
2618 * hardware limitations.
2619 */
2620 return;
2621 }
2622 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2623 if (ipcsum == 0xffff)
2624 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2625 }
2626
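/*
 * Sanity-check the MAC receive status word against the length reported in
 * the descriptor; returns 1 for a frame worth passing up the stack and 0
 * for one that should be counted as an input error.
 */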
2627 static __inline int
2628 sk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
2629 {
2630
2631 if (sc->sk_type == SK_GENESIS) {
2632 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
2633 XM_RXSTAT_BYTES(stat) != len)
2634 return (0);
2635 } else {
2636 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
2637 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
2638 YU_RXSTAT_JABBER)) != 0 ||
2639 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
2640 YU_RXSTAT_BYTES(stat) != len)
2641 return (0);
2642 }
2643
2644 return (1);
2645 }
2646
2647 static void
2648 sk_rxeof(struct sk_if_softc *sc_if)
2649 {
2650 struct sk_softc *sc;
2651 struct mbuf *m;
2652 if_t ifp;
2653 struct sk_rx_desc *cur_rx;
2654 struct sk_rxdesc *rxd;
2655 int cons, prog;
2656 u_int32_t csum, rxstat, sk_ctl;
2657
2658 sc = sc_if->sk_softc;
2659 ifp = sc_if->sk_ifp;
2660
2661 SK_IF_LOCK_ASSERT(sc_if);
2662
2663 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2664 sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);
2665
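/*
 * Walk the Rx ring from the consumer index until we hit a descriptor the
 * NIC still owns, handing good frames to the stack and recycling the
 * buffers of frames that fail validation.
 */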
2666 prog = 0;
2667 for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
2668 prog++, SK_INC(cons, SK_RX_RING_CNT)) {
2669 cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
2670 sk_ctl = le32toh(cur_rx->sk_ctl);
2671 if ((sk_ctl & SK_RXCTL_OWN) != 0)
2672 break;
2673 rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
2674 rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2675
2676 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2677 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2678 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2679 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2680 SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
2681 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2682 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2683 sk_discard_rxbuf(sc_if, cons);
2684 continue;
2685 }
2686
2687 m = rxd->rx_m;
2688 csum = le32toh(cur_rx->sk_csum);
2689 if (sk_newbuf(sc_if, cons) != 0) {
2690 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2691 /* reuse old buffer */
2692 sk_discard_rxbuf(sc_if, cons);
2693 continue;
2694 }
2695 m->m_pkthdr.rcvif = ifp;
2696 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2697 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2698 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2699 sk_rxcksum(ifp, m, csum);
2700 SK_IF_UNLOCK(sc_if);
2701 if_input(ifp, m);
2702 SK_IF_LOCK(sc_if);
2703 }
2704
2705 if (prog > 0) {
2706 sc_if->sk_cdata.sk_rx_cons = cons;
2707 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2708 sc_if->sk_cdata.sk_rx_ring_map,
2709 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2710 }
2711 }
2712
2713 static void
2714 sk_jumbo_rxeof(struct sk_if_softc *sc_if)
2715 {
2716 struct sk_softc *sc;
2717 struct mbuf *m;
2718 if_t ifp;
2719 struct sk_rx_desc *cur_rx;
2720 struct sk_rxdesc *jrxd;
2721 int cons, prog;
2722 u_int32_t csum, rxstat, sk_ctl;
2723
2724 sc = sc_if->sk_softc;
2725 ifp = sc_if->sk_ifp;
2726
2727 SK_IF_LOCK_ASSERT(sc_if);
2728
2729 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2730 sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);
2731
2732 prog = 0;
2733 for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
2734 prog < SK_JUMBO_RX_RING_CNT;
2735 prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
2736 cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
2737 sk_ctl = le32toh(cur_rx->sk_ctl);
2738 if ((sk_ctl & SK_RXCTL_OWN) != 0)
2739 break;
2740 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
2741 rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2742
2743 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2744 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2745 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2746 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2747 SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
2748 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2749 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2750 sk_discard_jumbo_rxbuf(sc_if, cons);
2751 continue;
2752 }
2753
2754 m = jrxd->rx_m;
2755 csum = le32toh(cur_rx->sk_csum);
2756 if (sk_jumbo_newbuf(sc_if, cons) != 0) {
2757 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2758 /* reuse old buffer */
2759 sk_discard_jumbo_rxbuf(sc_if, cons);
2760 continue;
2761 }
2762 m->m_pkthdr.rcvif = ifp;
2763 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2764 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2765 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2766 sk_rxcksum(ifp, m, csum);
2767 SK_IF_UNLOCK(sc_if);
2768 if_input(ifp, m);
2769 SK_IF_LOCK(sc_if);
2770 }
2771
2772 if (prog > 0) {
2773 sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
2774 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2775 sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2776 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2777 }
2778 }
2779
2780 static void
2781 sk_txeof(struct sk_if_softc *sc_if)
2782 {
2783 struct sk_txdesc *txd;
2784 struct sk_tx_desc *cur_tx;
2785 if_t ifp;
2786 u_int32_t idx, sk_ctl;
2787
2788 ifp = sc_if->sk_ifp;
2789
2790 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2791 if (txd == NULL)
2792 return;
2793 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2794 sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
2795 /*
2796 * Go through our tx ring and free mbufs for those
2797 * frames that have been sent.
2798 */
2799 for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
2800 if (sc_if->sk_cdata.sk_tx_cnt <= 0)
2801 break;
2802 cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
2803 sk_ctl = le32toh(cur_tx->sk_ctl);
2804 if (sk_ctl & SK_TXCTL_OWN)
2805 break;
2806 sc_if->sk_cdata.sk_tx_cnt--;
2807 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2808 if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
2809 continue;
2810 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2811 BUS_DMASYNC_POSTWRITE);
2812 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2813
2814 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2815 m_freem(txd->tx_m);
2816 txd->tx_m = NULL;
2817 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
2818 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
2819 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2820 }
2821 sc_if->sk_cdata.sk_tx_cons = idx;
2822 sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
2823
2824 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2825 sc_if->sk_cdata.sk_tx_ring_map,
2826 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2827 }
2828
2829 static void
2830 sk_tick(void *xsc_if)
2831 {
2832 struct sk_if_softc *sc_if;
2833 struct mii_data *mii;
2834 if_t ifp;
2835 int i;
2836
2837 sc_if = xsc_if;
2838 ifp = sc_if->sk_ifp;
2839 mii = device_get_softc(sc_if->sk_miibus);
2840
2841 if (!(if_getflags(ifp) & IFF_UP))
2842 return;
2843
2844 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2845 sk_intr_bcom(sc_if);
2846 return;
2847 }
2848
2849 /*
2850 * According to SysKonnect, the correct way to verify that
2851 * the link has come back up is to poll bit 0 of the GPIO
2852 * register three times. This pin has the signal from the
2853 * link_sync pin connected to it; if we read the same link
2854 * state 3 times in a row, we know the link is up.
2855 */
2856 for (i = 0; i < 3; i++) {
2857 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2858 break;
2859 }
2860
2861 if (i != 3) {
2862 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
2863 return;
2864 }
2865
2866 /* Turn the GP0 interrupt back on. */
2867 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2868 SK_XM_READ_2(sc_if, XM_ISR);
2869 mii_tick(mii);
2870 callout_stop(&sc_if->sk_tick_ch);
2871 }
2872
2873 static void
2874 sk_yukon_tick(void *xsc_if)
2875 {
2876 struct sk_if_softc *sc_if;
2877 struct mii_data *mii;
2878
2879 sc_if = xsc_if;
2880 mii = device_get_softc(sc_if->sk_miibus);
2881
2882 mii_tick(mii);
2883 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
2884 }
2885
2886 static void
2887 sk_intr_bcom(struct sk_if_softc *sc_if)
2888 {
2889 struct mii_data *mii;
2890 if_t ifp;
2891 int status;
2892 mii = device_get_softc(sc_if->sk_miibus);
2893 ifp = sc_if->sk_ifp;
2894
2895 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2896
2897 /*
2898 * Read the PHY interrupt register to make sure
2899 * we clear any pending interrupts.
2900 */
2901 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
2902
2903 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
2904 sk_init_xmac(sc_if);
2905 return;
2906 }
2907
2908 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
2909 int lstat;
2910 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
2911 BRGPHY_MII_AUXSTS);
2912
2913 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
2914 mii_mediachg(mii);
2915 /* Turn off the link LED. */
2916 SK_IF_WRITE_1(sc_if, 0,
2917 SK_LINKLED1_CTL, SK_LINKLED_OFF);
2918 sc_if->sk_link = 0;
2919 } else if (status & BRGPHY_ISR_LNK_CHG) {
2920 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2921 BRGPHY_MII_IMR, 0xFF00);
2922 mii_tick(mii);
2923 sc_if->sk_link = 1;
2924 /* Turn on the link LED. */
2925 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2926 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
2927 SK_LINKLED_BLINK_OFF);
2928 } else {
2929 mii_tick(mii);
2930 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
2931 }
2932 }
2933
2934 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2935
2936 return;
2937 }
2938
2939 static void
2940 sk_intr_xmac(struct sk_if_softc *sc_if)
2941 {
2942 u_int16_t status;
2943
2944 status = SK_XM_READ_2(sc_if, XM_ISR);
2945
2946 /*
2947 * Link has gone down. Start MII tick timeout to
2948 * watch for link resync.
2949 */
2950 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
2951 if (status & XM_ISR_GP0_SET) {
2952 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2953 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
2954 }
2955
2956 if (status & XM_ISR_AUTONEG_DONE) {
2957 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
2958 }
2959 }
2960
2961 if (status & XM_IMR_TX_UNDERRUN)
2962 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
2963
2964 if (status & XM_IMR_RX_OVERRUN)
2965 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
2966
2967 status = SK_XM_READ_2(sc_if, XM_ISR);
2968
2969 return;
2970 }
2971
2972 static void
2973 sk_intr_yukon(struct sk_if_softc *sc_if)
2974 {
2975 u_int8_t status;
2976
2977 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
2978 /* RX overrun */
2979 if ((status & SK_GMAC_INT_RX_OVER) != 0) {
2980 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
2981 SK_RFCTL_RX_FIFO_OVER);
2982 }
2983 /* TX underrun */
2984 if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
2985 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
2986 SK_TFCTL_TX_FIFO_UNDER);
2987 }
2988 }
2989
2990 static void
2991 sk_intr(void *xsc)
2992 {
2993 struct sk_softc *sc = xsc;
2994 struct sk_if_softc *sc_if0, *sc_if1;
2995 if_t ifp0 = NULL, ifp1 = NULL;
2996 u_int32_t status;
2997
2998 SK_LOCK(sc);
2999
3000 status = CSR_READ_4(sc, SK_ISSR);
3001 if (status == 0 || status == 0xffffffff || sc->sk_suspended)
3002 goto done_locked;
3003
3004 sc_if0 = sc->sk_if[SK_PORT_A];
3005 sc_if1 = sc->sk_if[SK_PORT_B];
3006
3007 if (sc_if0 != NULL)
3008 ifp0 = sc_if0->sk_ifp;
3009 if (sc_if1 != NULL)
3010 ifp1 = sc_if1->sk_ifp;
3011
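/*
 * Service events and re-read the interrupt source register until no
 * unmasked status bits remain; interrupts are re-enabled and pending
 * transmits kicked once the loop exits.
 */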
3012 for (; (status &= sc->sk_intrmask) != 0;) {
3013 /* Handle receive interrupts first. */
3014 if (status & SK_ISR_RX1_EOF) {
3015 if (if_getmtu(ifp0) > SK_MAX_FRAMELEN)
3016 sk_jumbo_rxeof(sc_if0);
3017 else
3018 sk_rxeof(sc_if0);
3019 CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
3020 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3021 }
3022 if (status & SK_ISR_RX2_EOF) {
3023 if (if_getmtu(ifp1) > SK_MAX_FRAMELEN)
3024 sk_jumbo_rxeof(sc_if1);
3025 else
3026 sk_rxeof(sc_if1);
3027 CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
3028 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3029 }
3030
3031 /* Then transmit interrupts. */
3032 if (status & SK_ISR_TX1_S_EOF) {
3033 sk_txeof(sc_if0);
3034 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
3035 }
3036 if (status & SK_ISR_TX2_S_EOF) {
3037 sk_txeof(sc_if1);
3038 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
3039 }
3040
3041 /* Then MAC interrupts. */
3042 if (status & SK_ISR_MAC1 &&
3043 if_getdrvflags(ifp0) & IFF_DRV_RUNNING) {
3044 if (sc->sk_type == SK_GENESIS)
3045 sk_intr_xmac(sc_if0);
3046 else
3047 sk_intr_yukon(sc_if0);
3048 }
3049
3050 if (status & SK_ISR_MAC2 &&
3051 if_getdrvflags(ifp1) & IFF_DRV_RUNNING) {
3052 if (sc->sk_type == SK_GENESIS)
3053 sk_intr_xmac(sc_if1);
3054 else
3055 sk_intr_yukon(sc_if1);
3056 }
3057
3058 if (status & SK_ISR_EXTERNAL_REG) {
3059 if (ifp0 != NULL &&
3060 sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
3061 sk_intr_bcom(sc_if0);
3062 if (ifp1 != NULL &&
3063 sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
3064 sk_intr_bcom(sc_if1);
3065 }
3066 status = CSR_READ_4(sc, SK_ISSR);
3067 }
3068
3069 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3070
3071 if (ifp0 != NULL && !if_sendq_empty(ifp0))
3072 sk_start_locked(ifp0);
3073 if (ifp1 != NULL && !if_sendq_empty(ifp1))
3074 sk_start_locked(ifp1);
3075
3076 done_locked:
3077 SK_UNLOCK(sc);
3078 }
3079
3080 static void
3081 sk_init_xmac(struct sk_if_softc *sc_if)
3082 {
3083 struct sk_softc *sc;
3084 if_t ifp;
3085 u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2];
3086 static const struct sk_bcom_hack bhack[] = {
3087 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
3088 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
3089 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
3090 { 0, 0 } };
3091
3092 SK_IF_LOCK_ASSERT(sc_if);
3093
3094 sc = sc_if->sk_softc;
3095 ifp = sc_if->sk_ifp;
3096
3097 /* Unreset the XMAC. */
3098 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
3099 DELAY(1000);
3100
3101 /* Reset the XMAC's internal state. */
3102 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3103
3104 /* Save the XMAC II revision */
3105 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
3106
3107 /*
3108 * Perform additional initialization for external PHYs,
3109 * namely for the 1000baseTX cards that use the XMAC's
3110 * GMII mode.
3111 */
3112 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3113 int i = 0;
3114 u_int32_t val;
3115
3116 /* Take PHY out of reset. */
3117 val = sk_win_read_4(sc, SK_GPIO);
3118 if (sc_if->sk_port == SK_PORT_A)
3119 val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
3120 else
3121 val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
3122 sk_win_write_4(sc, SK_GPIO, val);
3123
3124 /* Enable GMII mode on the XMAC. */
3125 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
3126
3127 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3128 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
3129 DELAY(10000);
3130 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3131 BRGPHY_MII_IMR, 0xFFF0);
3132
3133 /*
3134 * Early versions of the BCM5400 apparently have
3135 * a bug that requires them to have their reserved
3136 * registers initialized to some magic values. I don't
3137 * know what the numbers do, I'm just the messenger.
3138 */
3139 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
3140 == 0x6041) {
3141 while(bhack[i].reg) {
3142 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3143 bhack[i].reg, bhack[i].val);
3144 i++;
3145 }
3146 }
3147 }
3148
3149 /* Set station address */
3150 bcopy(if_getlladdr(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
3151 SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
3152 SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
3153 SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
3154 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
3155
3156 if (if_getflags(ifp) & IFF_BROADCAST) {
3157 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3158 } else {
3159 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3160 }
3161
3162 /* We don't need the FCS appended to the packet. */
3163 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
3164
3165 /* We want short frames padded to 60 bytes. */
3166 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
3167
3168 /*
3169 * Enable the reception of all error frames. This is
3170 * a necessary evil due to the design of the XMAC. The
3171 * XMAC's receive FIFO is only 8K in size, however jumbo
3172 * frames can be up to 9000 bytes in length. When bad
3173 * frame filtering is enabled, the XMAC's RX FIFO operates
3174 * in 'store and forward' mode. For this to work, the
3175 * entire frame has to fit into the FIFO, but that means
3176 * that jumbo frames larger than 8192 bytes will be
3177 * truncated. Disabling all bad frame filtering causes
3178 * the RX FIFO to operate in streaming mode, in which
3179 * case the XMAC will start transferring frames out of the
3180 * RX FIFO as soon as the FIFO threshold is reached.
3181 */
3182 if (if_getmtu(ifp) > SK_MAX_FRAMELEN) {
3183 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
3184 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
3185 XM_MODE_RX_INRANGELEN);
3186 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3187 } else
3188 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3189
3190 /*
3191 * Bump up the transmit threshold. This helps hold off transmit
3192 * underruns when we're blasting traffic from both ports at once.
3193 */
3194 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
3195
3196 /* Set Rx filter */
3197 sk_rxfilter_genesis(sc_if);
3198
3199 /* Clear and enable interrupts */
3200 SK_XM_READ_2(sc_if, XM_ISR);
3201 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
3202 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
3203 else
3204 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3205
3206 /* Configure MAC arbiter */
3207 switch(sc_if->sk_xmac_rev) {
3208 case XM_XMAC_REV_B2:
3209 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
3210 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
3211 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
3212 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
3213 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
3214 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
3215 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
3216 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
3217 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3218 break;
3219 case XM_XMAC_REV_C1:
3220 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
3221 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
3222 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
3223 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
3224 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
3225 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
3226 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
3227 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
3228 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3229 break;
3230 default:
3231 break;
3232 }
3233 sk_win_write_2(sc, SK_MACARB_CTL,
3234 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
3235
3236 sc_if->sk_link = 1;
3237
3238 return;
3239 }
3240
3241 static void
3242 sk_init_yukon(struct sk_if_softc *sc_if)
3243 {
3244 u_int32_t phy, v;
3245 u_int16_t reg;
3246 struct sk_softc *sc;
3247 if_t ifp;
3248 u_int8_t *eaddr;
3249 int i;
3250
3251 SK_IF_LOCK_ASSERT(sc_if);
3252
3253 sc = sc_if->sk_softc;
3254 ifp = sc_if->sk_ifp;
3255
3256 if (sc->sk_type == SK_YUKON_LITE &&
3257 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3258 /*
3259 * Workaround for COMA mode: assert PHY reset.
3260 * Otherwise the chip will not come out of
3261 * powerdown (coma) correctly.
3262 */
3263 v = sk_win_read_4(sc, SK_GPIO);
3264 v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
3265 sk_win_write_4(sc, SK_GPIO, v);
3266 }
3267
3268 /* GMAC and GPHY Reset */
3269 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
3270 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
3271 DELAY(1000);
3272
3273 if (sc->sk_type == SK_YUKON_LITE &&
3274 sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3275 /*
3276 * Workaround for COMA mode: deassert PHY reset.
3277 */
3278 v = sk_win_read_4(sc, SK_GPIO);
3279 v |= SK_GPIO_DIR9;
3280 v &= ~SK_GPIO_DAT9;
3281 sk_win_write_4(sc, SK_GPIO, v);
3282 }
3283
3284 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
3285 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
3286
3287 if (sc->sk_coppertype)
3288 phy |= SK_GPHY_COPPER;
3289 else
3290 phy |= SK_GPHY_FIBER;
3291
3292 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
3293 DELAY(1000);
3294 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
3295 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
3296 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
3297
3298 /* unused read of the interrupt source register */
3299 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
3300
3301 reg = SK_YU_READ_2(sc_if, YUKON_PAR);
3302
3303 /* MIB Counter Clear Mode set */
3304 reg |= YU_PAR_MIB_CLR;
3305 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3306
3307 /* MIB Counter Clear Mode clear */
3308 reg &= ~YU_PAR_MIB_CLR;
3309 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3310
3311 /* receive control reg */
3312 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
3313
3314 /* transmit parameter register */
3315 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
3316 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
3317
3318 /* serial mode register */
3319 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
3320 if (if_getmtu(ifp) > SK_MAX_FRAMELEN)
3321 reg |= YU_SMR_MFL_JUMBO;
3322 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
3323
3324 /* Setup Yukon's station address */
3325 eaddr = if_getlladdr(sc_if->sk_ifp);
3326 for (i = 0; i < 3; i++)
3327 SK_YU_WRITE_2(sc_if, SK_MAC0_0 + i * 4,
3328 eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3329 /* Set GMAC source address of flow control. */
3330 for (i = 0; i < 3; i++)
3331 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
3332 eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3333 /* Set GMAC virtual address. */
3334 for (i = 0; i < 3; i++)
3335 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4,
3336 eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3337
3338 /* Set Rx filter */
3339 sk_rxfilter_yukon(sc_if);
3340
3341 /* enable interrupt mask for counter overflows */
3342 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
3343 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
3344 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
3345
3346 /* Configure RX MAC FIFO Flush Mask */
3347 v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
3348 YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
3349 YU_RXSTAT_JABBER;
3350 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
3351
3352 /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
3353 if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
3354 v = SK_TFCTL_OPERATION_ON;
3355 else
3356 v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
3357 /* Configure RX MAC FIFO */
3358 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
3359 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);
3360
3361 /* Increase flush threshold to 64 bytes */
3362 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
3363 SK_RFCTL_FIFO_THRESHOLD + 1);
3364
3365 /* Configure TX MAC FIFO */
3366 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
3367 SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
3368 }
3369
3370 /*
3371 * Note that to properly initialize any part of the GEnesis chip,
3372 * you first have to take it out of reset mode.
3373 */
3374 static void
3375 sk_init(void *xsc)
3376 {
3377 struct sk_if_softc *sc_if = xsc;
3378
3379 SK_IF_LOCK(sc_if);
3380 sk_init_locked(sc_if);
3381 SK_IF_UNLOCK(sc_if);
3382
3383 return;
3384 }
3385
3386 static void
3387 sk_init_locked(struct sk_if_softc *sc_if)
3388 {
3389 struct sk_softc *sc;
3390 if_t ifp;
3391 struct mii_data *mii;
3392 u_int16_t reg;
3393 u_int32_t imr;
3394 int error;
3395
3396 SK_IF_LOCK_ASSERT(sc_if);
3397
3398 ifp = sc_if->sk_ifp;
3399 sc = sc_if->sk_softc;
3400 mii = device_get_softc(sc_if->sk_miibus);
3401
3402 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
3403 return;
3404
3405 /* Cancel pending I/O and free all RX/TX buffers. */
3406 sk_stop(sc_if);
3407
3408 if (sc->sk_type == SK_GENESIS) {
3409 /* Configure LINK_SYNC LED */
3410 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
3411 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3412 SK_LINKLED_LINKSYNC_ON);
3413
3414 /* Configure RX LED */
3415 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
3416 SK_RXLEDCTL_COUNTER_START);
3417
3418 /* Configure TX LED */
3419 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
3420 SK_TXLEDCTL_COUNTER_START);
3421 }
3422
3423 /*
3424 * Configure descriptor poll timer
3425 *
3426 * The SK-NET GENESIS data sheet says that the Start transmit command
3427 * can be lost under certain conditions due to CPU/cache related
3428 * interim storage problems. The document recommends a polling
3429 * mechanism that regularly sends a Start transmit command to initiate
3430 * the transfer of ready descriptors. To cope with this issue, sk(4)
3431 * enables the descriptor poll timer to initiate descriptor processing
3432 * periodically, as defined by SK_DPT_TIMER_MAX. However, sk(4) still
3433 * issues SK_TXBMU_TX_START to the Tx BMU to get the Tx command
3434 * executed quickly instead of waiting for the next descriptor polling
3435 * time. The same approach might apply to the Rx side too, but it does
3436 * not seem to be needed at the moment.
3437 * Since sk(4) uses descriptor polling only as a last resort, there is
3438 * no need to set a polling interval smaller than the maximum allowed.
3439 */
3440 SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
3441
3442 /* Configure I2C registers */
3443
3444 /* Configure XMAC(s) */
3445 switch (sc->sk_type) {
3446 case SK_GENESIS:
3447 sk_init_xmac(sc_if);
3448 break;
3449 case SK_YUKON:
3450 case SK_YUKON_LITE:
3451 case SK_YUKON_LP:
3452 sk_init_yukon(sc_if);
3453 break;
3454 }
3455 mii_mediachg(mii);
3456
3457 if (sc->sk_type == SK_GENESIS) {
3458 /* Configure MAC FIFOs */
3459 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
3460 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
3461 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
3462
3463 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
3464 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
3465 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
3466 }
3467
3468 /* Configure transmit arbiter(s) */
3469 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
3470 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
3471
3472 /* Configure RAMbuffers */
3473 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
3474 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
3475 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
3476 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
3477 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
3478 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
3479
3480 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
3481 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
3482 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
3483 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
3484 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
3485 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
3486 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
3487
3488 /* Configure BMUs */
3489 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
3490 if (if_getmtu(ifp) > SK_MAX_FRAMELEN) {
3491 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3492 SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3493 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3494 SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3495 } else {
3496 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3497 SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
3498 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3499 SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
3500 }
3501
3502 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
3503 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
3504 SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
3505 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
3506 SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));
3507
3508 /* Init descriptors */
3509 if (if_getmtu(ifp) > SK_MAX_FRAMELEN)
3510 error = sk_init_jumbo_rx_ring(sc_if);
3511 else
3512 error = sk_init_rx_ring(sc_if);
3513 if (error != 0) {
3514 device_printf(sc_if->sk_if_dev,
3515 "initialization failed: no memory for rx buffers\n");
3516 sk_stop(sc_if);
3517 return;
3518 }
3519 sk_init_tx_ring(sc_if);
3520
3521 /* Set interrupt moderation if changed via sysctl. */
3522 imr = sk_win_read_4(sc, SK_IMTIMERINIT);
3523 if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
3524 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
3525 sc->sk_int_ticks));
3526 if (bootverbose)
3527 device_printf(sc_if->sk_if_dev,
3528 "interrupt moderation is %d us.\n",
3529 sc->sk_int_mod);
3530 }
3531
3532 /* Configure interrupt handling */
3533 CSR_READ_4(sc, SK_ISSR);
3534 if (sc_if->sk_port == SK_PORT_A)
3535 sc->sk_intrmask |= SK_INTRS1;
3536 else
3537 sc->sk_intrmask |= SK_INTRS2;
3538
3539 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
3540
3541 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3542
3543 /* Start BMUs. */
3544 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
3545
3546 switch(sc->sk_type) {
3547 case SK_GENESIS:
3548 /* Enable XMACs TX and RX state machines */
3549 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
3550 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3551 break;
3552 case SK_YUKON:
3553 case SK_YUKON_LITE:
3554 case SK_YUKON_LP:
3555 reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
3556 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
3557 #if 0
3558 /* XXX disable 100Mbps and full duplex mode? */
3559 reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
3560 #endif
3561 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
3562 }
3563
3564 /* Activate descriptor polling timer */
3565 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
3566 /* start transfer of Tx descriptors */
3567 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
3568
3569 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
3570 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3571
3572 switch (sc->sk_type) {
3573 case SK_YUKON:
3574 case SK_YUKON_LITE:
3575 case SK_YUKON_LP:
3576 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
3577 break;
3578 }
3579
3580 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
3581
3582 return;
3583 }
3584
3585 static void
3586 sk_stop(struct sk_if_softc *sc_if)
3587 {
3588 int i;
3589 struct sk_softc *sc;
3590 struct sk_txdesc *txd;
3591 struct sk_rxdesc *rxd;
3592 struct sk_rxdesc *jrxd;
3593 if_t ifp;
3594 u_int32_t val;
3595
3596 SK_IF_LOCK_ASSERT(sc_if);
3597 sc = sc_if->sk_softc;
3598 ifp = sc_if->sk_ifp;
3599
3600 callout_stop(&sc_if->sk_tick_ch);
3601 callout_stop(&sc_if->sk_watchdog_ch);
3602
3603 /* stop Tx descriptor polling timer */
3604 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
3605 /* stop transfer of Tx descriptors */
3606 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
3607 for (i = 0; i < SK_TIMEOUT; i++) {
3608 val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
3609 if ((val & SK_TXBMU_TX_STOP) == 0)
3610 break;
3611 DELAY(1);
3612 }
3613 if (i == SK_TIMEOUT)
3614 device_printf(sc_if->sk_if_dev,
3615 "can not stop transfer of Tx descriptor\n");
3616 /* stop transfer of Rx descriptors */
3617 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
3618 for (i = 0; i < SK_TIMEOUT; i++) {
3619 val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
3620 if ((val & SK_RXBMU_RX_STOP) == 0)
3621 break;
3622 DELAY(1);
3623 }
3624 if (i == SK_TIMEOUT)
3625 device_printf(sc_if->sk_if_dev,
3626 "can not stop transfer of Rx descriptor\n");
3627
3628 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3629 /* Put PHY back into reset. */
3630 val = sk_win_read_4(sc, SK_GPIO);
3631 if (sc_if->sk_port == SK_PORT_A) {
3632 val |= SK_GPIO_DIR0;
3633 val &= ~SK_GPIO_DAT0;
3634 } else {
3635 val |= SK_GPIO_DIR2;
3636 val &= ~SK_GPIO_DAT2;
3637 }
3638 sk_win_write_4(sc, SK_GPIO, val);
3639 }
3640
3641 /* Turn off various components of this interface. */
3642 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3643 switch (sc->sk_type) {
3644 case SK_GENESIS:
3645 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
3646 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
3647 break;
3648 case SK_YUKON:
3649 case SK_YUKON_LITE:
3650 case SK_YUKON_LP:
3651 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
3652 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
3653 break;
3654 }
3655 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
3656 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3657 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
3658 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3659 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
3660 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3661 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3662 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
3663 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
3664
3665 /* Disable interrupts */
3666 if (sc_if->sk_port == SK_PORT_A)
3667 sc->sk_intrmask &= ~SK_INTRS1;
3668 else
3669 sc->sk_intrmask &= ~SK_INTRS2;
3670 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3671
3672 SK_XM_READ_2(sc_if, XM_ISR);
3673 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3674
3675 /* Free RX and TX mbufs still in the queues. */
3676 for (i = 0; i < SK_RX_RING_CNT; i++) {
3677 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
3678 if (rxd->rx_m != NULL) {
3679 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
3680 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3681 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
3682 rxd->rx_dmamap);
3683 m_freem(rxd->rx_m);
3684 rxd->rx_m = NULL;
3685 }
3686 }
3687 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
3688 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
3689 if (jrxd->rx_m != NULL) {
3690 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
3691 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3692 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
3693 jrxd->rx_dmamap);
3694 m_freem(jrxd->rx_m);
3695 jrxd->rx_m = NULL;
3696 }
3697 }
3698 for (i = 0; i < SK_TX_RING_CNT; i++) {
3699 txd = &sc_if->sk_cdata.sk_txdesc[i];
3700 if (txd->tx_m != NULL) {
3701 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
3702 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3703 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
3704 txd->tx_dmamap);
3705 m_freem(txd->tx_m);
3706 txd->tx_m = NULL;
3707 }
3708 }
3709
3710 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING|IFF_DRV_OACTIVE));
3711
3712 return;
3713 }
3714
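/*
 * Helper for the interrupt moderation sysctl below: accept a new value only
 * if it lies within [low, high].
 */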
3715 static int
3716 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3717 {
3718 int error, value;
3719
3720 if (!arg1)
3721 return (EINVAL);
3722 value = *(int *)arg1;
3723 error = sysctl_handle_int(oidp, &value, 0, req);
3724 if (error || !req->newptr)
3725 return (error);
3726 if (value < low || value > high)
3727 return (EINVAL);
3728 *(int *)arg1 = value;
3729 return (0);
3730 }
3731
3732 static int
3733 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
3734 {
3735 return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
3736 }
3737