/*	$OpenBSD: if_se.c,v 1.12 2015/04/30 07:51:07 mpi Exp $	*/

/*-
 * Copyright (c) 2009, 2010 Christopher Zimmermann <madroach@zakweb.de>
 * Copyright (c) 2008, 2009, 2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007, 2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL AUTHORS OR
 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>.  Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
 * review and very useful comments.
 *
 * Ported to OpenBSD by Christopher Zimmermann 2009/10
 *
 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
 * Linux and Solaris drivers.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/if_sereg.h>

#define SE_RX_RING_CNT		256 /* [8, 1024] */
#define SE_TX_RING_CNT		256 /* [8, 8192] */
#define	SE_RX_BUF_ALIGN		sizeof(uint64_t)

#define SE_RX_RING_SZ		(SE_RX_RING_CNT * sizeof(struct se_desc))
#define SE_TX_RING_SZ		(SE_TX_RING_CNT * sizeof(struct se_desc))

struct se_list_data {
	struct se_desc		*se_rx_ring;
	struct se_desc		*se_tx_ring;
	bus_dmamap_t		se_rx_dmamap;
	bus_dmamap_t		se_tx_dmamap;
};

struct se_chain_data {
	struct mbuf		*se_rx_mbuf[SE_RX_RING_CNT];
	struct mbuf		*se_tx_mbuf[SE_TX_RING_CNT];
	bus_dmamap_t		se_rx_map[SE_RX_RING_CNT];
	bus_dmamap_t		se_tx_map[SE_TX_RING_CNT];
	uint			se_rx_prod;
	uint			se_tx_prod;
	uint			se_tx_cons;
	uint			se_tx_cnt;
};

struct se_softc {
	struct device		 sc_dev;
	void			*sc_ih;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;
	bus_dma_tag_t		 sc_dmat;

	struct mii_data		 sc_mii;
	struct arpcom		 sc_ac;

	struct se_list_data	 se_ldata;
	struct se_chain_data	 se_cdata;

	struct timeout		 sc_tick_tmo;

	int			 sc_flags;
#define	SE_FLAG_FASTETHER	0x0001
#define	SE_FLAG_RGMII		0x0010
#define	SE_FLAG_LINK		0x8000
};

/*
 * Various supported device vendors/types and their names.
 */
const struct pci_matchid se_devices[] = {
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190 },
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_191 }
};

int	se_match(struct device *, void *, void *);
void	se_attach(struct device *, struct device *, void *);
int	se_activate(struct device *, int);

const struct cfattach se_ca = {
	sizeof(struct se_softc),
	se_match, se_attach, NULL, se_activate
};

struct cfdriver se_cd = {
	NULL, "se", DV_IFNET
};

uint32_t
	se_miibus_cmd(struct se_softc *, uint32_t);
int	se_miibus_readreg(struct device *, int, int);
void	se_miibus_writereg(struct device *, int, int, int);
void	se_miibus_statchg(struct device *);

int	se_newbuf(struct se_softc *, uint);
void	se_discard_rxbuf(struct se_softc *, uint);
int	se_encap(struct se_softc *, struct mbuf *, uint *);
void	se_rxeof(struct se_softc *);
void	se_txeof(struct se_softc *);
int	se_intr(void *);
void	se_tick(void *);
void	se_start(struct ifnet *);
int	se_ioctl(struct ifnet *, u_long, caddr_t);
int	se_init(struct ifnet *);
void	se_stop(struct se_softc *);
void	se_watchdog(struct ifnet *);
int	se_ifmedia_upd(struct ifnet *);
void	se_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int	se_pcib_match(struct pci_attach_args *);
int	se_get_mac_addr_apc(struct se_softc *, uint8_t *);
int	se_get_mac_addr_eeprom(struct se_softc *, uint8_t *);
uint16_t
	se_read_eeprom(struct se_softc *, int);

void	se_iff(struct se_softc *);
void	se_reset(struct se_softc *);
int	se_list_rx_init(struct se_softc *);
int	se_list_rx_free(struct se_softc *);
int	se_list_tx_init(struct se_softc *);
int	se_list_tx_free(struct se_softc *);

/*
 * Register space access macros.
 */

#define	CSR_WRITE_4(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define	CSR_WRITE_2(sc, reg, val) \
	bus_space_write_2((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define	CSR_WRITE_1(sc, reg, val) \
	bus_space_write_1((sc)->sc_iot, (sc)->sc_ioh, reg, val)

#define	CSR_READ_4(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg)
#define	CSR_READ_2(sc, reg) \
	bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, reg)
#define	CSR_READ_1(sc, reg) \
	bus_space_read_1((sc)->sc_iot, (sc)->sc_ioh, reg)

/*
 * Read a word from the EEPROM.
 */
uint16_t
se_read_eeprom(struct se_softc *sc, int offset)
{
	uint32_t val;
	int i;

	KASSERT(offset <= EI_OFFSET);

	CSR_WRITE_4(sc, ROMInterface,
	    EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
	DELAY(500);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, ROMInterface);
		if ((val & EI_REQ) == 0)
			break;
		DELAY(100);
	}
	if (i == SE_TIMEOUT) {
		printf("%s: EEPROM read timeout: 0x%08x\n",
		    sc->sc_dev.dv_xname, val);
		return 0xffff;
	}

	return (val & EI_DATA) >> EI_DATA_SHIFT;
}

int
se_get_mac_addr_eeprom(struct se_softc *sc, uint8_t *dest)
{
	uint16_t val;
	int i;

	val = se_read_eeprom(sc, EEPROMSignature);
	if (val == 0xffff || val == 0x0000) {
		printf("%s: invalid EEPROM signature: 0x%04x\n",
		    sc->sc_dev.dv_xname, val);
		return (EINVAL);
	}

	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		val = se_read_eeprom(sc, EEPROMMACAddr + i / 2);
		dest[i + 0] = (uint8_t)val;
		dest[i + 1] = (uint8_t)(val >> 8);
	}

	if ((se_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
		sc->sc_flags |= SE_FLAG_RGMII;
	return (0);
}

/*
 * For SiS96x, APC CMOS RAM is used to store the Ethernet address.
 * APC CMOS RAM is accessed through the ISA bridge.
 */
#if defined(__amd64__) || defined(__i386__)
int
se_pcib_match(struct pci_attach_args *pa)
{
	const struct pci_matchid apc_devices[] = {
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_965 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_966 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_968 }
	};

	return pci_matchbyid(pa, apc_devices, nitems(apc_devices));
}
#endif

int
se_get_mac_addr_apc(struct se_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
	struct pci_attach_args pa;
	pcireg_t reg;
	bus_space_handle_t ioh;
	int rc, i;

	if (pci_find_device(&pa, se_pcib_match) == 0) {
		printf("\n%s: couldn't find PCI-ISA bridge\n",
		    sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* Enable ports 0x78 and 0x79 to access the APC registers. */
	reg = pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg & ~0x02);
	DELAY(50);
	(void)pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);

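	/*
	 * The APC registers are index/data addressed: write the register
	 * index to port 0x78 and read the value from port 0x79.  Going by
	 * the vendor drivers, registers 0x09-0x0e hold the stored Ethernet
	 * address and bit 7 of register 0x12 selects RGMII.
	 */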
	/* XXX this abuses bus_space implementation knowledge */
	rc = _bus_space_map(pa.pa_iot, 0x78, 2, 0, &ioh);
	if (rc == 0) {
		/* Read stored Ethernet address. */
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			bus_space_write_1(pa.pa_iot, ioh, 0, 0x09 + i);
			dest[i] = bus_space_read_1(pa.pa_iot, ioh, 1);
		}
		bus_space_write_1(pa.pa_iot, ioh, 0, 0x12);
		if ((bus_space_read_1(pa.pa_iot, ioh, 1) & 0x80) != 0)
			sc->sc_flags |= SE_FLAG_RGMII;
		_bus_space_unmap(pa.pa_iot, ioh, 2, NULL);
	} else
		rc = EINVAL;

	/* Restore access to APC registers. */
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg);

	return rc;
#endif
	return EINVAL;
}

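/*
 * Issue a GMII command and busy-wait for the chip to clear GMI_REQ.
 * On timeout the returned value still has GMI_REQ set, which callers
 * use as their error indication.
 */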
uint32_t
se_miibus_cmd(struct se_softc *sc, uint32_t ctrl)
{
	int i;
	uint32_t val;

	CSR_WRITE_4(sc, GMIIControl, ctrl);
	DELAY(10);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			return val;
		DELAY(10);
	}

	return GMI_REQ;
}

int
se_miibus_readreg(struct device *self, int phy, int reg)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_RD | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY read timeout: reg %d\n",
		    sc->sc_dev.dv_xname, reg);
		return 0;
	}
	return (val & GMI_DATA) >> GMI_DATA_SHIFT;
}

void
se_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_WR | (data << GMI_DATA_SHIFT) | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY write timeout: reg %d\n",
		    sc->sc_dev.dv_xname, reg);
	}
}

void
se_miibus_statchg(struct device *self)
{
	struct se_softc *sc = (struct se_softc *)self;
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mii_data *mii = &sc->sc_mii;
	uint32_t ctl, speed;

	speed = 0;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 10baseT link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 100baseTX link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 1000baseT link\n", ifp->if_xname);
#endif
			if ((sc->sc_flags & SE_FLAG_FASTETHER) == 0) {
				sc->sc_flags |= SE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: no link\n", ifp->if_xname);
#endif
		return;
	}
	/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
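	/*
	 * Bits 24-27 of StationControl are undocumented timing controls:
	 * 0x07000000 is used at gigabit and 0x04000000 at 10/100, values
	 * inherited from the vendor drivers.
	 */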
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
	if (speed == SC_SPEED_1000)
		ctl |= 0x07000000;
	else
		ctl |= 0x04000000;
#ifdef notyet
	if ((sc->sc_flags & SE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		ctl |= SC_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	if ((sc->sc_flags & SE_FLAG_RGMII) != 0) {
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}

void
se_iff(struct se_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hashes[2];
	uint16_t rxfilt;

	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptAllPhys | AcceptBroadcast | AcceptMulticast);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= AcceptBroadcast | AcceptMyPhys;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= AcceptAllPhys;
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;

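		/*
		 * The chip uses a 64-bit multicast hash: the top bit of
		 * the big-endian CRC selects one of the two 32-bit hash
		 * registers and the next five bits select the bit within
		 * it.
		 */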
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}

void
se_reset(struct se_softc *sc)
{
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
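	/*
	 * 0x1a00 appears to be the default TX/RX control setup for this
	 * chip family; it is written unchanged wherever the MACs are
	 * started or stopped, with only the enable/poll bits varying.
	 */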
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return nonzero if we find a match.
 */
int
se_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	return pci_matchbyid(pa, se_devices, nitems(se_devices));
}

/*
 * Attach the interface. Do ifmedia setup and ethernet/BPF attach.
 */
void
se_attach(struct device *parent, struct device *self, void *aux)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	uint8_t eaddr[ETHER_ADDR_LEN];
	const char *intrstr;
	pci_intr_handle_t ih;
	bus_size_t iosize;
	bus_dma_segment_t seg;
	struct se_list_data *ld;
	struct se_chain_data *cd;
	int nseg;
	uint i;
	int rc;

	printf(": ");

	/*
	 * Map control/status registers.
	 */

	rc = pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0);
	if (rc != 0) {
		printf("can't map i/o space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		printf("can't map interrupt\n");
		goto fail1;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, se_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf("can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail1;
	}

	printf("%s", intrstr);

	if (pa->pa_id == PCI_ID_CODE(PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190))
		sc->sc_flags |= SE_FLAG_FASTETHER;

	/* Reset the adapter. */
	se_reset(sc);

	/* Get the MAC address from APC CMOS RAM or the EEPROM. */
	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x70) & (0x01 << 24)) != 0)
		se_get_mac_addr_apc(sc, eaddr);
	else
		se_get_mac_addr_eeprom(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));
	bcopy(eaddr, ac->ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Now do all the DMA mapping stuff.
	 */

	sc->sc_dmat = pa->pa_dmat;
	ld = &sc->se_ldata;
	cd = &sc->se_cdata;

	/* First create TX/RX busdma maps. */
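	/*
	 * Each se_desc carries a single buffer pointer, so every map is
	 * created with exactly one segment per mbuf cluster.
	 */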
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_rx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the RX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_tx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the TX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}

	/*
	 * Now allocate a chunk of DMA-able memory for RX and TX ring
	 * descriptors, as a contiguous block of memory.
	 * XXX fix deallocation upon error
	 */

	/* RX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_RX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_RX_RING_SZ,
	    (caddr_t *)&ld->se_rx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_RX_RING_SZ, 1,
	    SE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_rx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc RX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_rx_dmamap,
	    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load RX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_rx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	/* TX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_TX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_TX_RING_SZ,
	    (caddr_t *)&ld->se_tx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_TX_RING_SZ, 1,
	    SE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_tx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc TX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_tx_dmamap,
	    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load TX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_tx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	timeout_set(&sc->sc_tick_tmo, se_tick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = se_ioctl;
	ifp->if_start = se_start;
	ifp->if_watchdog = se_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, SE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Do MII setup.
	 */

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = se_miibus_readreg;
	sc->sc_mii.mii_writereg = se_miibus_writereg;
	sc->sc_mii.mii_statchg = se_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, se_ifmedia_upd,
	    se_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	return;

fail2:
	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
}

int
se_activate(struct device *self, int act)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			se_stop(sc);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			(void)se_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}

	return (rv);
}

/*
 * Initialize the TX descriptors.
 */
int
se_list_tx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;

	bzero(ld->se_tx_ring, SE_TX_RING_SZ);
	ld->se_tx_ring[SE_TX_RING_CNT - 1].se_flags = htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	cd->se_tx_prod = 0;
	cd->se_tx_cons = 0;
	cd->se_tx_cnt = 0;

	return 0;
}

int
se_list_tx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}
	}

	return 0;
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.
 */
int
se_list_rx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	bzero(ld->se_rx_ring, SE_RX_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (se_newbuf(sc, i) != 0)
			return ENOBUFS;
	}

	cd->se_rx_prod = 0;

	return 0;
}

int
se_list_rx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (cd->se_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_rx_map[i]);
			m_free(cd->se_rx_mbuf[i]);
			cd->se_rx_mbuf[i] = NULL;
		}
	}

	return 0;
}

/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */
int
se_newbuf(struct se_softc *sc, uint i)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	struct mbuf *m;
	int rc;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (m == NULL) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: MCLGETI failed\n", ifp->if_xname);
#endif
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
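	/*
	 * Trim the cluster head so the receive buffer keeps the 64-bit
	 * alignment (SE_RX_BUF_ALIGN) this driver maintains for the
	 * chip's DMA buffers.
	 */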
	m_adj(m, SE_RX_BUF_ALIGN);

	rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_rx_map[i],
	    m, BUS_DMA_NOWAIT);
	if (rc != 0) {
		m_freem(m);
		return ENOBUFS;
	}
	KASSERT(cd->se_rx_map[i]->dm_nsegs == 1);
	bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
	    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	cd->se_rx_mbuf[i] = m;
	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_ptr = htole32((uint32_t)cd->se_rx_map[i]->dm_segs[0].ds_addr);
	desc->se_flags = htole32(cd->se_rx_map[i]->dm_segs[0].ds_len);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

void
se_discard_rxbuf(struct se_softc *sc, uint i)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_desc *desc;

	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_flags = htole32(MCLBYTES - SE_RX_BUF_ALIGN);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
se_rxeof(struct se_softc *sc)
{
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_rx;
	uint32_t rxinfo, rxstat;
	uint i;

	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_rx_prod; ; SE_INC(i, SE_RX_RING_CNT)) {
		cur_rx = &ld->se_rx_ring[i];
		rxinfo = letoh32(cur_rx->se_cmdsts);
		if ((rxinfo & RDC_OWN) != 0)
			break;
		rxstat = letoh32(cur_rx->se_sts_size);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((rxstat & RDS_CRCOK) == 0 || SE_RX_ERROR(rxstat) != 0 ||
		    SE_RX_NSEGS(rxstat) != 1) {
			/* XXX We don't support multi-segment frames yet. */
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: rx error %b\n",
				    ifp->if_xname, rxstat, RX_ERR_BITS);
			se_discard_rxbuf(sc, i);
			ifp->if_ierrors++;
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
		    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
		m = cd->se_rx_mbuf[i];
		if (se_newbuf(sc, i) != 0) {
			se_discard_rxbuf(sc, i);
			ifp->if_iqdrops++;
			continue;
		}
		/*
		 * Account for the 10 bytes of automatic padding the chip
		 * inserts to align the IP header on a 32-bit boundary.
		 * Also note that the CRC bytes are automatically removed
		 * by the hardware.
		 */
		m->m_data += SE_RX_PAD_BYTES;
		m->m_pkthdr.len = m->m_len =
		    SE_RX_BYTES(rxstat) - SE_RX_PAD_BYTES;

		ml_enqueue(&ml, m);
		ifp->if_ipackets++;
	}

	if_input(ifp, &ml);

	cd->se_rx_prod = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
se_txeof(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_tx;
	uint32_t txstat;
	uint i;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_tx_cons; cd->se_tx_cnt > 0;
	    cd->se_tx_cnt--, SE_INC(i, SE_TX_RING_CNT)) {
		cur_tx = &ld->se_tx_ring[i];
		txstat = letoh32(cur_tx->se_cmdsts);
		if ((txstat & TDC_OWN) != 0)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;

		if (SE_TX_ERROR(txstat) != 0) {
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: tx error %b\n",
				    ifp->if_xname, txstat, TX_ERR_BITS);
			ifp->if_oerrors++;
			/* TODO: better error differentiation */
		} else
			ifp->if_opackets++;

		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
			    cd->se_tx_map[i]->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}

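		/*
		 * Reset the descriptor, preserving only a possible
		 * RING_END marker in the flags word.
		 */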
		cur_tx->se_sts_size = 0;
		cur_tx->se_cmdsts = 0;
		cur_tx->se_ptr = 0;
		cur_tx->se_flags &= htole32(RING_END);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*cur_tx), sizeof(*cur_tx),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	cd->se_tx_cons = i;
	if (cd->se_tx_cnt == 0)
		ifp->if_timer = 0;
}

void
se_tick(void *xsc)
{
	struct se_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int s;

	s = splnet();
	mii = &sc->sc_mii;
	mii_tick(mii);
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
		se_miibus_statchg(&sc->sc_dev);
		if ((sc->sc_flags & SE_FLAG_LINK) != 0 &&
		    !IFQ_IS_EMPTY(&ifp->if_snd))
			se_start(ifp);
	}
	splx(s);

	timeout_add_sec(&sc->sc_tick_tmo, 1);
}

int
se_intr(void *arg)
{
	struct se_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t status;

	status = CSR_READ_4(sc, IntrStatus);
	if (status == 0xffffffff || (status & SE_INTRS) == 0) {
		/* Not ours. */
		return 0;
	}
	/* Ack interrupts. */
	CSR_WRITE_4(sc, IntrStatus, status);
	/* Disable further interrupts. */
	CSR_WRITE_4(sc, IntrMask, 0);

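	/*
	 * Service RX/TX events until the status register drains; the
	 * interrupt stays masked while we work and is re-enabled below
	 * if the interface is still running.
	 */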
	for (;;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
			se_rxeof(sc);
			/* Wakeup Rx MAC. */
			if ((status & INTR_RX_IDLE) != 0)
				CSR_WRITE_4(sc, RX_CTL,
				    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
		}
		if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
			se_txeof(sc);
		status = CSR_READ_4(sc, IntrStatus);
		if ((status & SE_INTRS) == 0)
			break;
		/* Ack interrupts. */
		CSR_WRITE_4(sc, IntrStatus, status);
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_4(sc, IntrMask, SE_INTRS);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			se_start(ifp);
	}

	return 1;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
se_encap(struct se_softc *sc, struct mbuf *m_head, uint *txidx)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mbuf *m;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	uint i, cnt = 0;
	int rc;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (SE_TX_RING_CNT - cd->se_tx_cnt < 2) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: encap failed, not enough TX desc\n",
			    ifp->if_xname);
#endif
		return ENOBUFS;
	}

	if (m_defrag(m_head, M_DONTWAIT) != 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: m_defrag failed\n", ifp->if_xname);
#endif
		return ENOBUFS;	/* XXX should not be fatal */
	}
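
	/*
	 * After a successful m_defrag() the chain is a single cluster,
	 * so the loop below normally programs just one descriptor; it
	 * is kept general in case that ever changes.
	 */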

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	i = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		if ((SE_TX_RING_CNT - (cd->se_tx_cnt + cnt)) < 2) {
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: encap failed, not enough TX desc\n",
				    ifp->if_xname);
#endif
			return ENOBUFS;
		}
		cd->se_tx_mbuf[i] = m;
		rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_tx_map[i],
		    m, BUS_DMA_NOWAIT);
		if (rc != 0)
			return ENOBUFS;
		KASSERT(cd->se_tx_map[i]->dm_nsegs == 1);
		bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
		    cd->se_tx_map[i]->dm_mapsize, BUS_DMASYNC_PREWRITE);

		desc = &ld->se_tx_ring[i];
		desc->se_sts_size = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		desc->se_ptr =
		    htole32((uint32_t)cd->se_tx_map[i]->dm_segs->ds_addr);
		desc->se_flags = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		if (i == SE_TX_RING_CNT - 1)
			desc->se_flags |= htole32(RING_END);
		desc->se_cmdsts = htole32(TDC_OWN | TDC_INTR | TDC_DEF |
		    TDC_CRC | TDC_PAD | TDC_BST);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		SE_INC(i, SE_TX_RING_CNT);
		cnt++;
	}

	/* can't happen */
	if (m != NULL)
		return ENOBUFS;

	cd->se_tx_cnt += cnt;
	*txidx = i;

	return 0;
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
void
se_start(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i, queued = 0;

	if ((sc->sc_flags & SE_FLAG_LINK) == 0 ||
	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: can't tx, flags 0x%x 0x%04x\n",
			    ifp->if_xname, sc->sc_flags, (uint)ifp->if_flags);
#endif
		return;
	}

	i = cd->se_tx_prod;

	while (cd->se_tx_mbuf[i] == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (se_encap(sc, m_head, &i) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (queued > 0) {
		/* Transmit */
		cd->se_tx_prod = i;
		CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
		ifp->if_timer = 5;
	}
}

int
se_init(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	uint16_t rxfilt;
	int i;

	splassert(IPL_NET);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	se_stop(sc);
	se_reset(sc);

	/* Init circular RX list. */
	if (se_list_rx_init(sc) == ENOBUFS) {
		se_stop(sc);	/* XXX necessary? */
		return ENOBUFS;
	}

	/* Init TX descriptors. */
	se_list_tx_init(sc);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, TX_DESC,
	    (uint32_t)sc->se_ldata.se_tx_dmamap->dm_segs[0].ds_addr);
	CSR_WRITE_4(sc, RX_DESC,
	    (uint32_t)sc->se_ldata.se_rx_dmamap->dm_segs[0].ds_addr);

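	/*
	 * The TxMacControl value 0x60 and the extra 0x000c bits in the
	 * RX control writes below are magic numbers carried over from
	 * the vendor drivers; their exact meaning is undocumented.
	 */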
	CSR_WRITE_4(sc, TxMacControl, 0x60);
	CSR_WRITE_4(sc, RxWakeOnLan, 0);
	CSR_WRITE_4(sc, RxWakeOnLanData, 0);
	CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN +
	    SE_RX_PAD_BYTES);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, RxMacAddr + i, sc->sc_ac.ac_enaddr[i]);
	/* Configure RX MAC. */
	rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);

	/* Program promiscuous mode and multicast filters. */
	se_iff(sc);

	/*
	 * Clear and enable interrupts.
	 */
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	CSR_WRITE_4(sc, IntrMask, SE_INTRS);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->sc_flags &= ~SE_FLAG_LINK;
	mii_mediachg(&sc->sc_mii);
	timeout_add_sec(&sc->sc_tick_tmo, 1);

	return 0;
}

/*
 * Set media options.
 */
int
se_ifmedia_upd(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

/*
 * Report current media status.
 */
void
se_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

int
se_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct se_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, rc = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			rc = se_init(ifp);
		if (rc == 0) {
			if (ifa->ifa_addr->sa_family == AF_INET)
				arp_ifinit(&sc->sc_ac, ifa);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				rc = ENETRESET;
			else
				rc = se_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				se_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		rc = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (rc == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			se_iff(sc);
		rc = 0;
	}

	splx(s);
	return rc;
}

void
se_watchdog(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	int s;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	s = splnet();
	se_init(ifp);
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		se_start(ifp);
	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
se_stop(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	timeout_del(&sc->sc_tick_tmo);
	mii_down(&sc->sc_mii);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_READ_4(sc, IntrMask);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	/* Stop TX/RX MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
	/* XXX Can we assume active DMA cycles gone? */
	DELAY(2000);
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	sc->sc_flags &= ~SE_FLAG_LINK;
	se_list_rx_free(sc);
	se_list_tx_free(sc);
}
1448