1 /*	$OpenBSD: if_xge.c,v 1.84 2024/05/24 06:02:57 jsg Exp $	*/
2 /*	$NetBSD: if_xge.c,v 1.1 2005/09/09 10:30:27 ragge Exp $	*/
3 
4 /*
5  * Copyright (c) 2004, SUNET, Swedish University Computer Network.
6  * All rights reserved.
7  *
8  * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *      This product includes software developed for the NetBSD Project by
21  *      SUNET, Swedish University Computer Network.
22  * 4. The name of SUNET may not be used to endorse or promote products
23  *    derived from this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL SUNET
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Driver for the Neterion Xframe Ten Gigabit Ethernet controller.
40  */
41 
42 #include "bpfilter.h"
43 #include "vlan.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sockio.h>
48 #include <sys/mbuf.h>
49 #include <sys/device.h>
50 #include <sys/endian.h>
51 
52 #include <net/if.h>
53 #include <net/if_media.h>
54 
55 #include <netinet/in.h>
56 #include <netinet/if_ether.h>
57 
58 #if NBPFILTER > 0
59 #include <net/bpf.h>
60 #endif
61 
62 #include <machine/bus.h>
63 #include <machine/intr.h>
64 
65 #include <dev/pci/pcivar.h>
66 #include <dev/pci/pcireg.h>
67 #include <dev/pci/pcidevs.h>
68 
69 #include <dev/pci/if_xgereg.h>
70 
71 /* Xframe chipset revisions */
72 #define XGE_TYPE_XENA		1	/* Xframe */
73 #define XGE_TYPE_HERC		2	/* Xframe-II */
74 
75 #define XGE_PCISIZE_XENA	26
76 #define XGE_PCISIZE_HERC	64
77 
78 /*
79  * Some tunable constants, tune with care!
80  */
81 #define RX_MODE		RX_MODE_1  /* Receive mode (buffer usage, see below) */
82 #define NRXDESCS	1016	   /* # of receive descriptors (requested) */
83 #define NTXDESCS	2048	   /* Number of transmit descriptors */
84 #define NTXFRAGS	100	   /* Max fragments per packet */
85 
86 /*
87  * Receive buffer modes; 1, 3 or 5 buffers.
88  */
89 #define RX_MODE_1 1
90 #define RX_MODE_3 3
91 #define RX_MODE_5 5
92 
93 /*
94  * Use clever macros to avoid a bunch of #ifdef's.
95  */
96 #define XCONCAT3(x,y,z) x ## y ## z
97 #define CONCAT3(x,y,z) XCONCAT3(x,y,z)
98 #define NDESC_BUFMODE CONCAT3(NDESC_,RX_MODE,BUFMODE)
99 #define rxd_4k CONCAT3(rxd,RX_MODE,_4k)
100 /* XXX */
101 #if 0
102 #define rxdesc ___CONCAT(rxd,RX_MODE)
103 #endif
104 #define rxdesc rxd1
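/*
 * With RX_MODE set to RX_MODE_1, the token pasting above expands
 * NDESC_BUFMODE to NDESC_1BUFMODE and rxd_4k to rxd1_4k, so the same
 * code compiles unchanged for any of the three buffer modes.
 */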
105 
106 #define NEXTTX(x)	(((x)+1) % NTXDESCS)
107 #define NRXFRAGS	RX_MODE /* hardware imposed frags */
108 #define NRXPAGES	((NRXDESCS/NDESC_BUFMODE)+1)
109 #define NRXREAL		(NRXPAGES*NDESC_BUFMODE)
110 #define RXMAPSZ		(NRXPAGES*PAGE_SIZE)
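/*
 * Sizing example: each 4 KB page holds NDESC_BUFMODE receive
 * descriptors for the selected mode, so the NRXDESCS = 1016 requested
 * descriptors are rounded up to NRXPAGES whole pages, giving NRXREAL
 * real descriptors, slightly more than requested.
 */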
111 
112 /*
113  * Magic to fix a bug when the MAC address cannot be read correctly.
114  * This came from the Linux driver.
115  */
116 static const uint64_t xge_fix_mac[] = {
117 	0x0060000000000000ULL, 0x0060600000000000ULL,
118 	0x0040600000000000ULL, 0x0000600000000000ULL,
119 	0x0020600000000000ULL, 0x0060600000000000ULL,
120 	0x0020600000000000ULL, 0x0060600000000000ULL,
121 	0x0020600000000000ULL, 0x0060600000000000ULL,
122 	0x0020600000000000ULL, 0x0060600000000000ULL,
123 	0x0020600000000000ULL, 0x0060600000000000ULL,
124 	0x0020600000000000ULL, 0x0060600000000000ULL,
125 	0x0020600000000000ULL, 0x0060600000000000ULL,
126 	0x0020600000000000ULL, 0x0060600000000000ULL,
127 	0x0020600000000000ULL, 0x0060600000000000ULL,
128 	0x0020600000000000ULL, 0x0060600000000000ULL,
129 	0x0020600000000000ULL, 0x0000600000000000ULL,
130 	0x0040600000000000ULL, 0x0060600000000000ULL,
131 };
132 
133 /*
134  * Constants to be programmed into Hercules's registers, to configure
135  * the XGXS transceiver.
136  */
137 static const uint64_t xge_herc_dtx_cfg[] = {
138 	0x8000051536750000ULL, 0x80000515367500E0ULL,
139 	0x8000051536750004ULL, 0x80000515367500E4ULL,
140 
141 	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
142 	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
143 
144 	0x801205150D440000ULL, 0x801205150D4400E0ULL,
145 	0x801205150D440004ULL, 0x801205150D4400E4ULL,
146 
147 	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
148 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
149 };
150 
151 static const uint64_t xge_xena_dtx_cfg[] = {
152 	0x8000051500000000ULL, 0x80000515000000E0ULL,
153 	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
154 
155 	0x8001051500000000ULL, 0x80010515000000E0ULL,
156 	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
157 
158 	0x8002051500000000ULL, 0x80020515000000E0ULL,
159 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
160 };
161 
162 struct xge_softc {
163 	struct device		sc_dev;
164 	struct arpcom		sc_arpcom;
165 	struct ifmedia		xena_media;
166 
167 	void			*sc_ih;
168 
169 	bus_dma_tag_t		sc_dmat;
170 	bus_space_tag_t		sc_st;
171 	bus_space_handle_t	sc_sh;
172 	bus_space_tag_t		sc_txt;
173 	bus_space_handle_t	sc_txh;
174 
175 	pcireg_t		sc_pciregs[16];
176 
177 	int			xge_type; /* chip type */
178 	int			xge_if_flags;
179 
180 	/* Transmit structures */
181 	struct txd		*sc_txd[NTXDESCS]; /* transmit frags array */
182 	bus_addr_t		sc_txdp[NTXDESCS]; /* dva of transmit frags */
183 	bus_dmamap_t		sc_txm[NTXDESCS]; /* transmit frags map */
184 	struct mbuf		*sc_txb[NTXDESCS]; /* transmit mbuf pointer */
185 	int			sc_nexttx, sc_lasttx;
186 	bus_dmamap_t		sc_txmap; /* transmit descriptor map */
187 
188 	/* Receive data */
189 	bus_dmamap_t		sc_rxmap; /* receive descriptor map */
190 	struct rxd_4k		*sc_rxd_4k[NRXPAGES]; /* receive desc pages */
191 	bus_dmamap_t		sc_rxm[NRXREAL]; /* receive buffer map */
192 	struct mbuf		*sc_rxb[NRXREAL]; /* mbufs on rx descriptors */
193 	int			sc_nextrx; /* next descriptor to check */
194 };
195 
196 #ifdef XGE_DEBUG
197 #define DPRINTF(x)	do { if (xgedebug) printf x ; } while (0)
198 #define DPRINTFN(n,x)	do { if (xgedebug >= (n)) printf x ; } while (0)
199 int	xgedebug = 0;
200 #else
201 #define DPRINTF(x)
202 #define DPRINTFN(n,x)
203 #endif
204 
205 int xge_match(struct device *, void *, void *);
206 void xge_attach(struct device *, struct device *, void *);
207 int xge_alloc_txmem(struct xge_softc *);
208 int xge_alloc_rxmem(struct xge_softc *);
209 void xge_start(struct ifnet *);
210 void xge_stop(struct ifnet *, int);
211 int xge_add_rxbuf(struct xge_softc *, int);
212 void xge_setmulti(struct xge_softc *);
213 void xge_setpromisc(struct xge_softc *);
214 int xge_setup_xgxs_xena(struct xge_softc *);
215 int xge_setup_xgxs_herc(struct xge_softc *);
216 int xge_ioctl(struct ifnet *, u_long, caddr_t);
217 int xge_init(struct ifnet *);
218 void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
219 int xge_xgmii_mediachange(struct ifnet *);
220 void xge_enable(struct xge_softc *);
221 int xge_intr(void *);
222 
223 /*
224  * Helpers to address registers.
225  */
226 #define PIF_WCSR(csr, val)	pif_wcsr(sc, csr, val)
227 #define PIF_RCSR(csr)		pif_rcsr(sc, csr)
228 #define TXP_WCSR(csr, val)	txp_wcsr(sc, csr, val)
229 #define PIF_WKEY(csr, val)	pif_wkey(sc, csr, val)
230 
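/*
 * On non-LP64 platforms a 64-bit CSR access must be split into two
 * 32-bit accesses; the low and high halves are issued in an order
 * that depends on host byte order so the device sees the same 64-bit
 * value either way.
 */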
231 static inline void
232 pif_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
233 {
234 #if defined(__LP64__)
235 	bus_space_write_raw_8(sc->sc_st, sc->sc_sh, csr, val);
236 #else
237 	uint32_t lval, hval;
238 
239 	lval = val&0xffffffff;
240 	hval = val>>32;
241 
242 #if BYTE_ORDER == LITTLE_ENDIAN
243 	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, lval);
244 	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, hval);
245 #else
246 	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, lval);
247 	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, hval);
248 #endif
249 #endif
250 }
251 
252 static inline uint64_t
253 pif_rcsr(struct xge_softc *sc, bus_size_t csr)
254 {
255 	uint64_t val;
256 #if defined(__LP64__)
257 	val = bus_space_read_raw_8(sc->sc_st, sc->sc_sh, csr);
258 #else
259 	uint64_t val2;
260 
261 	val = bus_space_read_raw_4(sc->sc_st, sc->sc_sh, csr);
262 	val2 = bus_space_read_raw_4(sc->sc_st, sc->sc_sh, csr+4);
263 #if BYTE_ORDER == LITTLE_ENDIAN
264 	val |= (val2 << 32);
265 #else
266 	val = (val << 32 | val2);
267 #endif
268 #endif
269 	return (val);
270 }
271 
272 static inline void
273 txp_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
274 {
275 #if defined(__LP64__)
276 	bus_space_write_raw_8(sc->sc_txt, sc->sc_txh, csr, val);
277 #else
278 	uint32_t lval, hval;
279 
280 	lval = val&0xffffffff;
281 	hval = val>>32;
282 
283 #if BYTE_ORDER == LITTLE_ENDIAN
284 	bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr, lval);
285 	bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr+4, hval);
286 #else
287 	bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr, hval);
288 	bus_space_write_raw_4(sc->sc_txt, sc->sc_txh, csr+4, lval);
289 #endif
290 #endif
291 }
292 
293 static inline void
294 pif_wkey(struct xge_softc *sc, bus_size_t csr, uint64_t val)
295 {
296 #if defined(__LP64__)
297 	if (sc->xge_type == XGE_TYPE_XENA)
298 		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
299 
300 	bus_space_write_raw_8(sc->sc_st, sc->sc_sh, csr, val);
301 #else
302 	uint32_t lval, hval;
303 
304 	lval = val&0xffffffff;
305 	hval = val>>32;
306 
307 	if (sc->xge_type == XGE_TYPE_XENA)
308 		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
309 
310 #if BYTE_ORDER == LITTLE_ENDIAN
311 	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, lval);
312 #else
313 	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr, hval);
314 #endif
315 
316 	if (sc->xge_type == XGE_TYPE_XENA)
317 		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
318 #if BYTE_ORDER == LITTLE_ENDIAN
319 	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, hval);
320 #else
321 	bus_space_write_raw_4(sc->sc_st, sc->sc_sh, csr+4, lval);
322 #endif
323 #endif
324 }
325 
326 const struct cfattach xge_ca = {
327 	sizeof(struct xge_softc), xge_match, xge_attach
328 };
329 
330 struct cfdriver xge_cd = {
331 	NULL, "xge", DV_IFNET
332 };
333 
334 #define XNAME sc->sc_dev.dv_xname
335 
336 #define XGE_RXSYNC(desc, what) \
337 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, \
338 	(desc/NDESC_BUFMODE) * XGE_PAGE + sizeof(struct rxdesc) * \
339 	(desc%NDESC_BUFMODE), sizeof(struct rxdesc), what)
340 #define XGE_RXD(desc)	&sc->sc_rxd_4k[desc/NDESC_BUFMODE]-> \
341 	r4_rxd[desc%NDESC_BUFMODE]
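/*
 * A linear descriptor index is split into a page (desc/NDESC_BUFMODE)
 * and an offset within that page (desc%NDESC_BUFMODE): XGE_RXSYNC
 * syncs exactly one descriptor and XGE_RXD yields a pointer to it.
 */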
342 
343 /*
344  * Non-tunable constants.
345  */
346 #define XGE_MAX_FRAMELEN	9622
347 #define XGE_MAX_MTU		(XGE_MAX_FRAMELEN - ETHER_HDR_LEN - \
348 				 ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
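/* 9622 - 14 (Ethernet header) - 4 (CRC) - 4 (VLAN tag) = 9600 byte MTU */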
349 
350 const struct pci_matchid xge_devices[] = {
351 	{ PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME },
352 	{ PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME_2 }
353 };
354 
355 int
356 xge_match(struct device *parent, void *match, void *aux)
357 {
358 	return (pci_matchbyid((struct pci_attach_args *)aux, xge_devices,
359 	    nitems(xge_devices)));
360 }
361 
362 void
363 xge_attach(struct device *parent, struct device *self, void *aux)
364 {
365 	struct pci_attach_args *pa = aux;
366 	struct xge_softc *sc;
367 	struct ifnet *ifp;
368 	pcireg_t memtype;
369 	pci_intr_handle_t ih;
370 	const char *intrstr = NULL;
371 	pci_chipset_tag_t pc = pa->pa_pc;
372 	uint8_t enaddr[ETHER_ADDR_LEN];
373 	uint64_t val;
374 	int i;
375 
376 	sc = (struct xge_softc *)self;
377 
378 	sc->sc_dmat = pa->pa_dmat;
379 
380 	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETERION_XFRAME)
381 		sc->xge_type = XGE_TYPE_XENA;
382 	else
383 		sc->xge_type = XGE_TYPE_HERC;
384 
385 	/* Get BAR0 address */
386 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_PIF_BAR);
387 	if (pci_mapreg_map(pa, XGE_PIF_BAR, memtype, 0,
388 	    &sc->sc_st, &sc->sc_sh, 0, 0, 0)) {
389 		printf(": unable to map PIF BAR registers\n");
390 		return;
391 	}
392 
393 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_TXP_BAR);
394 	if (pci_mapreg_map(pa, XGE_TXP_BAR, memtype, 0,
395 	    &sc->sc_txt, &sc->sc_txh, 0, 0, 0)) {
396 		printf(": unable to map TXP BAR registers\n");
397 		return;
398 	}
399 
400 	if (sc->xge_type == XGE_TYPE_XENA) {
401 		/* Save PCI config space */
402 		for (i = 0; i < XGE_PCISIZE_XENA; i += 4)
403 			sc->sc_pciregs[i/4] = pci_conf_read(pa->pa_pc, pa->pa_tag, i);
404 	}
405 
406 #if BYTE_ORDER == LITTLE_ENDIAN
407 	val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
408 	val &= ~(TxF_R_SE|RxF_W_SE);
409 	PIF_WCSR(SWAPPER_CTRL, val);
410 	PIF_WCSR(SWAPPER_CTRL, val);
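	/*
	 * The value is written twice, presumably so that at least one
	 * store lands intact regardless of the swapper's initial state;
	 * the PIF_RD_SWAPPER_Fb read below verifies the result.
	 */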
411 #endif
412 	if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
413 		printf(": failed configuring endian (read), %llx != %llx!\n",
414 		    (unsigned long long)val, SWAPPER_MAGIC);
415 	}
416 
417 	PIF_WCSR(XMSI_ADDRESS, SWAPPER_MAGIC);
418 	if ((val = PIF_RCSR(XMSI_ADDRESS)) != SWAPPER_MAGIC) {
419 		printf(": failed configuring endian (write), %llx != %llx!\n",
420 			(unsigned long long)val, SWAPPER_MAGIC);
421 	}
422 
423 	/*
424 	 * Fix for all "FFs" MAC address problems observed on
425 	 * Alpha platforms. Not needed for Herc.
426 	 */
427 	if (sc->xge_type == XGE_TYPE_XENA) {
428 		/*
429 		 * The MAC addr may be all FF's, which is not good.
430 		 * Resolve it by writing some magics to GPIO_CONTROL and
431 		 * force a chip reset to read in the serial eeprom again.
432 		 */
433 		for (i = 0; i < nitems(xge_fix_mac); i++) {
434 			PIF_WCSR(GPIO_CONTROL, xge_fix_mac[i]);
435 			PIF_RCSR(GPIO_CONTROL);
436 		}
437 
438 		/*
439 		 * Reset the chip and restore the PCI registers.
440 		 */
441 		PIF_WCSR(SW_RESET, 0xa5a5a50000000000ULL);
442 		DELAY(500000);
443 		for (i = 0; i < XGE_PCISIZE_XENA; i += 4)
444 			pci_conf_write(pa->pa_pc, pa->pa_tag, i, sc->sc_pciregs[i/4]);
445 
446 		/*
447 		 * Restore the byte order registers.
448 		 */
449 #if BYTE_ORDER == LITTLE_ENDIAN
450 		val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
451 		val &= ~(TxF_R_SE|RxF_W_SE);
452 		PIF_WCSR(SWAPPER_CTRL, val);
453 		PIF_WCSR(SWAPPER_CTRL, val);
454 #endif
455 
456 		if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
457 			printf(": failed configuring endian2 (read), %llx != %llx!\n",
458 			    (unsigned long long)val, SWAPPER_MAGIC);
459 			return;
460 		}
461 
462 		PIF_WCSR(XMSI_ADDRESS, SWAPPER_MAGIC);
463 		if ((val = PIF_RCSR(XMSI_ADDRESS)) != SWAPPER_MAGIC) {
464 			printf(": failed configuring endian2 (write), %llx != %llx!\n",
465 			    (unsigned long long)val, SWAPPER_MAGIC);
466 			return;
467 		}
468 	}
469 
470 	/*
471 	 * XGXS initialization.
472 	 */
473 
474 	/*
475 	 * For Herc, bring EOI out of reset before XGXS.
476 	 */
477 	if (sc->xge_type == XGE_TYPE_HERC) {
478 		val = PIF_RCSR(SW_RESET);
479 		val &= 0xffff00ffffffffffULL;
480 		PIF_WCSR(SW_RESET,val);
481 		delay(1000*1000);	/* wait for 1 sec */
482 	}
483 
484 	/* 29, Bring adapter out of reset */
485 	val = PIF_RCSR(SW_RESET);
486 	val &= 0xffffff00ffffffffULL;
487 	PIF_WCSR(SW_RESET, val);
488 	DELAY(500000);
489 
490 	/* Ensure that it is safe to access registers by checking that the
491 	 * RIC_RUNNING bit is clear. The check is only valid for the Xframe-II.
492 	 */
493 	if (sc->xge_type == XGE_TYPE_HERC){
494 		for (i = 0; i < 50; i++) {
495 			val = PIF_RCSR(ADAPTER_STATUS);
496 			if (!(val & RIC_RUNNING))
497 				break;
498 			delay(20*1000);
499 		}
500 
501 		if (i == 50) {
502 			printf(": not safe to access registers\n");
503 			return;
504 		}
505 	}
506 
507 	/* 30, configure XGXS transceiver */
508 	if (sc->xge_type == XGE_TYPE_XENA)
509 		xge_setup_xgxs_xena(sc);
510 	else if(sc->xge_type == XGE_TYPE_HERC)
511 		xge_setup_xgxs_herc(sc);
512 
513 	/* 33, program MAC address (not needed here) */
514 	/* Get ethernet address */
515 	PIF_WCSR(RMAC_ADDR_CMD_MEM,
516 	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(0));
517 	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
518 		;
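	/*
	 * The station address occupies the six most significant bytes
	 * of RMAC_ADDR_DATA0_MEM; the loop below extracts byte i by
	 * shifting it down from bit 56 - 8*i.
	 */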
519 	val = PIF_RCSR(RMAC_ADDR_DATA0_MEM);
520 	for (i = 0; i < ETHER_ADDR_LEN; i++)
521 		enaddr[i] = (uint8_t)(val >> (56 - (8*i)));
522 
523 	/*
524 	 * Get memory for transmit descriptor lists.
525 	 */
526 	if (xge_alloc_txmem(sc)) {
527 		printf(": failed allocating txmem.\n");
528 		return;
529 	}
530 
531 	/* 9 and 10 - set FIFO number/prio */
532 	PIF_WCSR(TX_FIFO_P0, TX_FIFO_LEN0(NTXDESCS));
533 	PIF_WCSR(TX_FIFO_P1, 0ULL);
534 	PIF_WCSR(TX_FIFO_P2, 0ULL);
535 	PIF_WCSR(TX_FIFO_P3, 0ULL);
536 
537 	/* 11, XXX set round-robin prio? */
538 
539 	/* 12, enable transmit FIFO */
540 	val = PIF_RCSR(TX_FIFO_P0);
541 	val |= TX_FIFO_ENABLE;
542 	PIF_WCSR(TX_FIFO_P0, val);
543 
544 	/* 13, disable some error checks */
545 	PIF_WCSR(TX_PA_CFG,
546 	    TX_PA_CFG_IFR|TX_PA_CFG_ISO|TX_PA_CFG_ILC|TX_PA_CFG_ILE);
547 
548 	/* Create transmit DMA maps */
549 	for (i = 0; i < NTXDESCS; i++) {
550 		if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_FRAMELEN,
551 		    NTXFRAGS, XGE_MAX_FRAMELEN, 0, BUS_DMA_NOWAIT,
552 		    &sc->sc_txm[i])) {
553 			printf(": cannot create TX DMA maps\n");
554 			return;
555 		}
556 	}
557 
558 	sc->sc_lasttx = NTXDESCS-1;
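	/*
	 * sc_lasttx trails sc_nexttx: the ring is empty when
	 * NEXTTX(sc_lasttx) == sc_nexttx and full when
	 * sc_nexttx == sc_lasttx, so one slot always stays unused.
	 */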
559 
560 	/*
561 	 * RxDMA initialization.
562 	 * Only use one out of 8 possible receive queues.
563 	 */
564 	/* allocate rx descriptor memory */
565 	if (xge_alloc_rxmem(sc)) {
566 		printf(": failed allocating rxmem\n");
567 		return;
568 	}
569 
570 	/* Create receive buffer DMA maps */
571 	for (i = 0; i < NRXREAL; i++) {
572 		if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_FRAMELEN,
573 		    NRXFRAGS, XGE_MAX_FRAMELEN, 0, BUS_DMA_NOWAIT,
574 		    &sc->sc_rxm[i])) {
575 			printf(": cannot create RX DMA maps\n");
576 			return;
577 		}
578 	}
579 
580 	/* allocate mbufs to receive descriptors */
581 	for (i = 0; i < NRXREAL; i++)
582 		if (xge_add_rxbuf(sc, i))
583 			panic("out of mbufs too early");
584 
585 	/* 14, setup receive ring priority */
586 	PIF_WCSR(RX_QUEUE_PRIORITY, 0ULL); /* only use one ring */
587 
588 	/* 15, setup receive ring round-robin calendar */
589 	PIF_WCSR(RX_W_ROUND_ROBIN_0, 0ULL); /* only use one ring */
590 	PIF_WCSR(RX_W_ROUND_ROBIN_1, 0ULL);
591 	PIF_WCSR(RX_W_ROUND_ROBIN_2, 0ULL);
592 	PIF_WCSR(RX_W_ROUND_ROBIN_3, 0ULL);
593 	PIF_WCSR(RX_W_ROUND_ROBIN_4, 0ULL);
594 
595 	/* 16, write receive ring start address */
596 	PIF_WCSR(PRC_RXD0_0, (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr);
597 	/* PRC_RXD0_[1-7] are not used */
598 
599 	/* 17, Setup alarm registers */
600 	PIF_WCSR(PRC_ALARM_ACTION, 0ULL); /* Default everything to retry */
601 
602 	/* 18, init receive ring controller */
603 #if RX_MODE == RX_MODE_1
604 	val = RING_MODE_1;
605 #elif RX_MODE == RX_MODE_3
606 	val = RING_MODE_3;
607 #else /* RX_MODE == RX_MODE_5 */
608 	val = RING_MODE_5;
609 #endif
610 	PIF_WCSR(PRC_CTRL_0, RC_IN_SVC|val);
611 	/* leave 1-7 disabled */
612 	/* XXXX snoop configuration? */
613 
614 	/* 19, set chip memory assigned to the queue */
615 	if (sc->xge_type == XGE_TYPE_XENA) {
616 		/* all 64M to queue 0 */
617 		PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 64));
618 	} else {
619 		/* all 32M to queue 0 */
620 		PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 32));
621 	}
622 
623 	/* 20, setup RLDRAM parameters */
624 	/* do not touch it for now */
625 
626 	/* 21, setup pause frame thresholds */
627 	/* do not touch the defaults */
628 	/* XXX - must 0xff be written as stated in the manual? */
629 
630 	/* 22, configure RED */
631 	/* we do not want to drop packets, so ignore */
632 
633 	/* 23, initiate RLDRAM */
634 	val = PIF_RCSR(MC_RLDRAM_MRS);
635 	val |= MC_QUEUE_SIZE_ENABLE|MC_RLDRAM_MRS_ENABLE;
636 	PIF_WCSR(MC_RLDRAM_MRS, val);
637 	DELAY(1000);
638 
639 	/*
640 	 * Setup interrupt policies.
641 	 */
642 	/* 40, Transmit interrupts */
643 	PIF_WCSR(TTI_DATA1_MEM, TX_TIMER_VAL(0x1ff) | TX_TIMER_AC |
644 	    TX_URNG_A(5) | TX_URNG_B(20) | TX_URNG_C(48));
645 	PIF_WCSR(TTI_DATA2_MEM,
646 	    TX_UFC_A(25) | TX_UFC_B(64) | TX_UFC_C(128) | TX_UFC_D(512));
647 	PIF_WCSR(TTI_COMMAND_MEM, TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE);
648 	while (PIF_RCSR(TTI_COMMAND_MEM) & TTI_CMD_MEM_STROBE)
649 		;
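	/* The strobe bit self-clears once the command memory write has
	 * completed, hence the busy-wait above. */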
650 
651 	/* 41, Receive interrupts */
652 	PIF_WCSR(RTI_DATA1_MEM, RX_TIMER_VAL(0x800) | RX_TIMER_AC |
653 	    RX_URNG_A(5) | RX_URNG_B(20) | RX_URNG_C(50));
654 	PIF_WCSR(RTI_DATA2_MEM,
655 	    RX_UFC_A(64) | RX_UFC_B(128) | RX_UFC_C(256) | RX_UFC_D(512));
656 	PIF_WCSR(RTI_COMMAND_MEM, RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE);
657 	while (PIF_RCSR(RTI_COMMAND_MEM) & RTI_CMD_MEM_STROBE)
658 		;
659 
660 	/*
661 	 * Setup media stuff.
662 	 */
663 	ifmedia_init(&sc->xena_media, IFM_IMASK, xge_xgmii_mediachange,
664 	    xge_ifmedia_status);
665 	ifmedia_add(&sc->xena_media, IFM_ETHER|IFM_10G_SR, 0, NULL);
666 	ifmedia_set(&sc->xena_media, IFM_ETHER|IFM_10G_SR);
667 
668 	ifp = &sc->sc_arpcom.ac_if;
669 	strlcpy(ifp->if_xname, XNAME, IFNAMSIZ);
670 	memcpy(sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
671 	ifp->if_baudrate = IF_Gbps(10);
672 	ifp->if_softc = sc;
673 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
674 	ifp->if_ioctl = xge_ioctl;
675 	ifp->if_start = xge_start;
676 	ifp->if_hardmtu = XGE_MAX_MTU;
677 	ifq_init_maxlen(&ifp->if_snd, NTXDESCS - 1);
678 
679 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
680 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
681 
682 #if NVLAN > 0
683 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
684 #endif
685 
686 	/*
687 	 * Attach the interface.
688 	 */
689 	if_attach(ifp);
690 	ether_ifattach(ifp);
691 
692 	/*
693 	 * Setup interrupt vector before initializing.
694 	 */
695 	if (pci_intr_map(pa, &ih)) {
696 		printf(": unable to map interrupt\n");
697 		return;
698 	}
699 	intrstr = pci_intr_string(pc, ih);
700 	if ((sc->sc_ih =
701 	    pci_intr_establish(pc, ih, IPL_NET, xge_intr, sc, XNAME)) == NULL) {
702 		printf(": unable to establish interrupt at %s\n",
703 		    intrstr ? intrstr : "<unknown>");
704 		return;
705 	}
706 	printf(": %s, address %s\n", intrstr, ether_sprintf(enaddr));
707 }
708 
709 void
710 xge_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
711 {
712 	struct xge_softc *sc = ifp->if_softc;
713 	uint64_t reg;
714 
715 	ifmr->ifm_status = IFM_AVALID;
716 	ifmr->ifm_active = IFM_ETHER|IFM_10G_SR;
717 
718 	reg = PIF_RCSR(ADAPTER_STATUS);
719 	if ((reg & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
720 		ifmr->ifm_status |= IFM_ACTIVE;
721 }
722 
723 int
724 xge_xgmii_mediachange(struct ifnet *ifp)
725 {
726 	return (0);
727 }
728 
729 void
730 xge_enable(struct xge_softc *sc)
731 {
732 	uint64_t val;
733 
734 	/* 2, enable adapter */
735 	val = PIF_RCSR(ADAPTER_CONTROL);
736 	val |= ADAPTER_EN;
737 	PIF_WCSR(ADAPTER_CONTROL, val);
738 
739 	/* 3, light the card enable led */
740 	val = PIF_RCSR(ADAPTER_CONTROL);
741 	val |= LED_ON;
742 	PIF_WCSR(ADAPTER_CONTROL, val);
743 #ifdef XGE_DEBUG
744 	printf("%s: link up\n", XNAME);
745 #endif
746 }
747 
748 int
749 xge_init(struct ifnet *ifp)
750 {
751 	struct xge_softc *sc = ifp->if_softc;
752 	uint64_t val;
753 	int s;
754 
755 	s = splnet();
756 
757 	/*
758 	 * Cancel any pending I/O
759 	 */
760 	xge_stop(ifp, 0);
761 
762 	/* 31+32, setup MAC config */
763 	PIF_WKEY(MAC_CFG, TMAC_EN|RMAC_EN|TMAC_APPEND_PAD|RMAC_STRIP_FCS|
764 	    RMAC_BCAST_EN|RMAC_DISCARD_PFRM);
765 
766 	DELAY(1000);
767 
768 	/* 54, ensure that the adapter is 'quiescent' */
769 	val = PIF_RCSR(ADAPTER_STATUS);
770 	if ((val & QUIESCENT) != QUIESCENT) {
771 #if 0
772 		char buf[200];
773 #endif
774 		printf("%s: adapter not quiescent, aborting\n", XNAME);
775 		val = (val & QUIESCENT) ^ QUIESCENT;
776 #if 0
777 		bitmask_snprintf(val, QUIESCENT_BMSK, buf, sizeof buf);
778 		printf("%s: ADAPTER_STATUS missing bits %s\n", XNAME, buf);
779 #endif
780 		splx(s);
781 		return (1);
782 	}
783 
784 	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
785 		/* disable VLAN tag stripping */
786 		val = PIF_RCSR(RX_PA_CFG);
787 		val &= ~STRIP_VLAN_TAG;
788 		PIF_WCSR(RX_PA_CFG, val);
789 	}
790 
791 	/* set MRU */
792 	PIF_WCSR(RMAC_MAX_PYLD_LEN, RMAC_PYLD_LEN(XGE_MAX_FRAMELEN));
793 
794 	/* 56, enable the transmit laser */
795 	val = PIF_RCSR(ADAPTER_CONTROL);
796 	val |= EOI_TX_ON;
797 	PIF_WCSR(ADAPTER_CONTROL, val);
798 
799 	xge_enable(sc);
800 
801 	/*
802 	 * Enable all interrupts
803 	 */
804 	PIF_WCSR(TX_TRAFFIC_MASK, 0);
805 	PIF_WCSR(RX_TRAFFIC_MASK, 0);
806 	PIF_WCSR(TXPIC_INT_MASK, 0);
807 	PIF_WCSR(RXPIC_INT_MASK, 0);
808 
809 	PIF_WCSR(MAC_INT_MASK, MAC_TMAC_INT); /* only from RMAC */
810 	PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);
811 	PIF_WCSR(MAC_RMAC_ERR_MASK, ~RMAC_LINK_STATE_CHANGE_INT);
812 	PIF_WCSR(GENERAL_INT_MASK, 0);
813 
814 	xge_setpromisc(sc);
815 
816 	xge_setmulti(sc);
817 
818 	/* Done... */
819 	ifp->if_flags |= IFF_RUNNING;
820 	ifq_clr_oactive(&ifp->if_snd);
821 
822 	splx(s);
823 
824 	return (0);
825 }
826 
827 void
828 xge_stop(struct ifnet *ifp, int disable)
829 {
830 	struct xge_softc *sc = ifp->if_softc;
831 	uint64_t val;
832 
833 	ifp->if_flags &= ~IFF_RUNNING;
834 	ifq_clr_oactive(&ifp->if_snd);
835 
836 	val = PIF_RCSR(ADAPTER_CONTROL);
837 	val &= ~ADAPTER_EN;
838 	PIF_WCSR(ADAPTER_CONTROL, val);
839 
840 	while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
841 		;
842 }
843 
844 int
845 xge_intr(void *pv)
846 {
847 	struct xge_softc *sc = pv;
848 	struct txd *txd;
849 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
850 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
851 	bus_dmamap_t dmp;
852 	uint64_t val;
853 	int i, lasttx, plen;
854 
855 	val = PIF_RCSR(GENERAL_INT_STATUS);
856 	if (val == 0)
857 		return (0); /* no interrupt here */
858 
859 	PIF_WCSR(GENERAL_INT_STATUS, val);
860 
861 	if ((val = PIF_RCSR(MAC_RMAC_ERR_REG)) & RMAC_LINK_STATE_CHANGE_INT) {
862 		/* Wait for quiescence */
863 #ifdef XGE_DEBUG
864 		printf("%s: link down\n", XNAME);
865 #endif
866 		while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
867 			;
868 		PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);
869 
870 		val = PIF_RCSR(ADAPTER_STATUS);
871 		if ((val & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
872 			xge_enable(sc); /* Only if link restored */
873 	}
874 
875 	if ((val = PIF_RCSR(TX_TRAFFIC_INT)))
876 		PIF_WCSR(TX_TRAFFIC_INT, val); /* clear interrupt bits */
877 	/*
878 	 * Collect sent packets.
879 	 */
880 	lasttx = sc->sc_lasttx;
881 	while ((i = NEXTTX(sc->sc_lasttx)) != sc->sc_nexttx) {
882 		txd = sc->sc_txd[i];
883 		dmp = sc->sc_txm[i];
884 
885 		bus_dmamap_sync(sc->sc_dmat, dmp, 0,
886 		    dmp->dm_mapsize,
887 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
888 
889 		if (txd->txd_control1 & TXD_CTL1_OWN) {
890 			bus_dmamap_sync(sc->sc_dmat, dmp, 0,
891 			    dmp->dm_mapsize, BUS_DMASYNC_PREREAD);
892 			break;
893 		}
894 		bus_dmamap_unload(sc->sc_dmat, dmp);
895 		m_freem(sc->sc_txb[i]);
896 		sc->sc_lasttx = i;
897 	}
898 
899 	if (sc->sc_lasttx != lasttx)
900 		ifq_clr_oactive(&ifp->if_snd);
901 
902 	/* Try to get more packets on the wire */
903 	xge_start(ifp);
904 
905 	/* clear interrupt bits */
906 	if ((val = PIF_RCSR(RX_TRAFFIC_INT)))
907 		PIF_WCSR(RX_TRAFFIC_INT, val);
908 
909 	for (;;) {
910 		struct rxdesc *rxd;
911 		struct mbuf *m;
912 
913 		XGE_RXSYNC(sc->sc_nextrx,
914 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
915 
916 		rxd = XGE_RXD(sc->sc_nextrx);
917 		if (rxd->rxd_control1 & RXD_CTL1_OWN) {
918 			XGE_RXSYNC(sc->sc_nextrx, BUS_DMASYNC_PREREAD);
919 			break;
920 		}
921 
922 		/* got a packet */
923 		m = sc->sc_rxb[sc->sc_nextrx];
924 #if RX_MODE == RX_MODE_1
925 		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
926 #elif RX_MODE == RX_MODE_3
927 #error Fix rxmodes in xge_intr
928 #elif RX_MODE == RX_MODE_5
929 		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
930 		plen += m->m_next->m_len = RXD_CTL2_BUF1SIZ(rxd->rxd_control2);
931 		plen += m->m_next->m_next->m_len =
932 		    RXD_CTL2_BUF2SIZ(rxd->rxd_control2);
933 		plen += m->m_next->m_next->m_next->m_len =
934 		    RXD_CTL3_BUF3SIZ(rxd->rxd_control3);
935 		plen += m->m_next->m_next->m_next->m_next->m_len =
936 		    RXD_CTL3_BUF4SIZ(rxd->rxd_control3);
937 #endif
938 		m->m_pkthdr.len = plen;
939 
940 		val = rxd->rxd_control1;
941 
942 		if (xge_add_rxbuf(sc, sc->sc_nextrx)) {
943 			/* Failed, recycle this mbuf */
944 #if RX_MODE == RX_MODE_1
945 			rxd->rxd_control2 = RXD_MKCTL2(MCLBYTES, 0, 0);
946 			rxd->rxd_control1 = RXD_CTL1_OWN;
947 #elif RX_MODE == RX_MODE_3
948 #elif RX_MODE == RX_MODE_5
949 #endif
950 			XGE_RXSYNC(sc->sc_nextrx,
951 			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
952 			ifp->if_ierrors++;
953 			break;
954 		}
955 
956 		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_IPv4)
957 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
958 		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_TCP)
959 			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
960 		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_UDP)
961 			m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
962 
963 #if NVLAN > 0
964 		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_VLAN) {
965 			m->m_pkthdr.ether_vtag =
966 			    RXD_CTL2_VLANTAG(rxd->rxd_control2);
967 			m->m_flags |= M_VLANTAG;
968 		}
969 #endif
970 
971 		ml_enqueue(&ml, m);
972 
973 		if (++sc->sc_nextrx == NRXREAL)
974 			sc->sc_nextrx = 0;
975 	}
976 
977 	if_input(ifp, &ml);
978 
979 	return (1);
980 }
981 
982 int
983 xge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
984 {
985 	struct xge_softc *sc = ifp->if_softc;
986 	struct ifreq *ifr = (struct ifreq *) data;
987 	int s, error = 0;
988 
989 	s = splnet();
990 
991 	switch (cmd) {
992 	case SIOCSIFADDR:
993 		ifp->if_flags |= IFF_UP;
994 		if (!(ifp->if_flags & IFF_RUNNING))
995 			xge_init(ifp);
996 		break;
997 
998 	case SIOCSIFFLAGS:
999 		if (ifp->if_flags & IFF_UP) {
1000 			if (ifp->if_flags & IFF_RUNNING &&
1001 			    (ifp->if_flags ^ sc->xge_if_flags) &
1002 			     IFF_PROMISC) {
1003 				xge_setpromisc(sc);
1004 			} else {
1005 				if (!(ifp->if_flags & IFF_RUNNING))
1006 					xge_init(ifp);
1007 			}
1008 		} else {
1009 			if (ifp->if_flags & IFF_RUNNING)
1010 				xge_stop(ifp, 1);
1011 		}
1012 		sc->xge_if_flags = ifp->if_flags;
1013 		break;
1014 
1015 	case SIOCGIFMEDIA:
1016 	case SIOCSIFMEDIA:
1017 		error = ifmedia_ioctl(ifp, ifr, &sc->xena_media, cmd);
1018 		break;
1019 
1020 	default:
1021 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1022 	}
1023 
1024 	if (error == ENETRESET) {
1025 		if (ifp->if_flags & IFF_RUNNING)
1026 			xge_setmulti(sc);
1027 		error = 0;
1028 	}
1029 
1030 	splx(s);
1031 	return (error);
1032 }
1033 
1034 void
1035 xge_setmulti(struct xge_softc *sc)
1036 {
1037 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1038 	struct arpcom *ac = &sc->sc_arpcom;
1039 	struct ether_multi *enm;
1040 	struct ether_multistep step;
1041 	int i, numaddr = 1; /* first slot used for card unicast address */
1042 	uint64_t val;
1043 
1044 	if (ac->ac_multirangecnt > 0)
1045 		goto allmulti;
1046 
1047 	ETHER_FIRST_MULTI(step, ac, enm);
1048 	while (enm != NULL) {
1049 		if (numaddr == MAX_MCAST_ADDR)
1050 			goto allmulti;
1051 		for (val = 0, i = 0; i < ETHER_ADDR_LEN; i++) {
1052 			val <<= 8;
1053 			val |= enm->enm_addrlo[i];
1054 		}
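		/* The six address bytes now occupy the low 48 bits;
		 * shift them up into the top of the data register. */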
1055 		PIF_WCSR(RMAC_ADDR_DATA0_MEM, val << 16);
1056 		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
1057 		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
1058 		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(numaddr));
1059 		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
1060 			;
1061 		numaddr++;
1062 		ETHER_NEXT_MULTI(step, enm);
1063 	}
1064 	/* set the remaining entries to the broadcast address */
1065 	for (i = numaddr; i < MAX_MCAST_ADDR; i++) {
1066 		PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0xffffffffffff0000ULL);
1067 		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
1068 		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
1069 		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(i));
1070 		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
1071 			;
1072 	}
1073 	ifp->if_flags &= ~IFF_ALLMULTI;
1074 	return;
1075 
1076 allmulti:
1077 	/* Just receive everything with the multicast bit set */
1078 	ifp->if_flags |= IFF_ALLMULTI;
1079 	PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0x8000000000000000ULL);
1080 	PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xF000000000000000ULL);
1081 	PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
1082 	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(1));
1083 	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
1084 		;
1085 }
1086 
1087 void
1088 xge_setpromisc(struct xge_softc *sc)
1089 {
1090 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1091 	uint64_t val;
1092 
1093 	val = PIF_RCSR(MAC_CFG);
1094 
1095 	if (ifp->if_flags & IFF_PROMISC)
1096 		val |= RMAC_PROM_EN;
1097 	else
1098 		val &= ~RMAC_PROM_EN;
1099 
1100 	PIF_WCSR(MAC_CFG, val);
1101 }
1102 
1103 void
1104 xge_start(struct ifnet *ifp)
1105 {
1106 	struct xge_softc *sc = ifp->if_softc;
1107 	struct txd *txd = NULL; /* XXX - gcc */
1108 	bus_dmamap_t dmp;
1109 	struct	mbuf *m;
1110 	uint64_t par, lcr;
1111 	int nexttx = 0, ntxd, i;
1112 
1113 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1114 		return;
1115 
1116 	par = lcr = 0;
1117 	for (;;) {
1118 		if (sc->sc_nexttx == sc->sc_lasttx) {
1119 			ifq_set_oactive(&ifp->if_snd);
1120 			break;	/* No more space */
1121 		}
1122 
1123 		m = ifq_dequeue(&ifp->if_snd);
1124 		if (m == NULL)
1125 			break;	/* out of packets */
1126 
1127 		nexttx = sc->sc_nexttx;
1128 		dmp = sc->sc_txm[nexttx];
1129 
1130 		switch (bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
1131 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
1132 		case 0:
1133 			break;
1134 		case EFBIG:
1135 			if (m_defrag(m, M_DONTWAIT) == 0 &&
1136 			    bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
1137 			    BUS_DMA_WRITE|BUS_DMA_NOWAIT) == 0)
1138 				break;
1139 		default:
1140 			m_freem(m);
1141 			continue;
1142 		}
1143 
1144 		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
1145 		    BUS_DMASYNC_PREWRITE);
1146 
1147 		txd = sc->sc_txd[nexttx];
1148 		sc->sc_txb[nexttx] = m;
1149 		for (i = 0; i < dmp->dm_nsegs; i++) {
1150 			if (dmp->dm_segs[i].ds_len == 0)
1151 				continue;
1152 			txd->txd_control1 = dmp->dm_segs[i].ds_len;
1153 			txd->txd_control2 = 0;
1154 			txd->txd_bufaddr = dmp->dm_segs[i].ds_addr;
1155 			txd++;
1156 		}
1157 		ntxd = txd - sc->sc_txd[nexttx] - 1;
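		/* ntxd is the index of the last fragment descriptor
		 * filled in above, i.e. the fragment count minus one. */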
1158 		txd = sc->sc_txd[nexttx];
1159 		txd->txd_control1 |= TXD_CTL1_OWN|TXD_CTL1_GCF;
1160 		txd->txd_control2 = TXD_CTL2_UTIL;
1161 
1162 #if NVLAN > 0
1163 		if (m->m_flags & M_VLANTAG) {
1164 			txd->txd_control2 |= TXD_CTL2_VLANE;
1165 			txd->txd_control2 |=
1166 			    TXD_CTL2_VLANT(m->m_pkthdr.ether_vtag);
1167 		}
1168 #endif
1169 
1170 		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1171 			txd->txd_control2 |= TXD_CTL2_CIPv4;
1172 		if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1173 			txd->txd_control2 |= TXD_CTL2_CTCP;
1174 		if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1175 			txd->txd_control2 |= TXD_CTL2_CUDP;
1176 
1177 		txd[ntxd].txd_control1 |= TXD_CTL1_GCL;
1178 
1179 		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
1180 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1181 
1182 		par = sc->sc_txdp[nexttx];
1183 		lcr = TXDL_NUMTXD(ntxd) | TXDL_LGC_FIRST | TXDL_LGC_LAST;
1184 		TXP_WCSR(TXDL_PAR, par);
1185 		TXP_WCSR(TXDL_LCR, lcr);
1186 
1187 #if NBPFILTER > 0
1188 		if (ifp->if_bpf)
1189 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1190 #endif /* NBPFILTER > 0 */
1191 
1192 		sc->sc_nexttx = NEXTTX(nexttx);
1193 	}
1194 }
1195 
1196 /*
1197  * Allocate DMA memory for transmit descriptor fragments.
1198  * Only one map is used for all descriptors.
1199  */
1200 int
1201 xge_alloc_txmem(struct xge_softc *sc)
1202 {
1203 	struct txd *txp;
1204 	bus_dma_segment_t seg;
1205 	bus_addr_t txdp;
1206 	caddr_t kva;
1207 	int i, rseg, state;
1208 
1209 #define TXMAPSZ (NTXDESCS*NTXFRAGS*sizeof(struct txd))
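/*
 * Each of the NTXDESCS transmit slots gets room for NTXFRAGS
 * fragment descriptors in one contiguous allocation; with the
 * defaults above that is 2048 * 100 struct txd entries.
 */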
1210 	state = 0;
1211 	if (bus_dmamem_alloc(sc->sc_dmat, TXMAPSZ, PAGE_SIZE, 0,
1212 	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
1213 		goto err;
1214 	state++;
1215 	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, TXMAPSZ, &kva,
1216 	    BUS_DMA_NOWAIT))
1217 		goto err;
1218 
1219 	state++;
1220 	if (bus_dmamap_create(sc->sc_dmat, TXMAPSZ, 1, TXMAPSZ, 0,
1221 	    BUS_DMA_NOWAIT, &sc->sc_txmap))
1222 		goto err;
1223 	state++;
1224 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_txmap,
1225 	    kva, TXMAPSZ, NULL, BUS_DMA_NOWAIT))
1226 		goto err;
1227 
1228 	/* setup transmit array pointers */
1229 	txp = (struct txd *)kva;
1230 	txdp = seg.ds_addr;
1231 	for (i = 0; i < NTXDESCS; i++) {
1232 		sc->sc_txd[i] = txp;
1233 		sc->sc_txdp[i] = txdp;
1234 		txp += NTXFRAGS;
1235 		txdp += (NTXFRAGS * sizeof(struct txd));
1236 	}
1237 
1238 	return (0);
1239 
1240 err:
1241 	if (state > 2)
1242 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap);
1243 	if (state > 1)
1244 		bus_dmamem_unmap(sc->sc_dmat, kva, TXMAPSZ);
1245 	if (state > 0)
1246 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1247 	return (ENOBUFS);
1248 }
1249 
1250 /*
1251  * Allocate DMA memory for receive descriptor,
1252  * only one map is used for all descriptors.
1253  * link receive descriptor pages together.
1254  */
1255 int
1256 xge_alloc_rxmem(struct xge_softc *sc)
1257 {
1258 	struct rxd_4k *rxpp;
1259 	bus_dma_segment_t seg;
1260 	caddr_t kva;
1261 	int i, rseg, state;
1262 
1263 	/* sanity check */
1264 	if (sizeof(struct rxd_4k) != XGE_PAGE) {
1265 		printf("bad compiler struct alignment, %d != %d\n",
1266 		    (int)sizeof(struct rxd_4k), XGE_PAGE);
1267 		return (EINVAL);
1268 	}
1269 
1270 	state = 0;
1271 	if (bus_dmamem_alloc(sc->sc_dmat, RXMAPSZ, PAGE_SIZE, 0,
1272 	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
1273 		goto err;
1274 	state++;
1275 	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, RXMAPSZ, &kva,
1276 	    BUS_DMA_NOWAIT))
1277 		goto err;
1278 
1279 	state++;
1280 	if (bus_dmamap_create(sc->sc_dmat, RXMAPSZ, 1, RXMAPSZ, 0,
1281 	    BUS_DMA_NOWAIT, &sc->sc_rxmap))
1282 		goto err;
1283 	state++;
1284 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap,
1285 	    kva, RXMAPSZ, NULL, BUS_DMA_NOWAIT))
1286 		goto err;
1287 
1288 	/* setup receive page link pointers */
1289 	for (rxpp = (struct rxd_4k *)kva, i = 0; i < NRXPAGES; i++, rxpp++) {
1290 		sc->sc_rxd_4k[i] = rxpp;
1291 		rxpp->r4_next = (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr +
1292 		    (i*sizeof(struct rxd_4k)) + sizeof(struct rxd_4k);
1293 	}
1294 	sc->sc_rxd_4k[NRXPAGES-1]->r4_next =
1295 	    (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr;
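	/* The last page links back to the first, so the hardware sees
	 * the descriptor pages as one circular ring. */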
1296 
1297 	return (0);
1298 
1299 err:
1300 	if (state > 2)
1301 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap);
1302 	if (state > 1)
1303 		bus_dmamem_unmap(sc->sc_dmat, kva, RXMAPSZ);
1304 	if (state > 0)
1305 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1306 	return (ENOBUFS);
1307 }
1308 
1309 
1310 /*
1311  * Add a new mbuf chain to descriptor id.
1312  */
1313 int
1314 xge_add_rxbuf(struct xge_softc *sc, int id)
1315 {
1316 	struct rxdesc *rxd;
1317 	struct mbuf *m[5];
1318 	int page, desc, error;
1319 #if RX_MODE == RX_MODE_5
1320 	int i;
1321 #endif
1322 
1323 	page = id/NDESC_BUFMODE;
1324 	desc = id%NDESC_BUFMODE;
1325 
1326 	rxd = &sc->sc_rxd_4k[page]->r4_rxd[desc];
1327 
1328 	/*
1329 	 * Allocate mbufs. In 1-buffer mode a single cluster holds the
1330 	 * whole frame. In 5-buffer mode five mbufs and two clusters are
1331 	 * used: the hardware puts the (ethernet, ip, tcp/udp) headers in
1332 	 * their own buffers and the clusters carry only data.
1333 	 */
1334 #if RX_MODE == RX_MODE_1
1335 	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
1336 	if (m[0] == NULL)
1337 		return (ENOBUFS);
1338 	MCLGETL(m[0], M_DONTWAIT, XGE_MAX_FRAMELEN + ETHER_ALIGN);
1339 	if ((m[0]->m_flags & M_EXT) == 0) {
1340 		m_freem(m[0]);
1341 		return (ENOBUFS);
1342 	}
1343 	m[0]->m_len = m[0]->m_pkthdr.len = XGE_MAX_FRAMELEN + ETHER_ALIGN;
1344 #elif RX_MODE == RX_MODE_3
1345 #error missing rxmode 3.
1346 #elif RX_MODE == RX_MODE_5
1347 	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
1348 	for (i = 1; i < 5; i++) {
1349 		MGET(m[i], M_DONTWAIT, MT_DATA);
1350 	}
1351 	if (m[3])
1352 		MCLGET(m[3], M_DONTWAIT);
1353 	if (m[4])
1354 		MCLGET(m[4], M_DONTWAIT);
1355 	if (!m[0] || !m[1] || !m[2] || !m[3] || !m[4] ||
1356 	    ((m[3]->m_flags & M_EXT) == 0) || ((m[4]->m_flags & M_EXT) == 0)) {
1357 		/* Out of something */
1358 		for (i = 0; i < 5; i++)
1359 			m_free(m[i]);
1360 		return (ENOBUFS);
1361 	}
1362 	/* Link'em together */
1363 	m[0]->m_next = m[1];
1364 	m[1]->m_next = m[2];
1365 	m[2]->m_next = m[3];
1366 	m[3]->m_next = m[4];
1367 #else
1368 #error bad mode RX_MODE
1369 #endif
1370 
1371 	if (sc->sc_rxb[id])
1372 		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxm[id]);
1373 	sc->sc_rxb[id] = m[0];
1374 
1375 	m_adj(m[0], ETHER_ALIGN);
1376 
1377 	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rxm[id], m[0],
1378 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1379 	if (error)
1380 		return (error);
1381 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxm[id], 0,
1382 	    sc->sc_rxm[id]->dm_mapsize, BUS_DMASYNC_PREREAD);
1383 
1384 #if RX_MODE == RX_MODE_1
1385 	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, 0, 0);
1386 	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
1387 	rxd->rxd_control1 = RXD_CTL1_OWN;
1388 #elif RX_MODE == RX_MODE_3
1389 #elif RX_MODE == RX_MODE_5
1390 	rxd->rxd_control3 = RXD_MKCTL3(0, m[3]->m_len, m[4]->m_len);
1391 	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, m[1]->m_len, m[2]->m_len);
1392 	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
1393 	rxd->rxd_buf1 = (uint64_t)sc->sc_rxm[id]->dm_segs[1].ds_addr;
1394 	rxd->rxd_buf2 = (uint64_t)sc->sc_rxm[id]->dm_segs[2].ds_addr;
1395 	rxd->rxd_buf3 = (uint64_t)sc->sc_rxm[id]->dm_segs[3].ds_addr;
1396 	rxd->rxd_buf4 = (uint64_t)sc->sc_rxm[id]->dm_segs[4].ds_addr;
1397 	rxd->rxd_control1 = RXD_CTL1_OWN;
1398 #endif
1399 
1400 	XGE_RXSYNC(id, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1401 	return (0);
1402 }
1403 
1404 /*
1405  * This magic comes from the FreeBSD driver.
1406  */
1407 int
1408 xge_setup_xgxs_xena(struct xge_softc *sc)
1409 {
1410 	int i;
1411 
1412 	for (i = 0; i < nitems(xge_xena_dtx_cfg); i++) {
1413 		PIF_WCSR(DTX_CONTROL, xge_xena_dtx_cfg[i]);
1414 		DELAY(100);
1415 	}
1416 
1417 	return (0);
1418 }
1419 
1420 int
1421 xge_setup_xgxs_herc(struct xge_softc *sc)
1422 {
1423 	int i;
1424 
1425 	for (i = 0; i < nitems(xge_herc_dtx_cfg); i++) {
1426 		PIF_WCSR(DTX_CONTROL, xge_herc_dtx_cfg[i]);
1427 		DELAY(100);
1428 	}
1429 
1430 	return (0);
1431 }
1432