1 /* $OpenBSD: fxp.c,v 1.135 2024/08/31 16:23:09 deraadt Exp $ */
2 /* $NetBSD: if_fxp.c,v 1.2 1997/06/05 02:01:55 thorpej Exp $ */
3
4 /*
5 * Copyright (c) 1995, David Greenman
6 * All rights reserved.
7 *
8 * Modifications to support NetBSD:
9 * Copyright (c) 1997 Jason R. Thorpe. All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
16 * disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * Id: if_fxp.c,v 1.55 1998/08/04 08:53:12 dg Exp
34 */
35
36 /*
37 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
38 */
39
40 #include "bpfilter.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/syslog.h>
49 #include <sys/timeout.h>
50
51 #include <net/if.h>
52 #include <net/if_media.h>
53
54 #include <netinet/in.h>
55
56 #if NBPFILTER > 0
57 #include <net/bpf.h>
58 #endif
59
60 #include <sys/ioctl.h>
61 #include <sys/errno.h>
62 #include <sys/device.h>
63
64 #include <netinet/if_ether.h>
65
66 #include <machine/cpu.h>
67 #include <machine/bus.h>
68 #include <machine/intr.h>
69
70 #include <dev/mii/miivar.h>
71
72 #include <dev/ic/fxpreg.h>
73 #include <dev/ic/fxpvar.h>
74
75 /*
76 * NOTE! On the Alpha, we have an alignment constraint. The
77 * card DMAs the packet immediately following the RFA. However,
78 * the first thing in the packet is a 14-byte Ethernet header.
79 * This means that the packet is misaligned. To compensate,
80 * we actually offset the RFA 2 bytes into the cluster. This
81 * aligns the packet after the Ethernet header at a 32-bit
82 * boundary. HOWEVER! This means that the RFA is misaligned!
83 */
84 #define RFA_ALIGNMENT_FUDGE (2 + sizeof(bus_dmamap_t *))
85
86 /*
87 * Inline function to copy a 16-bit aligned 32-bit quantity.
88 */
static __inline void fxp_lwcopy(volatile u_int32_t *,
    volatile u_int32_t *);

static __inline void
fxp_lwcopy(volatile u_int32_t *src, volatile u_int32_t *dst)
{
	/* Move the word as two halfword accesses to honour alignment. */
	volatile u_int16_t *from = (u_int16_t *)src;
	volatile u_int16_t *to = (u_int16_t *)dst;

	to[0] = from[0];
	to[1] = from[1];
}
101
102 /*
103 * Template for default configuration parameters.
104 * See struct fxp_cb_config for the bit definitions.
105 * Note, cb_command is filled in later.
106 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x0, 0x0,		/* cb_command (filled in at run time) */
	0xff, 0xff, 0xff, 0xff,	/* link_addr (none) */
	0x16,	/* 0 Byte count: 22 (0x16) configuration bytes follow */
	0x08,	/* 1 Fifo limit */
	0x00,	/* 2 Adaptive ifs */
	0x00,	/* 3 ctrl0 */
	0x00,	/* 4 rx_dma_bytecount */
	0x80,	/* 5 tx_dma_bytecount */
	0xb2,	/* 6 ctrl 1 */
	0x03,	/* 7 ctrl 2 */
	0x01,	/* 8 mediatype */
	0x00,	/* 9 void2 */
	0x26,	/* 10 ctrl3 */
	0x00,	/* 11 linear priority */
	0x60,	/* 12 interfrm_spacing */
	0x00,	/* 13 void31 */
	0xf2,	/* 14 void32 */
	0x48,	/* 15 promiscuous */
	0x00,	/* 16 void41 */
	0x40,	/* 17 void42 */
	0xf3,	/* 18 stripping */
	0x00,	/* 19 fdx_pin */
	0x3f,	/* 20 multi_ia */
	0x05	/* 21 mc_all */
};
134
135 void fxp_eeprom_shiftin(struct fxp_softc *, int, int);
136 void fxp_eeprom_putword(struct fxp_softc *, int, u_int16_t);
137 void fxp_write_eeprom(struct fxp_softc *, u_short *, int, int);
138 int fxp_mediachange(struct ifnet *);
139 void fxp_mediastatus(struct ifnet *, struct ifmediareq *);
140 void fxp_scb_wait(struct fxp_softc *);
141 void fxp_start(struct ifnet *);
142 int fxp_ioctl(struct ifnet *, u_long, caddr_t);
143 void fxp_load_ucode(struct fxp_softc *);
144 void fxp_watchdog(struct ifnet *);
145 int fxp_add_rfabuf(struct fxp_softc *, struct mbuf *);
146 int fxp_mdi_read(struct device *, int, int);
147 void fxp_mdi_write(struct device *, int, int, int);
148 void fxp_autosize_eeprom(struct fxp_softc *);
149 void fxp_statchg(struct device *);
150 void fxp_read_eeprom(struct fxp_softc *, u_int16_t *,
151 int, int);
152 void fxp_stats_update(void *);
153 void fxp_mc_setup(struct fxp_softc *, int);
154 void fxp_scb_cmd(struct fxp_softc *, u_int16_t);
155
/*
 * Set initial transmit threshold at 64 (512 bytes). This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.
 */
static int tx_threshold = 64;	/* units of 8 bytes; bumped in fxp_stats_update() */

/*
 * Interrupts coalescing code params (used by the 82558+ microcode;
 * copied into the softc in fxp_attach()).
 */
int fxp_int_delay = FXP_INT_DELAY;
int fxp_bundle_max = FXP_BUNDLE_MAX;
int fxp_min_size_mask = FXP_MIN_SIZE_MASK;

/*
 * TxCB list index mask. This is used to do list wrap-around.
 * (Relies on FXP_NTXCB being a power of two.)
 */
#define FXP_TXCB_MASK	(FXP_NTXCB - 1)

/*
 * Maximum number of seconds that the receiver can be idle before we
 * assume it's dead and attempt to reset it by reprogramming the
 * multicast filter. This is part of a work-around for a bug in the
 * NIC. See fxp_stats_update().
 */
#define	FXP_MAX_RX_IDLE	15
182
183 /*
184 * Wait for the previous command to be accepted (but not necessarily
185 * completed).
186 */
187 void
fxp_scb_wait(struct fxp_softc * sc)188 fxp_scb_wait(struct fxp_softc *sc)
189 {
190 int i = FXP_CMD_TMO;
191
192 while ((CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) & 0xff) && --i)
193 DELAY(2);
194 if (i == 0)
195 printf("%s: warning: SCB timed out\n", sc->sc_dev.dv_xname);
196 }
197
198 void
fxp_eeprom_shiftin(struct fxp_softc * sc,int data,int length)199 fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
200 {
201 u_int16_t reg;
202 int x;
203
204 /*
205 * Shift in data.
206 */
207 for (x = 1 << (length - 1); x; x >>= 1) {
208 if (data & x)
209 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
210 else
211 reg = FXP_EEPROM_EECS;
212 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
213 DELAY(1);
214 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
215 DELAY(1);
216 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
217 DELAY(1);
218 }
219 }
220
/*
 * Write one 16-bit word to the serial EEPROM at the given word offset.
 * The sequence is: erase/write enable, the write itself, poll EEDO for
 * completion, then erase/write disable again.
 */
void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, u_int16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 * (Opcode 100 followed by an all-ones address field — the
	 * EWEN command on microwire-style EEPROMs.)
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up: re-select the part and poll
	 * EEDO, which goes high when the internal write completes.
	 * Bounded at 1000 * 50us so a dead part cannot hang us.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable (opcode 100 with a zero address field).
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}
264
265 void
fxp_write_eeprom(struct fxp_softc * sc,u_short * data,int offset,int words)266 fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
267 {
268 int i;
269
270 for (i = 0; i < words; i++)
271 fxp_eeprom_putword(sc, offset + i, data[i]);
272 }
273
274 /*************************************************************
275 * Operating system-specific autoconfiguration glue
276 *************************************************************/
277
/* Autoconf glue: the "fxp" driver class, attached as a network device. */
struct cfdriver fxp_cd = {
	NULL, "fxp", DV_IFNET
};
281
282 int
fxp_activate(struct device * self,int act)283 fxp_activate(struct device *self, int act)
284 {
285 struct fxp_softc *sc = (struct fxp_softc *)self;
286 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
287
288 switch (act) {
289 case DVACT_SUSPEND:
290 if (ifp->if_flags & IFF_RUNNING)
291 fxp_stop(sc, 1, 0);
292 break;
293 case DVACT_WAKEUP:
294 if (ifp->if_flags & IFF_UP)
295 fxp_wakeup(sc);
296 break;
297 }
298 return (0);
299 }
300
301 void
fxp_wakeup(struct fxp_softc * sc)302 fxp_wakeup(struct fxp_softc *sc)
303 {
304 int s = splnet();
305
306 /* force reload of the microcode */
307 sc->sc_flags &= ~FXPF_UCODELOADED;
308
309 fxp_init(sc);
310 splx(s);
311 }
312
313 /*************************************************************
314 * End of operating system-specific autoconfiguration glue
315 *************************************************************/
316
317 /*
318 * Do generic parts of attach.
319 */
320 int
fxp_attach(struct fxp_softc * sc,const char * intrstr)321 fxp_attach(struct fxp_softc *sc, const char *intrstr)
322 {
323 struct ifnet *ifp;
324 struct mbuf *m;
325 bus_dmamap_t rxmap;
326 u_int16_t data;
327 u_int8_t enaddr[6];
328 int i, err;
329
330 /*
331 * Reset to a stable state.
332 */
333 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
334 DELAY(10);
335
336 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct fxp_ctrl),
337 PAGE_SIZE, 0, &sc->sc_cb_seg, 1, &sc->sc_cb_nseg,
338 BUS_DMA_NOWAIT | BUS_DMA_ZERO))
339 goto fail;
340 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg,
341 sizeof(struct fxp_ctrl), (caddr_t *)&sc->sc_ctrl,
342 BUS_DMA_NOWAIT)) {
343 bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
344 goto fail;
345 }
346 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct fxp_ctrl),
347 1, sizeof(struct fxp_ctrl), 0, BUS_DMA_NOWAIT,
348 &sc->tx_cb_map)) {
349 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
350 sizeof(struct fxp_ctrl));
351 bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
352 goto fail;
353 }
354 if (bus_dmamap_load(sc->sc_dmat, sc->tx_cb_map, (caddr_t)sc->sc_ctrl,
355 sizeof(struct fxp_ctrl), NULL, BUS_DMA_NOWAIT)) {
356 bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map);
357 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
358 sizeof(struct fxp_ctrl));
359 bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
360 goto fail;
361 }
362
363 for (i = 0; i < FXP_NTXCB; i++) {
364 if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
365 FXP_NTXSEG, MCLBYTES, 0, 0, &sc->txs[i].tx_map)) != 0) {
366 printf("%s: unable to create tx dma map %d, error %d\n",
367 sc->sc_dev.dv_xname, i, err);
368 goto fail;
369 }
370 sc->txs[i].tx_mbuf = NULL;
371 sc->txs[i].tx_cb = sc->sc_ctrl->tx_cb + i;
372 sc->txs[i].tx_off = offsetof(struct fxp_ctrl, tx_cb[i]);
373 sc->txs[i].tx_next = &sc->txs[(i + 1) & FXP_TXCB_MASK];
374 }
375
376 /*
377 * Pre-allocate some receive buffers.
378 */
379 sc->sc_rxfree = 0;
380 for (i = 0; i < FXP_NRFABUFS_MIN; i++) {
381 if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
382 MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) {
383 printf("%s: unable to create rx dma map %d, error %d\n",
384 sc->sc_dev.dv_xname, i, err);
385 goto fail;
386 }
387 sc->rx_bufs++;
388 }
389 for (i = 0; i < FXP_NRFABUFS_MIN; i++)
390 if (fxp_add_rfabuf(sc, NULL) != 0)
391 goto fail;
392
393 /*
394 * Find out how large of an SEEPROM we have.
395 */
396 fxp_autosize_eeprom(sc);
397
398 /*
399 * Get info about the primary PHY
400 */
401 fxp_read_eeprom(sc, (u_int16_t *)&data, FXP_EEPROM_REG_PHY, 1);
402 sc->phy_primary_addr = data & 0xff;
403 sc->phy_primary_device = (data >> 8) & 0x3f;
404 sc->phy_10Mbps_only = data >> 15;
405
406 /*
407 * Only 82558 and newer cards can do this.
408 */
409 if (sc->sc_revision >= FXP_REV_82558_A4) {
410 sc->sc_int_delay = fxp_int_delay;
411 sc->sc_bundle_max = fxp_bundle_max;
412 sc->sc_min_size_mask = fxp_min_size_mask;
413 }
414 /*
415 * Read MAC address.
416 */
417 fxp_read_eeprom(sc, (u_int16_t *)enaddr, FXP_EEPROM_REG_MAC, 3);
418
419 ifp = &sc->sc_arpcom.ac_if;
420 bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
421 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
422 ifp->if_softc = sc;
423 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
424 ifp->if_ioctl = fxp_ioctl;
425 ifp->if_start = fxp_start;
426 ifp->if_watchdog = fxp_watchdog;
427 ifq_init_maxlen(&ifp->if_snd, FXP_NTXCB - 1);
428
429 ifp->if_capabilities = IFCAP_VLAN_MTU;
430
431 printf(": %s, address %s\n", intrstr,
432 ether_sprintf(sc->sc_arpcom.ac_enaddr));
433
434 if (sc->sc_flags & FXPF_DISABLE_STANDBY) {
435 fxp_read_eeprom(sc, &data, FXP_EEPROM_REG_ID, 1);
436 if (data & FXP_EEPROM_REG_ID_STB) {
437 u_int16_t cksum;
438
439 printf("%s: Disabling dynamic standby mode in EEPROM",
440 sc->sc_dev.dv_xname);
441 data &= ~FXP_EEPROM_REG_ID_STB;
442 fxp_write_eeprom(sc, &data, FXP_EEPROM_REG_ID, 1);
443 printf(", New ID 0x%x", data);
444 cksum = 0;
445 for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
446 fxp_read_eeprom(sc, &data, i, 1);
447 cksum += data;
448 }
449 i = (1 << sc->eeprom_size) - 1;
450 cksum = 0xBABA - cksum;
451 fxp_read_eeprom(sc, &data, i, 1);
452 fxp_write_eeprom(sc, &cksum, i, 1);
453 printf(", cksum @ 0x%x: 0x%x -> 0x%x\n",
454 i, data, cksum);
455 }
456 }
457
458 /* Receiver lock-up workaround detection. */
459 fxp_read_eeprom(sc, &data, FXP_EEPROM_REG_COMPAT, 1);
460 if ((data & (FXP_EEPROM_REG_COMPAT_MC10|FXP_EEPROM_REG_COMPAT_MC100))
461 != (FXP_EEPROM_REG_COMPAT_MC10|FXP_EEPROM_REG_COMPAT_MC100))
462 sc->sc_flags |= FXPF_RECV_WORKAROUND;
463
464 /*
465 * Initialize our media structures and probe the MII.
466 */
467 sc->sc_mii.mii_ifp = ifp;
468 sc->sc_mii.mii_readreg = fxp_mdi_read;
469 sc->sc_mii.mii_writereg = fxp_mdi_write;
470 sc->sc_mii.mii_statchg = fxp_statchg;
471 ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_mediachange,
472 fxp_mediastatus);
473 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
474 MII_OFFSET_ANY, MIIF_NOISOLATE);
475 /* If no phy found, just use auto mode */
476 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
477 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
478 0, NULL);
479 printf("%s: no phy found, using manual mode\n",
480 sc->sc_dev.dv_xname);
481 }
482
483 if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0))
484 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
485 else if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0))
486 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
487 else
488 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
489
490 /*
491 * Attach the interface.
492 */
493 if_attach(ifp);
494 ether_ifattach(ifp);
495
496 /*
497 * Initialize timeout for statistics update.
498 */
499 timeout_set(&sc->stats_update_to, fxp_stats_update, sc);
500
501 return (0);
502
503 fail:
504 printf("%s: Failed to malloc memory\n", sc->sc_dev.dv_xname);
505 if (sc->tx_cb_map != NULL) {
506 bus_dmamap_unload(sc->sc_dmat, sc->tx_cb_map);
507 bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map);
508 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
509 sizeof(struct fxp_cb_tx) * FXP_NTXCB);
510 bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
511 }
512 m = sc->rfa_headm;
513 while (m != NULL) {
514 rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
515 bus_dmamap_unload(sc->sc_dmat, rxmap);
516 FXP_RXMAP_PUT(sc, rxmap);
517 m = m_free(m);
518 }
519 return (ENOMEM);
520 }
521
522 /*
523 * From NetBSD:
524 *
525 * Figure out EEPROM size.
526 *
527 * 559's can have either 64-word or 256-word EEPROMs, the 558
528 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
529 * talks about the existence of 16 to 256 word EEPROMs.
530 *
531 * The only known sizes are 64 and 256, where the 256 version is used
532 * by CardBus cards to store CIS information.
533 *
534 * The address is shifted in msb-to-lsb, and after the last
535 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
536 * after which follows the actual data. We try to detect this zero, by
537 * probing the data-out bit in the EEPROM control register just after
538 * having shifted in a bit. If the bit is zero, we assume we've
539 * shifted enough address bits. The data-out should be tri-state,
540 * before this, which should translate to a logical one.
541 *
542 * Other ways to do this would be to try to read a register with known
543 * contents with a varying number of address bits, but no such
544 * register seem to be available. The high bits of register 10 are 01
545 * on the 558 and 559, but apparently not on the 557.
546 *
547 * The Linux driver computes a checksum on the EEPROM data, but the
548 * value of this checksum is not very well documented.
549 */
/*
 * Probe the EEPROM address width (see the block comment above): shift
 * in a read opcode, then clock address bits one at a time until the
 * part answers with the dummy-zero on EEDO. The number of clocks
 * needed is the address width, recorded in sc->eeprom_size.
 */
void
fxp_autosize_eeprom(struct fxp_softc *sc)
{
	u_int16_t reg;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode (3 bits, MSB first).
	 */
	for (x = 3; x > 0; x--) {
		if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		} else {
			reg = FXP_EEPROM_EECS;
		}
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    reg | FXP_EEPROM_EESK);
		DELAY(4);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(4);
	}
	/*
	 * Shift in address (all zero bits).
	 * Wait for the dummy zero following a correct address shift.
	 */
	for (x = 1; x <= 8; x++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    FXP_EEPROM_EECS | FXP_EEPROM_EESK);
		DELAY(4);
		/* EEDO drops to zero once enough address bits are in. */
		if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) == 0)
			break;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		DELAY(4);
	}
	/* Deselect the part; x now holds the address width in bits. */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(4);
	sc->eeprom_size = x;
}
591
592 /*
593 * Read from the serial EEPROM. Basically, you manually shift in
594 * the read opcode (one bit at a time) and then shift in the address,
595 * and then you shift out the data (all of this one bit at a time).
596 * The word size is 16 bits, so you have to provide the address for
597 * every 16 bits of data.
598 */
/*
 * Read `words' 16-bit words from the serial EEPROM starting at word
 * `offset' into `data'. For each word: select the part, clock in the
 * 3-bit read opcode and the address (MSB first), then clock out the
 * 16 data bits and convert to host byte order.
 */
void
fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset,
    int words)
{
	u_int16_t reg;
	int i, x;

	for (i = 0; i < words; i++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		/*
		 * Shift in read opcode.
		 */
		for (x = 3; x > 0; x--) {
			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		/*
		 * Shift in address, MSB first, eeprom_size bits wide.
		 */
		for (x = sc->eeprom_size; x > 0; x--) {
			if ((i + offset) & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		reg = FXP_EEPROM_EECS;
		data[i] = 0;
		/*
		 * Shift out data: sample EEDO after each rising EESK edge.
		 */
		for (x = 16; x > 0; x--) {
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
			    FXP_EEPROM_EEDO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		/* Convert the assembled word to host byte order. */
		data[i] = letoh16(data[i]);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
		DELAY(4);
	}
}
660
661 /*
662 * Start packet transmission on the interface.
663 */
/*
 * Start packet transmission on the interface: dequeue mbufs from the
 * send queue, DMA-load each chain into the next TxCB in the ring, and
 * resume the command unit. The ring is terminated by a NOP TxCB with
 * the suspend (S) bit set; when new frames are appended the S/I bits
 * are moved from the old tail to a fresh NOP so the CU keeps going.
 */
void
fxp_start(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_txsw *txs = sc->sc_cbt_prod;
	struct fxp_cb_tx *txc;
	struct mbuf *m0;
	int cnt = sc->sc_cbt_cnt, seg, error;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	while (1) {
		/* Keep two TxCBs spare: one for the NOP tail marker. */
		if (cnt >= (FXP_NTXCB - 2)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		txs = txs->tx_next;

		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map,
		    m0, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			break;
		case EFBIG:
			/* Too many segments: compact the chain and retry. */
			if (m_defrag(m0, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map,
			    m0, BUS_DMA_NOWAIT) == 0)
				break;
			/* FALLTHROUGH */
		default:
			ifp->if_oerrors++;
			m_freem(m0);
			/* try next packet */
			continue;
		}

		txs->tx_mbuf = m0;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif

		FXP_MBUF_SYNC(sc, txs->tx_map, BUS_DMASYNC_PREWRITE);

		/* Fill in the TxCB: flexible mode, one TBD per DMA segment. */
		txc = txs->tx_cb;
		txc->tbd_number = txs->tx_map->dm_nsegs;
		txc->cb_status = 0;
		txc->cb_command = htole16(FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF);
		txc->tx_threshold = tx_threshold;
		for (seg = 0; seg < txs->tx_map->dm_nsegs; seg++) {
			txc->tbd[seg].tb_addr =
			    htole32(txs->tx_map->dm_segs[seg].ds_addr);
			txc->tbd[seg].tb_size =
			    htole32(txs->tx_map->dm_segs[seg].ds_len);
		}
		FXP_TXCB_SYNC(sc, txs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		++cnt;
		sc->sc_cbt_prod = txs;
	}

	if (cnt != sc->sc_cbt_cnt) {
		/* We enqueued at least one. */
		ifp->if_timer = 5;

		/* New tail: a NOP TxCB with suspend + interrupt set. */
		txs = sc->sc_cbt_prod;
		txs = txs->tx_next;
		sc->sc_cbt_prod = txs;
		txs->tx_cb->cb_command =
		    htole16(FXP_CB_COMMAND_I | FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
		FXP_TXCB_SYNC(sc, txs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Clear S/I on the previous tail so the CU runs
		 * through it and stops at the new NOP instead.
		 */
		FXP_TXCB_SYNC(sc, sc->sc_cbt_prev,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		sc->sc_cbt_prev->tx_cb->cb_command &=
		    htole16(~(FXP_CB_COMMAND_S | FXP_CB_COMMAND_I));
		FXP_TXCB_SYNC(sc, sc->sc_cbt_prev,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		sc->sc_cbt_prev = txs;

		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);

		/* +1 accounts for the NOP tail marker. */
		sc->sc_cbt_cnt = cnt + 1;
	}
}
760
761 /*
762 * Process interface interrupts.
763 */
/*
 * Interrupt handler: acknowledge all asserted SCB status bits, reap
 * completed transmit command blocks, harvest received frames, and
 * restart the receive unit on an RNR (out of resources) condition.
 * Returns non-zero if the interrupt was ours.
 */
int
fxp_intr(void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	u_int16_t statack;
	bus_dmamap_t rxmap;
	int claimed = 0;
	int rnr = 0;

	/*
	 * If the interface isn't running, don't try to
	 * service the interrupt.. just ack it and bail.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		statack = CSR_READ_2(sc, FXP_CSR_SCB_STATUS);
		if (statack) {
			claimed = 1;
			CSR_WRITE_2(sc, FXP_CSR_SCB_STATUS,
			    statack & FXP_SCB_STATACK_MASK);
		}
		return claimed;
	}

	while ((statack = CSR_READ_2(sc, FXP_CSR_SCB_STATUS)) &
	    FXP_SCB_STATACK_MASK) {
		claimed = 1;
		/* Remember whether the RU needs a restart after this pass. */
		rnr = (statack & (FXP_SCB_STATACK_RNR |
		    FXP_SCB_STATACK_SWI)) ? 1 : 0;
		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_2(sc, FXP_CSR_SCB_STATUS,
		    statack & FXP_SCB_STATACK_MASK);

		/*
		 * Free any finished transmit mbuf chains.
		 */
		if (statack & (FXP_SCB_STATACK_CXTNO|FXP_SCB_STATACK_CNA)) {
			int txcnt = sc->sc_cbt_cnt;
			struct fxp_txsw *txs = sc->sc_cbt_cons;

			FXP_TXCB_SYNC(sc, txs,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			/*
			 * Advance the consumer past every TxCB the chip
			 * has completed (C bit) or that is a NOP marker.
			 */
			while ((txcnt > 0) &&
			   ((txs->tx_cb->cb_status & htole16(FXP_CB_STATUS_C)) ||
			   (txs->tx_cb->cb_command & htole16(FXP_CB_COMMAND_NOP)))) {
				if (txs->tx_mbuf != NULL) {
					FXP_MBUF_SYNC(sc, txs->tx_map,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat,
					    txs->tx_map);
					m_freem(txs->tx_mbuf);
					txs->tx_mbuf = NULL;
				}
				--txcnt;
				txs = txs->tx_next;
				FXP_TXCB_SYNC(sc, txs,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			}
			sc->sc_cbt_cnt = txcnt;
			/* Did we transmit any packets? */
			if (sc->sc_cbt_cons != txs)
				ifq_clr_oactive(&ifp->if_snd);
			ifp->if_timer = sc->sc_cbt_cnt ? 5 : 0;
			sc->sc_cbt_cons = txs;

			if (!ifq_empty(&ifp->if_snd)) {
				/*
				 * Try to start more packets transmitting.
				 */
				fxp_start(ifp);
			}
		}
		/*
		 * Process receiver interrupts. If a Receive Unit
		 * not ready (RNR) condition exists, get whatever
		 * packets we can and re-start the receiver.
		 */
		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR |
		    FXP_SCB_STATACK_SWI)) {
			struct mbuf *m;
			u_int8_t *rfap;
rcvloop:
			m = sc->rfa_headm;
			/* The RFA sits RFA_ALIGNMENT_FUDGE bytes into the cluster. */
			rfap = m->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE;
			rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
			bus_dmamap_sync(sc->sc_dmat, rxmap,
			    0, MCLBYTES, BUS_DMASYNC_POSTREAD |
			    BUS_DMASYNC_POSTWRITE);

			if (*(u_int16_t *)(rfap +
			    offsetof(struct fxp_rfa, rfa_status)) &
			    htole16(FXP_RFA_STATUS_C)) {
				if (*(u_int16_t *)(rfap +
				    offsetof(struct fxp_rfa, rfa_status)) &
				    htole16(FXP_RFA_STATUS_RNR))
					rnr = 1;

				/*
				 * Remove first packet from the chain.
				 */
				sc->rfa_headm = m->m_next;
				m->m_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (fxp_add_rfabuf(sc, m) == 0) {
					u_int16_t total_len;

					total_len = htole16(*(u_int16_t *)(rfap +
					    offsetof(struct fxp_rfa,
					    actual_size))) &
					    (MCLBYTES - 1);
					/* Drop runts and CRC-error frames. */
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					if (*(u_int16_t *)(rfap +
					    offsetof(struct fxp_rfa,
					    rfa_status)) &
					    htole16(FXP_RFA_STATUS_CRC)) {
						m_freem(m);
						goto rcvloop;
					}

					m->m_pkthdr.len = m->m_len = total_len;
					ml_enqueue(&ml, m);
				}
				goto rcvloop;
			}
		}
		if (rnr) {
			/* Restart the RU at the current head of the RFA chain. */
			rxmap = *((bus_dmamap_t *)
			    sc->rfa_headm->m_ext.ext_buf);
			fxp_scb_wait(sc);
			CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
			    rxmap->dm_segs[0].ds_addr +
			    RFA_ALIGNMENT_FUDGE);
			fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);

		}
	}

	if_input(ifp, &ml);

	return (claimed);
}
918
919 /*
920 * Update packet in/out/collision statistics. The i82557 doesn't
921 * allow you to access these counters without doing a fairly
922 * expensive DMA to get _all_ of the statistics it maintains, so
923 * we do this operation here only once per second. The statistics
924 * counters in the kernel are updated from the previous dump-stats
925 * DMA and then a new dump-stats DMA is started. The on-chip
926 * counters are zeroed when the DMA completes. If we can't start
927 * the DMA immediately, we don't wait - we just prepare to read
928 * them again next time.
929 */
void
fxp_stats_update(void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct fxp_stats *sp = &sc->sc_ctrl->stats;
	int s;

	/* Pull in the counters from the previous dump-stats DMA. */
	FXP_STATS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	ifp->if_collisions += letoh32(sp->tx_total_collisions);
	/* Track receiver idle time for the lock-up workaround below. */
	if (sp->rx_good) {
		sc->rx_idle_secs = 0;
	} else if (sc->sc_flags & FXPF_RECV_WORKAROUND)
		sc->rx_idle_secs++;
	ifp->if_ierrors +=
	    letoh32(sp->rx_crc_errors) +
	    letoh32(sp->rx_alignment_errors) +
	    letoh32(sp->rx_rnr_errors) +
	    letoh32(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += letoh32(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
	s = splnet();
	/*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_init(sc);
		splx(s);
		return;
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	FXP_STATS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (!(CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) & 0xff)) {
		/*
		 * Start another stats dump; the chip zeroes its
		 * on-board counters when the dump completes.
		 */
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}

	/* Tick the MII clock. */
	mii_tick(&sc->sc_mii);

	splx(s);
	/*
	 * Schedule another timeout one second from now.
	 */
	timeout_add_sec(&sc->stats_update_to, 1);
}
1011
1012 void
fxp_detach(struct fxp_softc * sc)1013 fxp_detach(struct fxp_softc *sc)
1014 {
1015 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1016
1017 /* Get rid of our timeouts and mbufs */
1018 fxp_stop(sc, 1, 1);
1019
1020 /* Detach any PHYs we might have. */
1021 if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
1022 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
1023
1024 /* Delete any remaining media. */
1025 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
1026
1027 ether_ifdetach(ifp);
1028 if_detach(ifp);
1029
1030 #ifndef SMALL_KERNEL
1031 if (sc->sc_ucodebuf)
1032 free(sc->sc_ucodebuf, M_DEVBUF, sc->sc_ucodelen);
1033 #endif
1034 }
1035
1036 /*
1037 * Stop the interface. Cancels the statistics updater and resets
1038 * the interface.
1039 */
/*
 * Stop the interface. Cancels the statistics updater, marks the
 * interface down, optionally resets the chip (!softonly), releases
 * all transmit mbufs and, if `drain' is set, frees and reallocates
 * the receive buffer chain.
 */
void
fxp_stop(struct fxp_softc *sc, int drain, int softonly)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	/*
	 * Cancel stats updater.
	 */
	timeout_del(&sc->stats_update_to);

	/*
	 * Turn down interface (done early to avoid bad interactions
	 * between panics, and the watchdog timer)
	 */
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	if (!softonly)
		mii_down(&sc->sc_mii);

	/*
	 * Issue software reset.
	 */
	if (!softonly) {
		CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
		DELAY(10);
	}

	/*
	 * Release any xmit buffers.
	 */
	for (i = 0; i < FXP_NTXCB; i++) {
		if (sc->txs[i].tx_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->txs[i].tx_map);
			m_freem(sc->txs[i].tx_mbuf);
			sc->txs[i].tx_mbuf = NULL;
		}
	}
	sc->sc_cbt_cnt = 0;

	if (drain) {
		bus_dmamap_t rxmap;
		struct mbuf *m;

		/*
		 * Free all the receive buffers then reallocate/reinitialize
		 */
		m = sc->rfa_headm;
		while (m != NULL) {
			rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
			bus_dmamap_unload(sc->sc_dmat, rxmap);
			FXP_RXMAP_PUT(sc, rxmap);
			m = m_free(m);
			sc->rx_bufs--;
		}
		sc->rfa_headm = NULL;
		sc->rfa_tailm = NULL;
		for (i = 0; i < FXP_NRFABUFS_MIN; i++) {
			if (fxp_add_rfabuf(sc, NULL) != 0) {
				/*
				 * This "can't happen" - we're at splnet()
				 * and we just freed all the buffers we need
				 * above.
				 */
				panic("fxp_stop: no buffers!");
			}
			sc->rx_bufs++;
		}
	}
}
1112
1113 /*
1114 * Watchdog/transmission transmit timeout handler. Called when a
1115 * transmission is started on the interface, but no interrupt is
1116 * received before the timeout. This usually indicates that the
1117 * card has wedged for some reason.
1118 */
1119 void
fxp_watchdog(struct ifnet * ifp)1120 fxp_watchdog(struct ifnet *ifp)
1121 {
1122 struct fxp_softc *sc = ifp->if_softc;
1123
1124 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1125 ifp->if_oerrors++;
1126
1127 fxp_init(sc);
1128 }
1129
1130 /*
1131 * Submit a command to the i82557.
1132 */
void
fxp_scb_cmd(struct fxp_softc *sc, u_int16_t cmd)
{
	/* Write the command word into the SCB command register. */
	CSR_WRITE_2(sc, FXP_CSR_SCB_COMMAND, cmd);
}
1138
/*
 * Initialize the chip and bring the interface up: reset, load
 * microcode, program the configuration, station address and
 * multicast list, and set up the tx/rx rings.  Must run at splnet
 * (asserted below).
 */
void
fxp_init(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *txp;
	bus_dmamap_t rxmap;
	int i, prm, save_bf, lrxen, allm, bufs;

	splassert(IPL_NET);

	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc, 0, 0);

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);

#ifndef SMALL_KERNEL
	fxp_load_ucode(sc);
#endif
	/* Once through to set flags */
	fxp_mc_setup(sc, 0);

	/*
	 * In order to support receiving 802.1Q VLAN frames, we have to
	 * enable "save bad frames", since they are 4 bytes larger than
	 * the normal Ethernet maximum frame length. On i82558 and later,
	 * we have a better mechanism for this.
	 */
	save_bf = 0;
	lrxen = 0;

	if (sc->sc_revision >= FXP_REV_82558_A4)
		lrxen = 1;
	else
		save_bf = 1;

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
	    sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, stats));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);

	cbp = &sc->sc_ctrl->u.cfg;
	/*
	 * This bcopy is kind of disgusting, but there are a bunch of must be
	 * zero and must be one bits in this structure and this is the easiest
	 * way to initialize them all to proper values.
	 */
	bcopy(fxp_cb_config_template, (void *)&cbp->cb_status,
	    sizeof(fxp_cb_config_template));

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
	allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;

#if 0
	cbp->cb_status =	0;
	cbp->cb_command =	FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
	cbp->link_addr =	0xffffffff;	/* (no) next command */
	cbp->byte_count =	22;		/* (22) bytes to config */
	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
	cbp->dma_bce =		0;	/* (disable) dma max counters */
	cbp->late_scb =		0;	/* (don't) defer SCB update */
	cbp->tno_int =		0;	/* (disable) tx not okay interrupt */
	cbp->ci_int =		1;	/* interrupt on CU idle */
	cbp->save_bf =		save_bf ? 1 : prm; /* save bad frames */
	cbp->disc_short_rx =	!prm;	/* discard short packets */
	cbp->underrun_retry =	1;	/* retry mode (1) on DMA underrun */
	cbp->mediatype =	!sc->phy_10Mbps_only; /* interface mode */
	cbp->nsai =		1;	/* (don't) disable source addr insert */
	cbp->preamble_length =	2;	/* (7 byte) preamble */
	cbp->loopback =		0;	/* (don't) loopback */
	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous =	prm;	/* promiscuous mode */
	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
	cbp->crscdt =		0;	/* (CRS only) */
	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
	cbp->padding =		1;	/* (do) pad short tx packets */
	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
	cbp->long_rx =		lrxen;	/* (enable) long packets */
	cbp->force_fdx =	0;	/* (don't) force full duplex */
	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
	cbp->mc_all =		allm;
#else
	/*
	 * The live path below only ORs/ANDs individual bits into the
	 * template copied above, leaving the must-be-so bits intact.
	 */
	cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL);

	if (allm && !prm)
		cbp->mc_all |= 0x08;		/* accept all multicasts */
	else
		cbp->mc_all &= ~0x08;		/* reject all multicasts */

	if (prm) {
		cbp->promiscuous |= 1;		/* promiscuous mode */
		cbp->ctrl2 &= ~0x01;		/* save short packets */
		cbp->stripping &= ~0x01;	/* don't truncate rx packets */
	} else {
		cbp->promiscuous &= ~1;		/* no promiscuous mode */
		cbp->ctrl2 |= 0x01;		/* discard short packets */
		cbp->stripping |= 0x01;		/* truncate rx packets */
	}

	if (prm || save_bf)
		cbp->ctrl1 |= 0x80;		/* save bad frames */
	else
		cbp->ctrl1 &= ~0x80;		/* discard bad frames */

	if (sc->sc_flags & FXPF_MWI_ENABLE)
		cbp->ctrl0 |= 0x01;		/* enable PCI MWI command */

	if(!sc->phy_10Mbps_only)		/* interface mode */
		cbp->mediatype |= 0x01;
	else
		cbp->mediatype &= ~0x01;

	if(lrxen)				/* long packets */
		cbp->stripping |= 0x08;
	else
		cbp->stripping &= ~0x08;

	cbp->tx_dma_bytecount = 0; /* (no) tx DMA max, dma_dce = 0 ??? */
	cbp->ctrl1 |= 0x08;	/* ci_int = 1 */
	cbp->ctrl3 |= 0x08;	/* nsai */
	cbp->fifo_limit = 0x08;	/* tx and rx fifo limit */
	cbp->fdx_pin |= 0x80;	/* Enable full duplex setting by pin */
#endif

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	FXP_CFG_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.cfg));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_CFG_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while ((cbp->cb_status & htole16(FXP_CB_STATUS_C)) == 0 && i--);

	FXP_CFG_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(cbp->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: config command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Now initialize the station address.
	 */
	cb_ias = &sc->sc_ctrl->u.ias;
	cb_ias->cb_status = htole16(0);
	cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
	cb_ias->link_addr = htole32(0xffffffff);
	bcopy(sc->sc_arpcom.ac_enaddr, (void *)cb_ias->macaddr,
	    sizeof(sc->sc_arpcom.ac_enaddr));

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 */
	fxp_scb_wait(sc);
	FXP_IAS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.ias));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_IAS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (!(cb_ias->cb_status & htole16(FXP_CB_STATUS_C)) && i--);

	FXP_IAS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(cb_ias->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: IAS command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	/* Again, this time really upload the multicast addresses */
	fxp_mc_setup(sc, 1);

	/*
	 * Initialize transmit control block (TxCB) list.
	 * Each TxCB is linked to the next so they form a circular ring.
	 */
	bzero(sc->sc_ctrl->tx_cb, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
	txp = sc->sc_ctrl->tx_cb;
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
		txp[i].link_addr = htole32(sc->tx_cb_map->dm_segs->ds_addr +
		    offsetof(struct fxp_ctrl, tx_cb[(i + 1) & FXP_TXCB_MASK]));
		txp[i].tbd_array_addr =htole32(sc->tx_cb_map->dm_segs->ds_addr +
		    offsetof(struct fxp_ctrl, tx_cb[i].tbd[0]));
	}
	/*
	 * Set the suspend flag on the first TxCB and start the control
	 * unit. It will execute the NOP and then suspend.
	 */
	sc->sc_cbt_prev = sc->sc_cbt_prod = sc->sc_cbt_cons = sc->txs;
	sc->sc_cbt_cnt = 1;
	sc->sc_ctrl->tx_cb[0].cb_command = htole16(FXP_CB_COMMAND_NOP |
	    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
	bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map, 0,
	    sc->tx_cb_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, tx_cb[0]));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 * Grow or shrink the rx buffer pool to the target count.
	 */
	if (ifp->if_flags & IFF_UP)
		bufs = FXP_NRFABUFS_MAX;
	else
		bufs = FXP_NRFABUFS_MIN;
	if (sc->rx_bufs > bufs) {
		while (sc->rfa_headm != NULL && sc->rx_bufs > bufs) {
			rxmap = *((bus_dmamap_t *)sc->rfa_headm->m_ext.ext_buf);
			bus_dmamap_unload(sc->sc_dmat, rxmap);
			FXP_RXMAP_PUT(sc, rxmap);
			sc->rfa_headm = m_free(sc->rfa_headm);
			sc->rx_bufs--;
		}
	} else if (sc->rx_bufs < bufs) {
		int err, tmp_rx_bufs = sc->rx_bufs;
		/* First create DMA maps, then attach a buffer to each. */
		for (i = sc->rx_bufs; i < bufs; i++) {
			if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) {
				printf("%s: unable to create rx dma map %d, "
				    "error %d\n", sc->sc_dev.dv_xname, i, err);
				break;
			}
			sc->rx_bufs++;
		}
		for (i = tmp_rx_bufs; i < sc->rx_bufs; i++)
			if (fxp_add_rfabuf(sc, NULL) != 0)
				break;
	}
	fxp_scb_wait(sc);

	/*
	 * Set current media.
	 */
	mii_mediachg(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/*
	 * Request a software generated interrupt that will be used to
	 * (re)start the RU processing. If we direct the chip to start
	 * receiving from the start of queue now, instead of letting the
	 * interrupt handler first process all received packets, we run
	 * the risk of having it overwrite mbuf clusters while they are
	 * being processed or after they have been returned to the pool.
	 */
	CSR_WRITE_2(sc, FXP_CSR_SCB_COMMAND,
	    CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) |
	    FXP_SCB_INTRCNTL_REQUEST_SWI);

	/*
	 * Start stats updater.
	 */
	timeout_add_sec(&sc->stats_update_to, 1);
}
1429
1430 /*
1431 * Change media according to request.
1432 */
1433 int
fxp_mediachange(struct ifnet * ifp)1434 fxp_mediachange(struct ifnet *ifp)
1435 {
1436 struct fxp_softc *sc = ifp->if_softc;
1437 struct mii_data *mii = &sc->sc_mii;
1438
1439 if (mii->mii_instance) {
1440 struct mii_softc *miisc;
1441 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1442 mii_phy_reset(miisc);
1443 }
1444 mii_mediachg(&sc->sc_mii);
1445 return (0);
1446 }
1447
1448 /*
1449 * Notify the world which media we're using.
1450 */
1451 void
fxp_mediastatus(struct ifnet * ifp,struct ifmediareq * ifmr)1452 fxp_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1453 {
1454 struct fxp_softc *sc = ifp->if_softc;
1455
1456 mii_pollstat(&sc->sc_mii);
1457 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1458 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1459 }
1460
1461 /*
1462 * Add a buffer to the end of the RFA buffer list.
1463 * Return 0 if successful, 1 for failure. A failure results in
1464 * adding the 'oldm' (if non-NULL) on to the end of the list -
1465 * tossing out its old contents and recycling it.
1466 * The RFA struct is stuck at the beginning of mbuf cluster and the
1467 * data pointer is fixed up to point just past it.
1468 */
int
fxp_add_rfabuf(struct fxp_softc *sc, struct mbuf *oldm)
{
	u_int32_t v;
	struct mbuf *m;
	u_int8_t *rfap;
	bus_dmamap_t rxmap = NULL;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* No cluster: fall back to recycling oldm, if any. */
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
		}
		if (oldm == NULL) {
			/* Fresh buffer: take a map from the pool, stash the
			 * map pointer at the cluster base, and load it. */
			rxmap = FXP_RXMAP_GET(sc);
			*((bus_dmamap_t *)m->m_ext.ext_buf) = rxmap;
			bus_dmamap_load(sc->sc_dmat, rxmap,
			    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
			    BUS_DMA_NOWAIT);
		} else if (oldm == m)
			/* Recycled oldm: its map is already loaded. */
			rxmap = *((bus_dmamap_t *)oldm->m_ext.ext_buf);
		else {
			/* New cluster replaces oldm: move oldm's map over. */
			rxmap = *((bus_dmamap_t *)oldm->m_ext.ext_buf);
			bus_dmamap_unload(sc->sc_dmat, rxmap);
			bus_dmamap_load(sc->sc_dmat, rxmap,
			    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
			    BUS_DMA_NOWAIT);
			*mtod(m, bus_dmamap_t *) = rxmap;
		}
	} else {
		/* No mbuf at all: recycle oldm or fail. */
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
		rxmap = *mtod(m, bus_dmamap_t *);
	}

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfap = m->m_data;
	m->m_data += sizeof(struct fxp_rfa);
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, size)) =
	    htole16(MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE);

	/*
	 * Initialize the rest of the RFA.  Note that since the RFA
	 * is misaligned, we cannot store values directly.  Instead,
	 * we use an optimized, inline copy.
	 */
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_status)) = 0;
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) =
	    htole16(FXP_RFA_CONTROL_EL);
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, actual_size)) = 0;

	/* No link or RBD: all ones. */
	v = -1;
	fxp_lwcopy(&v,
	    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr)));
	fxp_lwcopy(&v,
	    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, rbd_addr)));

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, MCLBYTES,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * If there are other buffers already on the list, attach this
	 * one to the end by fixing up the tail to point to this one.
	 */
	if (sc->rfa_headm != NULL) {
		sc->rfa_tailm->m_next = m;
		v = htole32(rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE);
		rfap = sc->rfa_tailm->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE;
		fxp_lwcopy(&v,
		    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr)));
		/* Clear the end-of-list bit on the former tail. */
		*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) &=
		    htole16((u_int16_t)~FXP_RFA_CONTROL_EL);
		/* XXX we only need to sync the control struct */
		bus_dmamap_sync(sc->sc_dmat,
		    *((bus_dmamap_t *)sc->rfa_tailm->m_ext.ext_buf), 0,
		    MCLBYTES, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else
		sc->rfa_headm = m;

	sc->rfa_tailm = m;

	/* 1 (failure) only when we fell back to recycling oldm. */
	return (m == oldm);
}
1568
int
fxp_mdi_read(struct device *self, int phy, int reg)
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	int count = FXP_CMD_TMO;
	int value;

	/* Start an MDI read cycle for the given PHY and register. */
	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));

	/* Poll the ready bit (0x10000000) until set or the budget runs out. */
	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
	    && count--)
		DELAY(10);

	if (count <= 0)
		printf("%s: fxp_mdi_read: timed out\n", sc->sc_dev.dv_xname);

	/* The low 16 bits hold the register data. */
	return (value & 0xffff);
}
1588
void
fxp_statchg(struct device *self)
{
	/* MII link status change callback; this driver needs no action. */
}
1594
void
fxp_mdi_write(struct device *self, int phy, int reg, int value)
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	int count = FXP_CMD_TMO;

	/* Start an MDI write cycle carrying the 16-bit data word. */
	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
	    (value & 0xffff));

	/* Poll the ready bit (0x10000000) until set or the budget runs out. */
	while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
	    count--)
		DELAY(10);

	if (count <= 0)
		printf("%s: fxp_mdi_write: timed out\n", sc->sc_dev.dv_xname);
}
1612
int
fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	/* Serialize against the interrupt path. */
	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			fxp_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: reprogram via ENETRESET below. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				fxp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				fxp_stop(sc, 1, 0);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	/*
	 * ENETRESET (set above or returned by ether_ioctl) means the
	 * filter state changed; reinitialize if the interface is running.
	 */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			fxp_init(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1659
1660 /*
1661 * Program the multicast filter.
1662 *
1663 * We have an artificial restriction that the multicast setup command
1664 * must be the first command in the chain, so we take steps to ensure
1665 * this. By requiring this, it allows us to keep up the performance of
1666 * the pre-initialized command ring (esp. link pointers) by not actually
1667 * inserting the mcsetup command in the ring - i.e. its link pointer
1668 * points to the TxCB ring, but the mcsetup descriptor itself is not part
1669 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
1670 * lead into the regular TxCB ring when it completes.
1671 *
1672 * This function must be called at splnet.
1673 */
/*
 * doit == 0: only recompute IFF_ALLMULTI (and stage the address list);
 * doit != 0: additionally upload the multicast setup command to the chip.
 */
void
fxp_mc_setup(struct fxp_softc *sc, int doit)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct fxp_cb_mcs *mcsp = &sc->sc_ctrl->u.mcs;
	struct ether_multistep step;
	struct ether_multi *enm;
	int i, nmcasts = 0;

	splassert(IPL_NET);

	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Fall back to ALLMULTI when the hardware filter can't be used:
	 * promiscuous mode, address ranges, or too many addresses.
	 */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt >= MAXMCADDR) {
		ifp->if_flags |= IFF_ALLMULTI;
	} else {
		/* Copy every multicast address into the setup descriptor. */
		ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo,
			    (void *)&mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);

			nmcasts++;

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	if (doit == 0)
		return;

	/*
	 * Initialize multicast setup descriptor.
	 */
	mcsp->cb_status = htole16(0);
	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
	mcsp->link_addr = htole32(-1);
	mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);

	/*
	 * Wait until command unit is not active. This should never
	 * be the case when nothing is queued, but make sure anyway.
	 */
	for (i = FXP_CMD_TMO; (CSR_READ_2(sc, FXP_CSR_SCB_STATUS) &
	    FXP_SCB_CUS_MASK) != FXP_SCB_CUS_IDLE && i--; DELAY(1));

	if ((CSR_READ_2(sc, FXP_CSR_SCB_STATUS) &
	    FXP_SCB_CUS_MASK) != FXP_SCB_CUS_IDLE) {
		printf("%s: timeout waiting for CU ready\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	FXP_MCS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.mcs));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/* ...and poll for completion. */
	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_MCS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (!(mcsp->cb_status & htole16(FXP_CB_STATUS_C)) && i--);

	FXP_MCS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(mcsp->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: multicast command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

}
1750
1751 #ifndef SMALL_KERNEL
1752 #include <dev/microcode/fxp/rcvbundl.h>
/*
 * Table of downloadable "CPUSaver" microcode images keyed by chip
 * revision.  The *_offset fields are dword offsets into the image where
 * tunable parameters are patched before download; 0 means the image has
 * no such parameter (see fxp_load_ucode()).
 */
struct ucode {
	u_int16_t	revision;		/* chip revision the image targets */
	u_int16_t	int_delay_offset;	/* rx interrupt delay parameter */
	u_int16_t	bundle_max_offset;	/* max frames bundled per interrupt */
	u_int16_t	min_size_mask_offset;	/* min-size bundling mask */
	const char	*uname;			/* image name for loadfirmware() */
} const ucode_table[] = {
	{ FXP_REV_82558_A4, D101_CPUSAVER_DWORD,
	  0, 0,
	  "fxp-d101a" },

	{ FXP_REV_82558_B0, D101_CPUSAVER_DWORD,
	  0, 0,
	  "fxp-d101b0" },

	{ FXP_REV_82559_A0, D101M_CPUSAVER_DWORD,
	  D101M_CPUSAVER_BUNDLE_MAX_DWORD, D101M_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d101ma" },

	{ FXP_REV_82559S_A, D101S_CPUSAVER_DWORD,
	  D101S_CPUSAVER_BUNDLE_MAX_DWORD, D101S_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d101s" },

	{ FXP_REV_82550, D102_B_CPUSAVER_DWORD,
	  D102_B_CPUSAVER_BUNDLE_MAX_DWORD, D102_B_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102" },

	{ FXP_REV_82550_C, D102_C_CPUSAVER_DWORD,
	  D102_C_CPUSAVER_BUNDLE_MAX_DWORD, D102_C_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102c" },

	{ FXP_REV_82551_F, D102_E_CPUSAVER_DWORD,
	  D102_E_CPUSAVER_BUNDLE_MAX_DWORD, D102_E_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102e" },

	{ FXP_REV_82551_10, D102_E_CPUSAVER_DWORD,
	  D102_E_CPUSAVER_BUNDLE_MAX_DWORD, D102_E_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102e" },

	/* Sentinel: revision 0 terminates the search. */
	{ 0, 0,
	  0, 0,
	  NULL }
};
1796
/*
 * Download the revision-specific microcode image (if any) into the chip
 * via a CU command block, patching in the tunable parameters first.
 */
void
fxp_load_ucode(struct fxp_softc *sc)
{
	const struct ucode *uc;
	struct fxp_cb_ucode *cbp = &sc->sc_ctrl->u.code;
	int i, error;

	/* A previous attempt found no ucode for this chip: give up early. */
	if (sc->sc_flags & FXPF_NOUCODE)
		return;

	/* Look up the image matching this chip revision. */
	for (uc = ucode_table; uc->revision != 0; uc++)
		if (sc->sc_revision == uc->revision)
			break;
	if (uc->revision == 0) {
		sc->sc_flags |= FXPF_NOUCODE;
		return;	/* no ucode for this chip is found */
	}

	/* Image already in memory: skip the firmware load. */
	if (sc->sc_ucodebuf)
		goto reloadit;

	if (sc->sc_revision == FXP_REV_82550_C) {
		u_int16_t data;

		/*
		 * 82550C without the server extensions
		 * locks up with the microcode patch.
		 */
		fxp_read_eeprom(sc, &data, FXP_EEPROM_REG_COMPAT, 1);
		if ((data & FXP_EEPROM_REG_COMPAT_SRV) == 0) {
			sc->sc_flags |= FXPF_NOUCODE;
			return;
		}
	}

	error = loadfirmware(uc->uname, (u_char **)&sc->sc_ucodebuf,
	    &sc->sc_ucodelen);
	if (error) {
		printf("%s: error %d, could not read firmware %s\n",
		    sc->sc_dev.dv_xname, error, uc->uname);
		return;
	}

reloadit:
	/* Only download once; the flag is cleared on chip reset elsewhere. */
	if (sc->sc_flags & FXPF_UCODELOADED)
		return;

	/* Build the ucode-download command block. */
	cbp->cb_status = 0;
	cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE|FXP_CB_COMMAND_EL);
	cbp->link_addr = 0xffffffff;	/* (no) next command */
	for (i = 0; i < (sc->sc_ucodelen / sizeof(u_int32_t)); i++)
		cbp->ucode[i] = sc->sc_ucodebuf[i];

	/* Patch the tunables into the image where the table says they live. */
	if (uc->int_delay_offset)
		*((u_int16_t *)&cbp->ucode[uc->int_delay_offset]) =
		    htole16(sc->sc_int_delay + sc->sc_int_delay / 2);

	if (uc->bundle_max_offset)
		*((u_int16_t *)&cbp->ucode[uc->bundle_max_offset]) =
		    htole16(sc->sc_bundle_max);

	if (uc->min_size_mask_offset)
		*((u_int16_t *)&cbp->ucode[uc->min_size_mask_offset]) =
		    htole16(sc->sc_min_size_mask);

	FXP_UCODE_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Download the ucode to the chip.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr
	    + offsetof(struct fxp_ctrl, u.code));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(2);
		FXP_UCODE_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (((cbp->cb_status & htole16(FXP_CB_STATUS_C)) == 0) && --i);
	if (i == 0) {
		printf("%s: timeout loading microcode\n", sc->sc_dev.dv_xname);
		return;
	}
	sc->sc_flags |= FXPF_UCODELOADED;

#ifdef DEBUG
	printf("%s: microcode loaded, int_delay: %d usec",
	    sc->sc_dev.dv_xname, sc->sc_int_delay);

	if (uc->bundle_max_offset)
		printf(", bundle_max %d\n", sc->sc_bundle_max);
	else
		printf("\n");
#endif
}
1894 #endif /* SMALL_KERNEL */
1895