1 /*
2 * sfe_util.c: general ethernet mac driver framework version 2.6
3 *
4 * Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
15 *
16 * 3. Neither the name of the author nor the names of its contributors may be
17 * used to endorse or promote products derived from this software without
18 * specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31 * DAMAGE.
32 */
33
34 /*
35 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
36 * Use is subject to license terms.
37 */
38
39 /*
40 * System Header files.
41 */
42 #include <sys/types.h>
43 #include <sys/conf.h>
44 #include <sys/debug.h>
45 #include <sys/kmem.h>
46 #include <sys/vtrace.h>
47 #include <sys/ethernet.h>
48 #include <sys/modctl.h>
49 #include <sys/errno.h>
50 #include <sys/ddi.h>
51 #include <sys/sunddi.h>
52 #include <sys/stream.h> /* required for MBLK* */
53 #include <sys/strsun.h> /* required for mionack() */
54 #include <sys/byteorder.h>
55 #include <sys/sysmacros.h>
56 #include <sys/pci.h>
57 #include <inet/common.h>
58 #include <inet/led.h>
59 #include <inet/mi.h>
60 #include <inet/nd.h>
61 #include <sys/crc32.h>
62
63 #include <sys/note.h>
64
65 #include "sfe_mii.h"
66 #include "sfe_util.h"
67
68
69
70 extern char ident[];
71
72 /* Debugging support */
73 #ifdef GEM_DEBUG_LEVEL
74 static int gem_debug = GEM_DEBUG_LEVEL;
75 #define DPRINTF(n, args) if (gem_debug > (n)) cmn_err args
76 #else
77 #define DPRINTF(n, args)
78 #undef ASSERT
79 #define ASSERT(x)
80 #endif
81
82 #define IOC_LINESIZE 0x40 /* Is it right for amd64? */
83
84 /*
85 * Useful macros and typedefs
86 */
87 #define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1))
88
89 #define GET_NET16(p) ((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
90 #define GET_ETHERTYPE(p) GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
91
92 #define GET_IPTYPEv4(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 9])
93 #define GET_IPTYPEv6(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 6])
94
95
96 #ifndef INT32_MAX
97 #define INT32_MAX 0x7fffffff
98 #endif
99
100 #define VTAG_OFF (ETHERADDRL*2)
101 #ifndef VTAG_SIZE
102 #define VTAG_SIZE 4
103 #endif
104 #ifndef VTAG_TPID
105 #define VTAG_TPID 0x8100U
106 #endif
107
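/*
 * GET_TXBUF maps a monotonically increasing tx sequence number to its
 * slot in the circular tx buffer array.  tx_slots_base is the rebasing
 * offset maintained by gem_init_tx_ring() so that the per-queue sequence
 * numbers can be reset to zero without moving the underlying buffers.
 */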
108 #define GET_TXBUF(dp, sn) \
109 &(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
110
111 #define TXFLAG_VTAG(flag) \
112 (((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
113
114 #define MAXPKTBUF(dp) \
115 ((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
116
117 #define WATCH_INTERVAL_FAST drv_usectohz(100*1000) /* 100 ms */
118 #define BOOLEAN(x) ((x) != 0)
119
120 /*
121 * Macros to distinguish chip generations.
122 */
123
124 /*
125 * Private functions
126 */
127 static void gem_mii_start(struct gem_dev *);
128 static void gem_mii_stop(struct gem_dev *);
129
130 /* local buffer management */
131 static void gem_nd_setup(struct gem_dev *dp);
132 static void gem_nd_cleanup(struct gem_dev *dp);
133 static int gem_alloc_memory(struct gem_dev *);
134 static void gem_free_memory(struct gem_dev *);
135 static void gem_init_rx_ring(struct gem_dev *);
136 static void gem_init_tx_ring(struct gem_dev *);
137 __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);
138
139 static void gem_tx_timeout(struct gem_dev *);
140 static void gem_mii_link_watcher(struct gem_dev *dp);
141 static int gem_mac_init(struct gem_dev *dp);
142 static int gem_mac_start(struct gem_dev *dp);
143 static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
144 static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
145
146 static struct ether_addr gem_etherbroadcastaddr = {
147 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
148 };
149
150 int gem_speed_value[] = {10, 100, 1000};
151
152 /* ============================================================== */
153 /*
154 * Misc runtime routines
155 */
156 /* ============================================================== */
157 /*
158 * Ether CRC calculation according to 21143 data sheet
159 */
160 uint32_t
161 gem_ether_crc_le(const uint8_t *addr, int len)
162 {
163 uint32_t crc;
164
165 CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table);
166 return (crc);
167 }
168
169 uint32_t
170 gem_ether_crc_be(const uint8_t *addr, int len)
171 {
172 int idx;
173 int bit;
174 uint_t data;
175 uint32_t crc;
176 #define CRC32_POLY_BE 0x04c11db7
177
178 crc = 0xffffffff;
179 for (idx = 0; idx < len; idx++) {
180 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
181 crc = (crc << 1)
182 ^ ((((crc >> 31) ^ data) & 1) ? CRC32_POLY_BE : 0);
183 }
184 }
185 return (crc);
186 #undef CRC32_POLY_BE
187 }
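/*
 * Note: gem_ether_crc_le() always hashes ETHERADDRL bytes via the CRC32()
 * macro and ignores its len argument, while gem_ether_crc_be() honours len.
 * Chip-specific set_rx_filter routines typically use a few bits of the
 * result to pick a multicast hash slot; an illustrative (hypothetical)
 * fragment might look like:
 *
 *	hash = gem_ether_crc_le(ep->ether_addr_octet, ETHERADDRL) & 0x1ff;
 *
 * The exact bit selection depends on the controller.
 */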
188
189 int
190 gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
191 {
192 char propname[32];
193
194 (void) sprintf(propname, prop_template, dp->name);
195
196 return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
197 DDI_PROP_DONTPASS, propname, def_val));
198 }
199
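/*
 * gem_population: count the number of set bits in a 32-bit word (a simple
 * population count); chip-specific code may use it, for example, when
 * sizing multicast hash filters.
 */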
200 static int
201 gem_population(uint32_t x)
202 {
203 int i;
204 int cnt;
205
206 cnt = 0;
207 for (i = 0; i < 32; i++) {
208 if (x & (1 << i)) {
209 cnt++;
210 }
211 }
212 return (cnt);
213 }
214
215 #ifdef GEM_DEBUG_LEVEL
216 #ifdef GEM_DEBUG_VLAN
217 static void
218 gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
219 boolean_t check_cksum)
220 {
221 char msg[180];
222 uint8_t buf[18+20+20];
223 uint8_t *p;
224 size_t offset;
225 uint_t ethertype;
226 uint_t proto;
227 uint_t ipproto = 0;
228 uint_t iplen;
229 uint_t iphlen;
230 uint_t tcplen;
231 uint_t udplen;
232 uint_t cksum;
233 int rest;
234 int len;
235 char *bp;
236 mblk_t *tp;
237 extern uint_t ip_cksum(mblk_t *, int, uint32_t);
238
239 msg[0] = 0;
240 bp = msg;
241
242 rest = sizeof (buf);
243 offset = 0;
244 for (tp = mp; tp; tp = tp->b_cont) {
245 len = tp->b_wptr - tp->b_rptr;
246 len = min(rest, len);
247 bcopy(tp->b_rptr, &buf[offset], len);
248 rest -= len;
249 offset += len;
250 if (rest == 0) {
251 break;
252 }
253 }
254
255 offset = 0;
256 p = &buf[offset];
257
258 /* ethernet address */
259 sprintf(bp,
260 "ether: %02x:%02x:%02x:%02x:%02x:%02x"
261 " -> %02x:%02x:%02x:%02x:%02x:%02x",
262 p[6], p[7], p[8], p[9], p[10], p[11],
263 p[0], p[1], p[2], p[3], p[4], p[5]);
264 bp = &msg[strlen(msg)];
265
266 /* vlan tag and ethertype */
267 ethertype = GET_ETHERTYPE(p);
268 if (ethertype == VTAG_TPID) {
269 sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
270 bp = &msg[strlen(msg)];
271
272 offset += VTAG_SIZE;
273 p = &buf[offset];
274 ethertype = GET_ETHERTYPE(p);
275 }
276 sprintf(bp, " type:%04x", ethertype);
277 bp = &msg[strlen(msg)];
278
279 /* ethernet packet length */
280 sprintf(bp, " mblklen:%d", msgdsize(mp));
281 bp = &msg[strlen(msg)];
282 if (mp->b_cont) {
283 sprintf(bp, "(");
284 bp = &msg[strlen(msg)];
285 for (tp = mp; tp; tp = tp->b_cont) {
286 if (tp == mp) {
287 sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
288 } else {
289 sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
290 }
291 bp = &msg[strlen(msg)];
292 }
293 sprintf(bp, ")");
294 bp = &msg[strlen(msg)];
295 }
296
297 if (ethertype != ETHERTYPE_IP) {
298 goto x;
299 }
300
301 /* ip address */
302 offset += sizeof (struct ether_header);
303 p = &buf[offset];
304 ipproto = p[9];
305 iplen = GET_NET16(&p[2]);
306 sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
307 p[12], p[13], p[14], p[15],
308 p[16], p[17], p[18], p[19],
309 ipproto, iplen);
310 bp = (void *)&msg[strlen(msg)];
311
312 iphlen = (p[0] & 0xf) * 4;
313
314 /* checksum for pseudo header */
315 cksum = *(uint16_t *)&p[12];
316 cksum += *(uint16_t *)&p[14];
317 cksum += *(uint16_t *)&p[16];
318 cksum += *(uint16_t *)&p[18];
319 cksum += BE_16(ipproto);
320
321 /* tcp or udp protocol header */
322 offset += iphlen;
323 p = &buf[offset];
324 if (ipproto == IPPROTO_TCP) {
325 tcplen = iplen - iphlen;
326 sprintf(bp, ", tcp: len:%d cksum:%x",
327 tcplen, GET_NET16(&p[16]));
328 bp = (void *)&msg[strlen(msg)];
329
330 if (check_cksum) {
331 cksum += BE_16(tcplen);
332 cksum = (uint16_t)ip_cksum(mp, offset, cksum);
333 sprintf(bp, " (%s)",
334 (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
335 bp = (void *)&msg[strlen(msg)];
336 }
337 } else if (ipproto == IPPROTO_UDP) {
338 udplen = GET_NET16(&p[4]);
339 sprintf(bp, ", udp: len:%d cksum:%x",
340 udplen, GET_NET16(&p[6]));
341 bp = (void *)&msg[strlen(msg)];
342
343 if (GET_NET16(&p[6]) && check_cksum) {
344 cksum += *(uint16_t *)&p[4];
345 cksum = (uint16_t)ip_cksum(mp, offset, cksum);
346 sprintf(bp, " (%s)",
347 (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
348 bp = (void *)&msg[strlen(msg)];
349 }
350 }
351 x:
352 cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
353 }
354 #endif /* GEM_DEBUG_VLAN */
355 #endif /* GEM_DEBUG_LEVEL */
356
357 /* ============================================================== */
358 /*
359 * IO cache flush
360 */
361 /* ============================================================== */
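/*
 * The two helpers below sync the active range of the rx/tx descriptor
 * rings.  When the range wraps past the end of the ring, the sync is
 * split into two ddi_dma_sync() calls: one for the wrapped part at the
 * start of the ring and one for the remainder starting at 'head'.
 */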
362 __INLINE__ void
363 gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
364 {
365 int n;
366 int m;
367 int rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;
368
369 /* sync active descriptors */
370 if (rx_desc_unit_shift < 0 || nslot == 0) {
371 /* no rx descriptor ring */
372 return;
373 }
374
375 n = dp->gc.gc_rx_ring_size - head;
376 if ((m = nslot - n) > 0) {
377 (void) ddi_dma_sync(dp->desc_dma_handle,
378 (off_t)0,
379 (size_t)(m << rx_desc_unit_shift),
380 how);
381 nslot = n;
382 }
383
384 (void) ddi_dma_sync(dp->desc_dma_handle,
385 (off_t)(head << rx_desc_unit_shift),
386 (size_t)(nslot << rx_desc_unit_shift),
387 how);
388 }
389
390 __INLINE__ void
391 gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
392 {
393 int n;
394 int m;
395 int tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;
396
397 /* sync active descriptors */
398 if (tx_desc_unit_shift < 0 || nslot == 0) {
399 /* no tx descriptor ring */
400 return;
401 }
402
403 n = dp->gc.gc_tx_ring_size - head;
404 if ((m = nslot - n) > 0) {
405 (void) ddi_dma_sync(dp->desc_dma_handle,
406 (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
407 (size_t)(m << tx_desc_unit_shift),
408 how);
409 nslot = n;
410 }
411
412 (void) ddi_dma_sync(dp->desc_dma_handle,
413 (off_t)((head << tx_desc_unit_shift)
414 + (dp->tx_ring_dma - dp->rx_ring_dma)),
415 (size_t)(nslot << tx_desc_unit_shift),
416 how);
417 }
418
419 static void
420 gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
421 {
422 gem_rx_desc_dma_sync(dp,
423 SLOT(head, dp->gc.gc_rx_ring_size), nslot,
424 DDI_DMA_SYNC_FORDEV);
425 }
426
427 /* ============================================================== */
428 /*
429 * Buffer management
430 */
431 /* ============================================================== */
432 static void
433 gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
434 {
435 cmn_err(level,
436 "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
437 "tx_softq: %d[%d] %d[%d] (+%d), "
438 "tx_free: %d[%d] %d[%d] (+%d), "
439 "tx_desc: %d[%d] %d[%d] (+%d), "
440 "intr: %d[%d] (+%d), ",
441 dp->name, title,
442 dp->tx_active_head,
443 SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
444 dp->tx_active_tail,
445 SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
446 dp->tx_active_tail - dp->tx_active_head,
447 dp->tx_softq_head,
448 SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
449 dp->tx_softq_tail,
450 SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
451 dp->tx_softq_tail - dp->tx_softq_head,
452 dp->tx_free_head,
453 SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
454 dp->tx_free_tail,
455 SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
456 dp->tx_free_tail - dp->tx_free_head,
457 dp->tx_desc_head,
458 SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
459 dp->tx_desc_tail,
460 SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
461 dp->tx_desc_tail - dp->tx_desc_head,
462 dp->tx_desc_intr,
463 SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
464 dp->tx_desc_intr - dp->tx_desc_head);
465 }
466
467 static void
468 gem_free_rxbuf(struct rxbuf *rbp)
469 {
470 struct gem_dev *dp;
471
472 dp = rbp->rxb_devp;
473 ASSERT(mutex_owned(&dp->intrlock));
474 rbp->rxb_next = dp->rx_buf_freelist;
475 dp->rx_buf_freelist = rbp;
476 dp->rx_buf_freecnt++;
477 }
478
479 /*
480 * gem_get_rxbuf: supply a receive buffer which has been mapped into
481 * DMA space.
482 */
483 struct rxbuf *
484 gem_get_rxbuf(struct gem_dev *dp, int cansleep)
485 {
486 struct rxbuf *rbp;
487 uint_t count = 0;
488 int i;
489 int err;
490
491 ASSERT(mutex_owned(&dp->intrlock));
492
493 DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
494 dp->rx_buf_freecnt));
495 /*
496 * Get rx buffer management structure
497 */
498 rbp = dp->rx_buf_freelist;
499 if (rbp) {
500 /* get one from the recycle list */
501 ASSERT(dp->rx_buf_freecnt > 0);
502
503 dp->rx_buf_freelist = rbp->rxb_next;
504 dp->rx_buf_freecnt--;
505 rbp->rxb_next = NULL;
506 return (rbp);
507 }
508
509 /*
510 * Allocate a rx buffer management structure
511 */
512 rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
513 if (rbp == NULL) {
514 /* no memory */
515 return (NULL);
516 }
517
518 /*
519 * Prepare a back pointer to the device structure which will be
520 * referred to when freeing the buffer later.
521 */
522 rbp->rxb_devp = dp;
523
524 /* allocate a dma handle for rx data buffer */
525 if ((err = ddi_dma_alloc_handle(dp->dip,
526 &dp->gc.gc_dma_attr_rxbuf,
527 (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
528 NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {
529
530 cmn_err(CE_WARN,
531 "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
532 dp->name, __func__, err);
533
534 kmem_free(rbp, sizeof (struct rxbuf));
535 return (NULL);
536 }
537
538 /* allocate a bounce buffer for rx */
539 if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
540 ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
541 &dp->gc.gc_buf_attr,
542 /*
543 * if the nic requires a header at the top of receive buffers,
544 * it may access the rx buffer randomly.
545 */
546 (dp->gc.gc_rx_header_len > 0)
547 ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
548 cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
549 NULL,
550 &rbp->rxb_buf, &rbp->rxb_buf_len,
551 &rbp->rxb_bah)) != DDI_SUCCESS) {
552
553 cmn_err(CE_WARN,
554 "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
555 dp->name, __func__, err);
556
557 ddi_dma_free_handle(&rbp->rxb_dh);
558 kmem_free(rbp, sizeof (struct rxbuf));
559 return (NULL);
560 }
561
562 /* Map the bounce buffer into DMA space */
563 if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
564 NULL, rbp->rxb_buf, dp->rx_buf_len,
565 ((dp->gc.gc_rx_header_len > 0)
566 ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
567 :(DDI_DMA_READ | DDI_DMA_STREAMING)),
568 cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
569 NULL,
570 rbp->rxb_dmacookie,
571 &count)) != DDI_DMA_MAPPED) {
572
573 ASSERT(err != DDI_DMA_INUSE);
574 DPRINTF(0, (CE_WARN,
575 "!%s: ddi_dma_addr_bind_handle: failed, err=%d",
576 dp->name, __func__, err));
577
578 /*
579 * we failed to allocate a dma resource
580 * for the rx bounce buffer.
581 */
582 ddi_dma_mem_free(&rbp->rxb_bah);
583 ddi_dma_free_handle(&rbp->rxb_dh);
584 kmem_free(rbp, sizeof (struct rxbuf));
585 return (NULL);
586 }
587
588 /* collect the rest of the DMA mapping cookies */
589 for (i = 1; i < count; i++) {
590 ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
591 }
592 rbp->rxb_nfrags = count;
593
594 /* Now we successfully prepared an rx buffer */
595 dp->rx_buf_allocated++;
596
597 return (rbp);
598 }
599
600 /* ============================================================== */
601 /*
602 * memory resource management
603 */
604 /* ============================================================== */
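/*
 * gem_alloc_memory() carves a single DMA-consistent area into three
 * contiguous regions: the rx descriptor ring, the tx descriptor ring and
 * an optional chip-private io area.  This layout is why the tx sync
 * routines above offset by (tx_ring_dma - rx_ring_dma).  It also binds a
 * per-txbuf bounce buffer used later by gem_setup_txbuf_copy().
 */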
605 static int
606 gem_alloc_memory(struct gem_dev *dp)
607 {
608 caddr_t ring;
609 caddr_t buf;
610 size_t req_size;
611 size_t ring_len;
612 size_t buf_len;
613 ddi_dma_cookie_t ring_cookie;
614 ddi_dma_cookie_t buf_cookie;
615 uint_t count;
616 int i;
617 int err;
618 struct txbuf *tbp;
619 int tx_buf_len;
620 ddi_dma_attr_t dma_attr_txbounce;
621
622 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
623
624 dp->desc_dma_handle = NULL;
625 req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;
626
627 if (req_size > 0) {
628 /*
629 * Alloc RX/TX descriptors and an I/O area.
630 */
631 if ((err = ddi_dma_alloc_handle(dp->dip,
632 &dp->gc.gc_dma_attr_desc,
633 DDI_DMA_SLEEP, NULL,
634 &dp->desc_dma_handle)) != DDI_SUCCESS) {
635 cmn_err(CE_WARN,
636 "!%s: %s: ddi_dma_alloc_handle failed: %d",
637 dp->name, __func__, err);
638 return (ENOMEM);
639 }
640
641 if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
642 req_size, &dp->gc.gc_desc_attr,
643 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
644 &ring, &ring_len,
645 &dp->desc_acc_handle)) != DDI_SUCCESS) {
646 cmn_err(CE_WARN,
647 "!%s: %s: ddi_dma_mem_alloc failed: "
648 "ret %d, request size: %d",
649 dp->name, __func__, err, (int)req_size);
650 ddi_dma_free_handle(&dp->desc_dma_handle);
651 return (ENOMEM);
652 }
653
654 if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
655 NULL, ring, ring_len,
656 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
657 DDI_DMA_SLEEP, NULL,
658 &ring_cookie, &count)) != DDI_SUCCESS) {
659 ASSERT(err != DDI_DMA_INUSE);
660 cmn_err(CE_WARN,
661 "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
662 dp->name, __func__, err);
663 ddi_dma_mem_free(&dp->desc_acc_handle);
664 ddi_dma_free_handle(&dp->desc_dma_handle);
665 return (ENOMEM);
666 }
667 ASSERT(count == 1);
668
669 /* set base of rx descriptor ring */
670 dp->rx_ring = ring;
671 dp->rx_ring_dma = ring_cookie.dmac_laddress;
672
673 /* set base of tx descriptor ring */
674 dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
675 dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;
676
677 /* set base of io area */
678 dp->io_area = dp->tx_ring + dp->tx_desc_size;
679 dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
680 }
681
682 /*
683 * Prepare DMA resources for tx packets
684 */
685 ASSERT(dp->gc.gc_tx_buf_size > 0);
686
687 /* Special dma attribute for tx bounce buffers */
688 dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
689 dma_attr_txbounce.dma_attr_sgllen = 1;
690 dma_attr_txbounce.dma_attr_align =
691 max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);
692
693 /* Tx bounce buffers must be large enough for the maximum tx packet size. */
694 tx_buf_len = MAXPKTBUF(dp);
695 tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);
696
697 ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);
698
699 for (i = 0, tbp = dp->tx_buf;
700 i < dp->gc.gc_tx_buf_size; i++, tbp++) {
701
702 /* setup bounce buffers for tx packets */
703 if ((err = ddi_dma_alloc_handle(dp->dip,
704 &dma_attr_txbounce,
705 DDI_DMA_SLEEP, NULL,
706 &tbp->txb_bdh)) != DDI_SUCCESS) {
707
708 cmn_err(CE_WARN,
709 "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
710 " err=%d, i=%d",
711 dp->name, __func__, err, i);
712 goto err_alloc_dh;
713 }
714
715 if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
716 tx_buf_len,
717 &dp->gc.gc_buf_attr,
718 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
719 &buf, &buf_len,
720 &tbp->txb_bah)) != DDI_SUCCESS) {
721 cmn_err(CE_WARN,
722 "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed"
723 "ret %d, request size %d",
724 dp->name, __func__, err, tx_buf_len);
725 ddi_dma_free_handle(&tbp->txb_bdh);
726 goto err_alloc_dh;
727 }
728
729 if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
730 NULL, buf, buf_len,
731 DDI_DMA_WRITE | DDI_DMA_STREAMING,
732 DDI_DMA_SLEEP, NULL,
733 &buf_cookie, &count)) != DDI_SUCCESS) {
734 ASSERT(err != DDI_DMA_INUSE);
735 cmn_err(CE_WARN,
736 "!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
737 dp->name, __func__, err);
738 ddi_dma_mem_free(&tbp->txb_bah);
739 ddi_dma_free_handle(&tbp->txb_bdh);
740 goto err_alloc_dh;
741 }
742 ASSERT(count == 1);
743 tbp->txb_buf = buf;
744 tbp->txb_buf_dma = buf_cookie.dmac_laddress;
745 }
746
747 return (0);
748
749 err_alloc_dh:
750 if (dp->gc.gc_tx_buf_size > 0) {
751 while (i-- > 0) {
752 (void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
753 ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
754 ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
755 }
756 }
757
758 if (dp->desc_dma_handle) {
759 (void) ddi_dma_unbind_handle(dp->desc_dma_handle);
760 ddi_dma_mem_free(&dp->desc_acc_handle);
761 ddi_dma_free_handle(&dp->desc_dma_handle);
762 dp->desc_dma_handle = NULL;
763 }
764
765 return (ENOMEM);
766 }
767
768 static void
769 gem_free_memory(struct gem_dev *dp)
770 {
771 int i;
772 struct rxbuf *rbp;
773 struct txbuf *tbp;
774
775 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
776
777 /* Free TX/RX descriptors and tx padding buffer */
778 if (dp->desc_dma_handle) {
779 (void) ddi_dma_unbind_handle(dp->desc_dma_handle);
780 ddi_dma_mem_free(&dp->desc_acc_handle);
781 ddi_dma_free_handle(&dp->desc_dma_handle);
782 dp->desc_dma_handle = NULL;
783 }
784
785 /* Free dma handles for Tx */
786 for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
787 /* Free bounce buffer associated to each txbuf */
788 (void) ddi_dma_unbind_handle(tbp->txb_bdh);
789 ddi_dma_mem_free(&tbp->txb_bah);
790 ddi_dma_free_handle(&tbp->txb_bdh);
791 }
792
793 /* Free rx buffer */
794 while ((rbp = dp->rx_buf_freelist) != NULL) {
795
796 ASSERT(dp->rx_buf_freecnt > 0);
797
798 dp->rx_buf_freelist = rbp->rxb_next;
799 dp->rx_buf_freecnt--;
800
801 /* release DMA mapping */
802 ASSERT(rbp->rxb_dh != NULL);
803
804 /* free dma handle for rx buf */
805 /* it always has a dma mapping */
806 ASSERT(rbp->rxb_nfrags > 0);
807 (void) ddi_dma_unbind_handle(rbp->rxb_dh);
808
809 /* free the associated bounce buffer and dma handle */
810 ASSERT(rbp->rxb_bah != NULL);
811 ddi_dma_mem_free(&rbp->rxb_bah);
812 /* free the associated dma handle */
813 ddi_dma_free_handle(&rbp->rxb_dh);
814
815 /* free the base memory of rx buffer management */
816 kmem_free(rbp, sizeof (struct rxbuf));
817 }
818 }
819
820 /* ============================================================== */
821 /*
822 * Rx/Tx descriptor slot management
823 */
824 /* ============================================================== */
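/*
 * Tx buffers cycle through three logical queues tracked by sequence
 * numbers: free (available to gem_send_common), softq (descriptors
 * written but not yet handed to the hardware) and active (owned by the
 * hardware until gem_reclaim_txbuf() completes them).  Rx buffers simply
 * move between the per-device freelist and the active rx_buf_head list.
 */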
825 /*
826 * Initialize an empty rx ring.
827 */
828 static void
829 gem_init_rx_ring(struct gem_dev *dp)
830 {
831 int i;
832 int rx_ring_size = dp->gc.gc_rx_ring_size;
833
834 DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
835 dp->name, __func__,
836 rx_ring_size, dp->gc.gc_rx_buf_max));
837
838 /* make a physical chain of rx descriptors */
839 for (i = 0; i < rx_ring_size; i++) {
840 (*dp->gc.gc_rx_desc_init)(dp, i);
841 }
842 gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
843
844 dp->rx_active_head = (seqnum_t)0;
845 dp->rx_active_tail = (seqnum_t)0;
846
847 ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
848 ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
849 }
850
851 /*
852 * Prepare rx buffers and put them into the rx buffer/descriptor ring.
853 */
854 static void
855 gem_prepare_rx_buf(struct gem_dev *dp)
856 {
857 int i;
858 int nrbuf;
859 struct rxbuf *rbp;
860
861 ASSERT(mutex_owned(&dp->intrlock));
862
863 /* Now we have no active buffers in rx ring */
864
865 nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
866 for (i = 0; i < nrbuf; i++) {
867 if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
868 break;
869 }
870 gem_append_rxbuf(dp, rbp);
871 }
872
873 gem_rx_desc_dma_sync(dp,
874 0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
875 }
876
877 /*
878 * Reclaim active rx buffers in rx buffer ring.
879 */
880 static void
881 gem_clean_rx_buf(struct gem_dev *dp)
882 {
883 int i;
884 struct rxbuf *rbp;
885 int rx_ring_size = dp->gc.gc_rx_ring_size;
886 #ifdef GEM_DEBUG_LEVEL
887 int total;
888 #endif
889 ASSERT(mutex_owned(&dp->intrlock));
890
891 DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
892 dp->name, __func__, dp->rx_buf_freecnt));
893 /*
894 * clean up HW descriptors
895 */
896 for (i = 0; i < rx_ring_size; i++) {
897 (*dp->gc.gc_rx_desc_clean)(dp, i);
898 }
899 gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
900
901 #ifdef GEM_DEBUG_LEVEL
902 total = 0;
903 #endif
904 /*
905 * Reclaim allocated rx buffers
906 */
907 while ((rbp = dp->rx_buf_head) != NULL) {
908 #ifdef GEM_DEBUG_LEVEL
909 total++;
910 #endif
911 /* remove the first one from rx buffer list */
912 dp->rx_buf_head = rbp->rxb_next;
913
914 /* recycle the rxbuf */
915 gem_free_rxbuf(rbp);
916 }
917 dp->rx_buf_tail = (struct rxbuf *)NULL;
918
919 DPRINTF(2, (CE_CONT,
920 "!%s: %s: %d buffers freeed, total: %d free",
921 dp->name, __func__, total, dp->rx_buf_freecnt));
922 }
923
924 /*
925 * Initialize an empty transmit buffer/descriptor ring
926 */
927 static void
928 gem_init_tx_ring(struct gem_dev *dp)
929 {
930 int i;
931 int tx_buf_size = dp->gc.gc_tx_buf_size;
932 int tx_ring_size = dp->gc.gc_tx_ring_size;
933
934 DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
935 dp->name, __func__,
936 dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));
937
938 ASSERT(!dp->mac_active);
939
940 /* initialize active list and free list */
941 dp->tx_slots_base =
942 SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
943 dp->tx_softq_tail -= dp->tx_softq_head;
944 dp->tx_softq_head = (seqnum_t)0;
945
946 dp->tx_active_head = dp->tx_softq_head;
947 dp->tx_active_tail = dp->tx_softq_head;
948
949 dp->tx_free_head = dp->tx_softq_tail;
950 dp->tx_free_tail = dp->gc.gc_tx_buf_limit;
951
952 dp->tx_desc_head = (seqnum_t)0;
953 dp->tx_desc_tail = (seqnum_t)0;
954 dp->tx_desc_intr = (seqnum_t)0;
955
956 for (i = 0; i < tx_ring_size; i++) {
957 (*dp->gc.gc_tx_desc_init)(dp, i);
958 }
959 gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
960 }
961
962 __INLINE__
963 static void
964 gem_txbuf_free_dma_resources(struct txbuf *tbp)
965 {
966 if (tbp->txb_mp) {
967 freemsg(tbp->txb_mp);
968 tbp->txb_mp = NULL;
969 }
970 tbp->txb_nfrags = 0;
971 tbp->txb_flag = 0;
972 }
973 #pragma inline(gem_txbuf_free_dma_resources)
974
975 /*
976 * reclaim active tx buffers and reset positions in tx rings.
977 */
978 static void
979 gem_clean_tx_buf(struct gem_dev *dp)
980 {
981 int i;
982 seqnum_t head;
983 seqnum_t tail;
984 seqnum_t sn;
985 struct txbuf *tbp;
986 int tx_ring_size = dp->gc.gc_tx_ring_size;
987 #ifdef GEM_DEBUG_LEVEL
988 int err;
989 #endif
990
991 ASSERT(!dp->mac_active);
992 ASSERT(dp->tx_busy == 0);
993 ASSERT(dp->tx_softq_tail == dp->tx_free_head);
994
995 /*
996 * clean up all HW descriptors
997 */
998 for (i = 0; i < tx_ring_size; i++) {
999 (*dp->gc.gc_tx_desc_clean)(dp, i);
1000 }
1001 gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
1002
1003 /* dequeue all active and loaded buffers */
1004 head = dp->tx_active_head;
1005 tail = dp->tx_softq_tail;
1006
1007 ASSERT(dp->tx_free_head - head >= 0);
1008 tbp = GET_TXBUF(dp, head);
1009 for (sn = head; sn != tail; sn++) {
1010 gem_txbuf_free_dma_resources(tbp);
1011 ASSERT(tbp->txb_mp == NULL);
1012 dp->stats.errxmt++;
1013 tbp = tbp->txb_next;
1014 }
1015
1016 #ifdef GEM_DEBUG_LEVEL
1017 /* ensure that no dma resources for tx are still in use */
1018 err = 0;
1019 while (sn != head + dp->gc.gc_tx_buf_size) {
1020 if (tbp->txb_mp || tbp->txb_nfrags) {
1021 DPRINTF(0, (CE_CONT,
1022 "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
1023 dp->name, __func__,
1024 sn, SLOT(sn, dp->gc.gc_tx_buf_size),
1025 tbp->txb_mp, tbp->txb_nfrags));
1026 err = 1;
1027 }
1028 sn++;
1029 tbp = tbp->txb_next;
1030 }
1031
1032 if (err) {
1033 gem_dump_txbuf(dp, CE_WARN,
1034 "gem_clean_tx_buf: tbp->txb_mp != NULL");
1035 }
1036 #endif
1037 /* recycle buffers, now no active tx buffers in the ring */
1038 dp->tx_free_tail += tail - head;
1039 ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);
1040
1041 /* fix positions in tx buffer rings */
1042 dp->tx_active_head = dp->tx_free_head;
1043 dp->tx_active_tail = dp->tx_free_head;
1044 dp->tx_softq_head = dp->tx_free_head;
1045 dp->tx_softq_tail = dp->tx_free_head;
1046 }
1047
1048 /*
1049 * Reclaim transmitted buffers from tx buffer/descriptor ring.
1050 */
1051 __INLINE__ int
1052 gem_reclaim_txbuf(struct gem_dev *dp)
1053 {
1054 struct txbuf *tbp;
1055 uint_t txstat;
1056 int err = GEM_SUCCESS;
1057 seqnum_t head;
1058 seqnum_t tail;
1059 seqnum_t sn;
1060 seqnum_t desc_head;
1061 int tx_ring_size = dp->gc.gc_tx_ring_size;
1062 uint_t (*tx_desc_stat)(struct gem_dev *dp,
1063 int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
1064 clock_t now;
1065
1066 now = ddi_get_lbolt();
1067 if (now == (clock_t)0) {
1068 /* make non-zero timestamp */
1069 now--;
1070 }
1071
1072 mutex_enter(&dp->xmitlock);
1073
1074 head = dp->tx_active_head;
1075 tail = dp->tx_active_tail;
1076
1077 #if GEM_DEBUG_LEVEL > 2
1078 if (head != tail) {
1079 cmn_err(CE_CONT, "!%s: %s: "
1080 "testing active_head:%d[%d], active_tail:%d[%d]",
1081 dp->name, __func__,
1082 head, SLOT(head, dp->gc.gc_tx_buf_size),
1083 tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1084 }
1085 #endif
1086 #ifdef DEBUG
1087 if (dp->tx_reclaim_busy == 0) {
1088 /* check tx buffer management consistency */
1089 ASSERT(dp->tx_free_tail - dp->tx_active_head
1090 == dp->gc.gc_tx_buf_limit);
1091 /* EMPTY */
1092 }
1093 #endif
1094 dp->tx_reclaim_busy++;
1095
1096 /* sync all active HW descriptors */
1097 gem_tx_desc_dma_sync(dp,
1098 SLOT(dp->tx_desc_head, tx_ring_size),
1099 dp->tx_desc_tail - dp->tx_desc_head,
1100 DDI_DMA_SYNC_FORKERNEL);
1101
1102 tbp = GET_TXBUF(dp, head);
1103 desc_head = dp->tx_desc_head;
1104 for (sn = head; sn != tail;
1105 dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1106 int ndescs;
1107
1108 ASSERT(tbp->txb_desc == desc_head);
1109
1110 ndescs = tbp->txb_ndescs;
1111 if (ndescs == 0) {
1112 /* skip errored descriptors */
1113 continue;
1114 }
1115 txstat = (*tx_desc_stat)(dp,
1116 SLOT(tbp->txb_desc, tx_ring_size), ndescs);
1117
1118 if (txstat == 0) {
1119 /* not transmitted yet */
1120 break;
1121 }
1122
1123 if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
1124 dp->tx_blocked = now;
1125 }
1126
1127 ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));
1128
1129 if (txstat & GEM_TX_ERR) {
1130 err = GEM_FAILURE;
1131 cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
1132 dp->name, sn, SLOT(sn, tx_ring_size));
1133 }
1134 #if GEM_DEBUG_LEVEL > 4
1135 if (now - tbp->txb_stime >= 50) {
1136 cmn_err(CE_WARN, "!%s: tx delay while %d mS",
1137 dp->name, (now - tbp->txb_stime)*10);
1138 }
1139 #endif
1140 /* free transmitted descriptors */
1141 desc_head += ndescs;
1142 }
1143
1144 if (dp->tx_desc_head != desc_head) {
1145 /* we have reclaimed one or more tx buffers */
1146 dp->tx_desc_head = desc_head;
1147
1148 /* If we passed the next interrupt position, update it */
1149 if (desc_head - dp->tx_desc_intr > 0) {
1150 dp->tx_desc_intr = desc_head;
1151 }
1152 }
1153 mutex_exit(&dp->xmitlock);
1154
1155 /* free dma mapping resources associated with transmitted tx buffers */
1156 tbp = GET_TXBUF(dp, head);
1157 tail = sn;
1158 #if GEM_DEBUG_LEVEL > 2
1159 if (head != tail) {
1160 cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
1161 __func__,
1162 head, SLOT(head, dp->gc.gc_tx_buf_size),
1163 tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1164 }
1165 #endif
1166 for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
1167 gem_txbuf_free_dma_resources(tbp);
1168 }
1169
1170 /* recycle the tx buffers */
1171 mutex_enter(&dp->xmitlock);
1172 if (--dp->tx_reclaim_busy == 0) {
1173 /* we are the last thread who can update free tail */
1174 #if GEM_DEBUG_LEVEL > 4
1175 /* check that all resources have been deallocated */
1176 sn = dp->tx_free_tail;
1177 tbp = GET_TXBUF(dp, sn);
1178 while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
1179 if (tbp->txb_nfrags) {
1180 /* in use */
1181 break;
1182 }
1183 ASSERT(tbp->txb_mp == NULL);
1184 tbp = tbp->txb_next;
1185 sn++;
1186 }
1187 ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
1188 #endif
1189 dp->tx_free_tail =
1190 dp->tx_active_head + dp->gc.gc_tx_buf_limit;
1191 }
1192 if (!dp->mac_active) {
1193 /* someone may be waiting for me. */
1194 cv_broadcast(&dp->tx_drain_cv);
1195 }
1196 #if GEM_DEBUG_LEVEL > 2
1197 cmn_err(CE_CONT, "!%s: %s: called, "
1198 "free_head:%d free_tail:%d(+%d) added:%d",
1199 dp->name, __func__,
1200 dp->tx_free_head, dp->tx_free_tail,
1201 dp->tx_free_tail - dp->tx_free_head, tail - head);
1202 #endif
1203 mutex_exit(&dp->xmitlock);
1204
1205 return (err);
1206 }
1207 #pragma inline(gem_reclaim_txbuf)
1208
1209
1210 /*
1211 * Write tx descriptors in an out-of-order manner
1212 */
1213 static void
1214 gem_tx_load_descs_oo(struct gem_dev *dp,
1215 seqnum_t start_slot, seqnum_t end_slot, uint64_t flags)
1216 {
1217 seqnum_t sn;
1218 struct txbuf *tbp;
1219 int tx_ring_size = dp->gc.gc_tx_ring_size;
1220 int (*tx_desc_write)
1221 (struct gem_dev *dp, int slot,
1222 ddi_dma_cookie_t *dmacookie,
1223 int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
1224 clock_t now = ddi_get_lbolt();
1225
1226 sn = start_slot;
1227 tbp = GET_TXBUF(dp, sn);
1228 do {
1229 #if GEM_DEBUG_LEVEL > 1
1230 if (dp->tx_cnt < 100) {
1231 dp->tx_cnt++;
1232 flags |= GEM_TXFLAG_INTR;
1233 }
1234 #endif
1235 /* write a tx descriptor */
1236 tbp->txb_desc = sn;
1237 tbp->txb_ndescs = (*tx_desc_write)(dp,
1238 SLOT(sn, tx_ring_size),
1239 tbp->txb_dmacookie,
1240 tbp->txb_nfrags, flags | tbp->txb_flag);
1241 tbp->txb_stime = now;
1242 ASSERT(tbp->txb_ndescs == 1);
1243
1244 flags = 0;
1245 sn++;
1246 tbp = tbp->txb_next;
1247 } while (sn != end_slot);
1248 }
1249
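/*
 * gem_setup_txbuf_copy() below always copies the outgoing mblk chain into
 * the pre-mapped tx bounce buffer rather than binding each fragment for
 * DMA; this trades a bcopy per packet for a single, simple dma cookie.
 */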
1250 __INLINE__
1251 static size_t
1252 gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
1253 {
1254 size_t min_pkt;
1255 caddr_t bp;
1256 size_t off;
1257 mblk_t *tp;
1258 size_t len;
1259 uint64_t flag;
1260
1261 ASSERT(tbp->txb_mp == NULL);
1262
1263 /* we use bounce buffer for the packet */
1264 min_pkt = ETHERMIN;
1265 bp = tbp->txb_buf;
1266 off = 0;
1267 tp = mp;
1268
1269 flag = tbp->txb_flag;
1270 if (flag & GEM_TXFLAG_SWVTAG) {
1271 /* need to increase min packet size */
1272 min_pkt += VTAG_SIZE;
1273 ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
1274 }
1275
1276 /* copy the rest */
1277 for (; tp; tp = tp->b_cont) {
1278 if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
1279 bcopy(tp->b_rptr, &bp[off], len);
1280 off += len;
1281 }
1282 }
1283
1284 if (off < min_pkt &&
1285 (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
1286 /*
1287 * Extend the packet to minimum packet size explicitly.
1288 * For software vlan packets, we shouldn't use the tx autopad
1289 * function because the nic may not be aware of vlan;
1290 * we must keep 46 octets of payload even when a vlan tag is used.
1291 */
1292 bzero(&bp[off], min_pkt - off);
1293 off = min_pkt;
1294 }
1295
1296 (void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);
1297
1298 tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
1299 tbp->txb_dmacookie[0].dmac_size = off;
1300
1301 DPRINTF(2, (CE_CONT,
1302 "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
1303 dp->name, __func__,
1304 tbp->txb_dmacookie[0].dmac_laddress,
1305 tbp->txb_dmacookie[0].dmac_size,
1306 (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
1307 min_pkt));
1308
1309 /* save misc info */
1310 tbp->txb_mp = mp;
1311 tbp->txb_nfrags = 1;
1312 #ifdef DEBUG_MULTIFRAGS
1313 if (dp->gc.gc_tx_max_frags >= 3 &&
1314 tbp->txb_dmacookie[0].dmac_size > 16*3) {
1315 tbp->txb_dmacookie[1].dmac_laddress =
1316 tbp->txb_dmacookie[0].dmac_laddress + 16;
1317 tbp->txb_dmacookie[2].dmac_laddress =
1318 tbp->txb_dmacookie[1].dmac_laddress + 16;
1319
1320 tbp->txb_dmacookie[2].dmac_size =
1321 tbp->txb_dmacookie[0].dmac_size - 16*2;
1322 tbp->txb_dmacookie[1].dmac_size = 16;
1323 tbp->txb_dmacookie[0].dmac_size = 16;
1324 tbp->txb_nfrags = 3;
1325 }
1326 #endif
1327 return (off);
1328 }
1329 #pragma inline(gem_setup_txbuf_copy)
1330
1331 __INLINE__
1332 static void
1333 gem_tx_start_unit(struct gem_dev *dp)
1334 {
1335 seqnum_t head;
1336 seqnum_t tail;
1337 struct txbuf *tbp_head;
1338 struct txbuf *tbp_tail;
1339
1340 /* update HW descriptors from soft queue */
1341 ASSERT(mutex_owned(&dp->xmitlock));
1342 ASSERT(dp->tx_softq_head == dp->tx_active_tail);
1343
1344 head = dp->tx_softq_head;
1345 tail = dp->tx_softq_tail;
1346
1347 DPRINTF(1, (CE_CONT,
1348 "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
1349 dp->name, __func__, head, tail, tail - head,
1350 dp->tx_desc_head, dp->tx_desc_tail,
1351 dp->tx_desc_tail - dp->tx_desc_head));
1352
1353 ASSERT(tail - head > 0);
1354
1355 dp->tx_desc_tail = tail;
1356
1357 tbp_head = GET_TXBUF(dp, head);
1358 tbp_tail = GET_TXBUF(dp, tail - 1);
1359
1360 ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);
1361
1362 dp->gc.gc_tx_start(dp,
1363 SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
1364 tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);
1365
1366 /* advance softq head and active tail */
1367 dp->tx_softq_head = dp->tx_active_tail = tail;
1368 }
1369 #pragma inline(gem_tx_start_unit)
1370
1371 #ifdef GEM_DEBUG_LEVEL
1372 static int gem_send_cnt[10];
1373 #endif
1374 #define PKT_MIN_SIZE (sizeof (struct ether_header) + 10 + VTAG_SIZE)
1375 #define EHLEN (sizeof (struct ether_header))
1376 /*
1377 * check ether packet type and ip protocol
1378 */
1379 static uint64_t
1380 gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
1381 {
1382 mblk_t *tp;
1383 ssize_t len;
1384 uint_t vtag;
1385 int off;
1386 uint64_t flag;
1387
1388 flag = 0ULL;
1389
1390 /*
1391 * prepare a contiguous header of the packet for protocol analysis
1392 */
1393 if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
1394 /* we use work buffer to copy mblk */
1395 for (tp = mp, off = 0;
1396 tp && (off < PKT_MIN_SIZE);
1397 tp = tp->b_cont, off += len) {
1398 len = (long)tp->b_wptr - (long)tp->b_rptr;
1399 len = min(len, PKT_MIN_SIZE - off);
1400 bcopy(tp->b_rptr, &bp[off], len);
1401 }
1402 } else {
1403 /* we can use mblk without copy */
1404 bp = mp->b_rptr;
1405 }
1406
1407 /* process vlan tag for GLD v3 */
1408 if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
1409 if (dp->misc_flag & GEM_VLAN_HARD) {
1410 vtag = GET_NET16(&bp[VTAG_OFF + 2]);
1411 ASSERT(vtag);
1412 flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
1413 } else {
1414 flag |= GEM_TXFLAG_SWVTAG;
1415 }
1416 }
1417 return (flag);
1418 }
1419 #undef EHLEN
1420 #undef PKT_MIN_SIZE
1421 /*
1422 * gem_send_common is an exported function because hw-dependent routines may
1423 * use it for sending control frames, such as setup frames for the 2114x chipset.
1424 */
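/*
 * An illustrative (hypothetical) use from a chip-specific routine, e.g.
 * queueing a 2114x setup frame:
 *
 *	if (gem_send_common(dp, setup_mp, GEM_SEND_CTRL) != NULL) {
 *		... the frame was not queued; the caller keeps ownership ...
 *	}
 *
 * A non-NULL return hands the unsent mblk chain back to the caller.
 */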
1425 mblk_t *
1426 gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
1427 {
1428 int nmblk;
1429 int avail;
1430 mblk_t *tp;
1431 mblk_t *mp;
1432 int i;
1433 struct txbuf *tbp;
1434 seqnum_t head;
1435 uint64_t load_flags;
1436 uint64_t len_total = 0;
1437 uint32_t bcast = 0;
1438 uint32_t mcast = 0;
1439
1440 ASSERT(mp_head != NULL);
1441
1442 mp = mp_head;
1443 nmblk = 1;
1444 while ((mp = mp->b_next) != NULL) {
1445 nmblk++;
1446 }
1447 #ifdef GEM_DEBUG_LEVEL
1448 gem_send_cnt[0]++;
1449 gem_send_cnt[min(nmblk, 9)]++;
1450 #endif
1451 /*
1452 * Acquire resources
1453 */
1454 mutex_enter(&dp->xmitlock);
1455 if (dp->mac_suspended) {
1456 mutex_exit(&dp->xmitlock);
1457 mp = mp_head;
1458 while (mp) {
1459 tp = mp->b_next;
1460 freemsg(mp);
1461 mp = tp;
1462 }
1463 return (NULL);
1464 }
1465
1466 if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1467 /* don't send data packets while mac isn't active */
1468 /* XXX - should we discard packets? */
1469 mutex_exit(&dp->xmitlock);
1470 return (mp_head);
1471 }
1472
1473 /* allocate free slots */
1474 head = dp->tx_free_head;
1475 avail = dp->tx_free_tail - head;
1476
1477 DPRINTF(2, (CE_CONT,
1478 "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
1479 dp->name, __func__,
1480 dp->tx_free_head, dp->tx_free_tail, avail, nmblk));
1481
1482 avail = min(avail, dp->tx_max_packets);
1483
1484 if (nmblk > avail) {
1485 if (avail == 0) {
1486 /* no resources; short cut */
1487 DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
1488 dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
1489 goto done;
1490 }
1491 nmblk = avail;
1492 }
1493
1494 dp->tx_free_head = head + nmblk;
1495 load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;
1496
1497 /* update the last interrupt position if tx buffers are exhausted. */
1498 if (nmblk == avail) {
1499 tbp = GET_TXBUF(dp, head + avail - 1);
1500 tbp->txb_flag = GEM_TXFLAG_INTR;
1501 dp->tx_desc_intr = head + avail;
1502 }
1503 mutex_exit(&dp->xmitlock);
1504
1505 tbp = GET_TXBUF(dp, head);
1506
1507 for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
1508 uint8_t *bp;
1509 uint64_t txflag;
1510
1511 /* remove one from the mblk list */
1512 ASSERT(mp_head != NULL);
1513 mp = mp_head;
1514 mp_head = mp_head->b_next;
1515 mp->b_next = NULL;
1516
1517 /* statistics for non-unicast packets */
1518 bp = mp->b_rptr;
1519 if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
1520 if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
1521 ETHERADDRL) == 0) {
1522 bcast++;
1523 } else {
1524 mcast++;
1525 }
1526 }
1527
1528 /* save misc info */
1529 txflag = tbp->txb_flag;
1530 txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
1531 txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
1532 tbp->txb_flag = txflag;
1533
1534 len_total += gem_setup_txbuf_copy(dp, mp, tbp);
1535 }
1536
1537 (void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);
1538
1539 /* Append the tbp at the tail of the active tx buffer list */
1540 mutex_enter(&dp->xmitlock);
1541
1542 if ((--dp->tx_busy) == 0) {
1543 /* extend the tail of softq, as new packets are ready. */
1544 dp->tx_softq_tail = dp->tx_free_head;
1545
1546 if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1547 /*
1548 * The device status has changed while we were
1549 * preparing tx buffers.
1550 * As we are the last one to make tx non-busy,
1551 * wake up anyone who may be waiting for us.
1552 */
1553 cv_broadcast(&dp->tx_drain_cv);
1554 } else {
1555 ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
1556 gem_tx_start_unit(dp);
1557 }
1558 }
1559 dp->stats.obytes += len_total;
1560 dp->stats.opackets += nmblk;
1561 dp->stats.obcast += bcast;
1562 dp->stats.omcast += mcast;
1563 done:
1564 mutex_exit(&dp->xmitlock);
1565
1566 return (mp_head);
1567 }
1568
1569 /* ========================================================== */
1570 /*
1571 * error detection and restart routines
1572 */
1573 /* ========================================================== */
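/*
 * gem_restart_nic() roughly does: optionally quiesce rx, stop the mac,
 * reset the chip, re-run gem_mac_init(), restore media mode and the rx
 * filter, and finally restart the mac if the link is still up.  With
 * GEM_RESTART_KEEP_BUF the intent (as the name suggests) is to preserve
 * loaded tx/rx buffers across the restart.
 */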
1574 int
1575 gem_restart_nic(struct gem_dev *dp, uint_t flags)
1576 {
1577 ASSERT(mutex_owned(&dp->intrlock));
1578
1579 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
1580 #ifdef GEM_DEBUG_LEVEL
1581 #if GEM_DEBUG_LEVEL > 1
1582 gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
1583 #endif
1584 #endif
1585
1586 if (dp->mac_suspended) {
1587 /* should we return GEM_FAILURE ? */
1588 return (GEM_FAILURE);
1589 }
1590
1591 /*
1592 * We should avoid calling any routines except xxx_chip_reset
1593 * when we are resuming the system.
1594 */
1595 if (dp->mac_active) {
1596 if (flags & GEM_RESTART_KEEP_BUF) {
1597 /* stop rx gracefully */
1598 dp->rxmode &= ~RXMODE_ENABLE;
1599 (void) (*dp->gc.gc_set_rx_filter)(dp);
1600 }
1601 (void) gem_mac_stop(dp, flags);
1602 }
1603
1604 /* reset the chip. */
1605 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
1606 cmn_err(CE_WARN, "%s: %s: failed to reset chip",
1607 dp->name, __func__);
1608 goto err;
1609 }
1610
1611 if (gem_mac_init(dp) != GEM_SUCCESS) {
1612 goto err;
1613 }
1614
1615 /* setup media mode if the link has been up */
1616 if (dp->mii_state == MII_STATE_LINKUP) {
1617 if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
1618 goto err;
1619 }
1620 }
1621
1622 /* setup mac address and enable rx filter */
1623 dp->rxmode |= RXMODE_ENABLE;
1624 if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
1625 goto err;
1626 }
1627
1628 /*
1629 * XXX - a panic happened because of linkdown.
1630 * We must check mii_state here, because the link can be down just
1631 * before the restart event happens. If the link is down now,
1632 * gem_mac_start() will be called from gem_mii_link_check() when
1633 * the link comes up later.
1634 */
1635 if (dp->mii_state == MII_STATE_LINKUP) {
1636 /* restart the nic */
1637 ASSERT(!dp->mac_active);
1638 (void) gem_mac_start(dp);
1639 }
1640 return (GEM_SUCCESS);
1641 err:
1642 return (GEM_FAILURE);
1643 }
1644
1645
1646 static void
1647 gem_tx_timeout(struct gem_dev *dp)
1648 {
1649 clock_t now;
1650 boolean_t tx_sched;
1651 struct txbuf *tbp;
1652
1653 mutex_enter(&dp->intrlock);
1654
1655 tx_sched = B_FALSE;
1656 now = ddi_get_lbolt();
1657
1658 mutex_enter(&dp->xmitlock);
1659 if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
1660 mutex_exit(&dp->xmitlock);
1661 goto schedule_next;
1662 }
1663 mutex_exit(&dp->xmitlock);
1664
1665 /* reclaim transmitted buffers to check whether the transmitter hangs. */
1666 if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1667 /* tx error happened, reset transmitter in the chip */
1668 (void) gem_restart_nic(dp, 0);
1669 tx_sched = B_TRUE;
1670 dp->tx_blocked = (clock_t)0;
1671
1672 goto schedule_next;
1673 }
1674
1675 mutex_enter(&dp->xmitlock);
1676 /* check if the transmitter thread is stuck */
1677 if (dp->tx_active_head == dp->tx_active_tail) {
1678 /* no tx buffer is loaded to the nic */
1679 if (dp->tx_blocked &&
1680 now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
1681 gem_dump_txbuf(dp, CE_WARN,
1682 "gem_tx_timeout: tx blocked");
1683 tx_sched = B_TRUE;
1684 dp->tx_blocked = (clock_t)0;
1685 }
1686 mutex_exit(&dp->xmitlock);
1687 goto schedule_next;
1688 }
1689
1690 tbp = GET_TXBUF(dp, dp->tx_active_head);
1691 if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
1692 mutex_exit(&dp->xmitlock);
1693 goto schedule_next;
1694 }
1695 mutex_exit(&dp->xmitlock);
1696
1697 gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");
1698
1699 /* discard untransmitted packets and restart tx. */
1700 (void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
1701 tx_sched = B_TRUE;
1702 dp->tx_blocked = (clock_t)0;
1703
1704 schedule_next:
1705 mutex_exit(&dp->intrlock);
1706
1707 /* restart the downstream if needed */
1708 if (tx_sched) {
1709 mac_tx_update(dp->mh);
1710 }
1711
1712 DPRINTF(4, (CE_CONT,
1713 "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
1714 dp->name, BOOLEAN(dp->tx_blocked),
1715 dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
1716 dp->timeout_id =
1717 timeout((void (*)(void *))gem_tx_timeout,
1718 (void *)dp, dp->gc.gc_tx_timeout_interval);
1719 }
1720
1721 /* ================================================================== */
1722 /*
1723 * Interrupt handler
1724 */
1725 /* ================================================================== */
1726 __INLINE__
1727 static void
1728 gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
1729 {
1730 struct rxbuf *rbp;
1731 seqnum_t tail;
1732 int rx_ring_size = dp->gc.gc_rx_ring_size;
1733
1734 ASSERT(rbp_head != NULL);
1735 ASSERT(mutex_owned(&dp->intrlock));
1736
1737 DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
1738 dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));
1739
1740 /*
1741 * Add new buffers into active rx buffer list
1742 */
1743 if (dp->rx_buf_head == NULL) {
1744 dp->rx_buf_head = rbp_head;
1745 ASSERT(dp->rx_buf_tail == NULL);
1746 } else {
1747 dp->rx_buf_tail->rxb_next = rbp_head;
1748 }
1749
1750 tail = dp->rx_active_tail;
1751 for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
1752 /* keep track of the tail so the lower layer can be notified */
1753 dp->rx_buf_tail = rbp;
1754
1755 dp->gc.gc_rx_desc_write(dp,
1756 SLOT(tail, rx_ring_size),
1757 rbp->rxb_dmacookie,
1758 rbp->rxb_nfrags);
1759
1760 dp->rx_active_tail = tail = tail + 1;
1761 }
1762 }
1763 #pragma inline(gem_append_rxbuf)
1764
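/*
 * gem_get_packet_default: the default gc_get_packet method.  It copies the
 * received frame out of the rx bounce buffer into a freshly allocated
 * mblk, reserving VTAG_SIZE bytes of headroom in front of b_rptr
 * (presumably so a vlan tag can later be inserted in place).
 */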
1765 mblk_t *
1766 gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
1767 {
1768 int rx_header_len = dp->gc.gc_rx_header_len;
1769 uint8_t *bp;
1770 mblk_t *mp;
1771
1772 /* allocate a new mblk */
1773 if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
1774 ASSERT(mp->b_next == NULL);
1775 ASSERT(mp->b_cont == NULL);
1776
1777 mp->b_rptr += VTAG_SIZE;
1778 bp = mp->b_rptr;
1779 mp->b_wptr = bp + len;
1780
1781 /*
1782 * flush the entire buffer range to invalidate
1783 * all of the corresponding dirty entries in the iocache.
1784 */
1785 (void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
1786 0, DDI_DMA_SYNC_FORKERNEL);
1787
1788 bcopy(rbp->rxb_buf + rx_header_len, bp, len);
1789 }
1790 return (mp);
1791 }
1792
1793 #ifdef GEM_DEBUG_LEVEL
1794 uint_t gem_rx_pkts[17];
1795 #endif
1796
1797
1798 int
1799 gem_receive(struct gem_dev *dp)
1800 {
1801 uint64_t len_total = 0;
1802 struct rxbuf *rbp;
1803 mblk_t *mp;
1804 int cnt = 0;
1805 uint64_t rxstat;
1806 struct rxbuf *newbufs;
1807 struct rxbuf **newbufs_tailp;
1808 mblk_t *rx_head;
1809 mblk_t **rx_tailp;
1810 int rx_ring_size = dp->gc.gc_rx_ring_size;
1811 seqnum_t active_head;
1812 uint64_t (*rx_desc_stat)(struct gem_dev *dp,
1813 int slot, int ndesc);
1814 int ethermin = ETHERMIN;
1815 int ethermax = dp->mtu + sizeof (struct ether_header);
1816 int rx_header_len = dp->gc.gc_rx_header_len;
1817
1818 ASSERT(mutex_owned(&dp->intrlock));
1819
1820 DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
1821 dp->name, dp->rx_buf_head));
1822
1823 rx_desc_stat = dp->gc.gc_rx_desc_stat;
1824 newbufs_tailp = &newbufs;
1825 rx_tailp = &rx_head;
1826 for (active_head = dp->rx_active_head;
1827 (rbp = dp->rx_buf_head) != NULL; active_head++) {
1828 int len;
1829 if (cnt == 0) {
1830 cnt = max(dp->poll_pkt_delay*2, 10);
1831 cnt = min(cnt,
1832 dp->rx_active_tail - active_head);
1833 gem_rx_desc_dma_sync(dp,
1834 SLOT(active_head, rx_ring_size),
1835 cnt,
1836 DDI_DMA_SYNC_FORKERNEL);
1837 }
1838
1839 if (rx_header_len > 0) {
1840 (void) ddi_dma_sync(rbp->rxb_dh, 0,
1841 rx_header_len, DDI_DMA_SYNC_FORKERNEL);
1842 }
1843
1844 if (((rxstat = (*rx_desc_stat)(dp,
1845 SLOT(active_head, rx_ring_size),
1846 rbp->rxb_nfrags))
1847 & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
1848 /* not received yet */
1849 break;
1850 }
1851
1852 /* Remove the head of the rx buffer list */
1853 dp->rx_buf_head = rbp->rxb_next;
1854 cnt--;
1855
1856
1857 if (rxstat & GEM_RX_ERR) {
1858 goto next;
1859 }
1860
1861 len = rxstat & GEM_RX_LEN;
1862 DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
1863 dp->name, __func__, rxstat, len));
1864
1865 /*
1866 * Copy the packet
1867 */
1868 if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
1869 /* no memory, discard the packet */
1870 dp->stats.norcvbuf++;
1871 goto next;
1872 }
1873
1874 /*
1875 * Process VLAN tag
1876 */
1877 ethermin = ETHERMIN;
1878 ethermax = dp->mtu + sizeof (struct ether_header);
1879 if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
1880 ethermax += VTAG_SIZE;
1881 }
1882
1883 /* check packet size */
1884 if (len < ethermin) {
1885 dp->stats.errrcv++;
1886 dp->stats.runt++;
1887 freemsg(mp);
1888 goto next;
1889 }
1890
1891 if (len > ethermax) {
1892 dp->stats.errrcv++;
1893 dp->stats.frame_too_long++;
1894 freemsg(mp);
1895 goto next;
1896 }
1897
1898 len_total += len;
1899
1900 #ifdef GEM_DEBUG_VLAN
1901 if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) {
1902 gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
1903 }
1904 #endif
1905 /* append the received packet to the temporary rx packet list */
1906 *rx_tailp = mp;
1907 rx_tailp = &mp->b_next;
1908
1909 if (mp->b_rptr[0] & 1) {
1910 if (bcmp(mp->b_rptr,
1911 gem_etherbroadcastaddr.ether_addr_octet,
1912 ETHERADDRL) == 0) {
1913 dp->stats.rbcast++;
1914 } else {
1915 dp->stats.rmcast++;
1916 }
1917 }
1918 next:
1919 ASSERT(rbp != NULL);
1920
1921 /* append this one to the temporary new buffer list */
1922 *newbufs_tailp = rbp;
1923 newbufs_tailp = &rbp->rxb_next;
1924 }
1925
1926 /* advance rx_active_head */
1927 if ((cnt = active_head - dp->rx_active_head) > 0) {
1928 dp->stats.rbytes += len_total;
1929 dp->stats.rpackets += cnt;
1930 }
1931 dp->rx_active_head = active_head;
1932
1933 /* terminate the working list */
1934 *newbufs_tailp = NULL;
1935 *rx_tailp = NULL;
1936
1937 if (dp->rx_buf_head == NULL) {
1938 dp->rx_buf_tail = NULL;
1939 }
1940
1941 DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
1942 dp->name, __func__, cnt, rx_head));
1943
1944 if (newbufs) {
1945 /*
1946 * refill the rx list with new buffers
1947 */
1948 seqnum_t head;
1949
1950 /* save current tail */
1951 head = dp->rx_active_tail;
1952 gem_append_rxbuf(dp, newbufs);
1953
1954 /* call the hw-dependent start routine if we have one. */
1955 dp->gc.gc_rx_start(dp,
1956 SLOT(head, rx_ring_size), dp->rx_active_tail - head);
1957 }
1958
1959 if (rx_head) {
1960 /*
1961 * send up received packets
1962 */
1963 mutex_exit(&dp->intrlock);
1964 mac_rx(dp->mh, NULL, rx_head);
1965 mutex_enter(&dp->intrlock);
1966 }
1967
1968 #ifdef GEM_DEBUG_LEVEL
1969 gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
1970 #endif
1971 return (cnt);
1972 }
1973
1974 boolean_t
1975 gem_tx_done(struct gem_dev *dp)
1976 {
1977 boolean_t tx_sched = B_FALSE;
1978
1979 if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1980 (void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
1981 DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
1982 dp->name, dp->tx_active_head, dp->tx_active_tail));
1983 tx_sched = B_TRUE;
1984 goto x;
1985 }
1986
1987 mutex_enter(&dp->xmitlock);
1988
1989 /* XXX - we must not have any packets in soft queue */
1990 ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
1991 /*
1992 * If we won't have a chance to get more free tx buffers and tx is blocked,
1993 * it is worth rescheduling the downstream, i.e. the tx side.
1994 */
1995 ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
1996 if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
1997 /*
1998 * As no further tx-done interrupts are scheduled, this
1999 * is the last chance to kick tx side, which may be
2000 * blocked now, otherwise the tx side never works again.
2001 */
2002 tx_sched = B_TRUE;
2003 dp->tx_blocked = (clock_t)0;
2004 dp->tx_max_packets =
2005 min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
2006 }
2007
2008 mutex_exit(&dp->xmitlock);
2009
2010 DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d",
2011 dp->name, __func__, BOOLEAN(dp->tx_blocked)));
2012 x:
2013 return (tx_sched);
2014 }
2015
2016 static uint_t
2017 gem_intr(struct gem_dev *dp)
2018 {
2019 uint_t ret;
2020
2021 mutex_enter(&dp->intrlock);
2022 if (dp->mac_suspended) {
2023 mutex_exit(&dp->intrlock);
2024 return (DDI_INTR_UNCLAIMED);
2025 }
2026 dp->intr_busy = B_TRUE;
2027
2028 ret = (*dp->gc.gc_interrupt)(dp);
2029
2030 if (ret == DDI_INTR_UNCLAIMED) {
2031 dp->intr_busy = B_FALSE;
2032 mutex_exit(&dp->intrlock);
2033 return (ret);
2034 }
2035
2036 if (!dp->mac_active) {
2037 cv_broadcast(&dp->tx_drain_cv);
2038 }
2039
2040
2041 dp->stats.intr++;
2042 dp->intr_busy = B_FALSE;
2043
2044 mutex_exit(&dp->intrlock);
2045
2046 if (ret & INTR_RESTART_TX) {
2047 DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
2048 mac_tx_update(dp->mh);
2049 ret &= ~INTR_RESTART_TX;
2050 }
2051 return (ret);
2052 }
2053
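/*
 * gem_intr_watcher: invokes the interrupt service routine from a
 * timeout context and then reschedules itself to run again on the
 * next tick, so interrupt processing also happens by polling.
 */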
2054 static void
2055 gem_intr_watcher(struct gem_dev *dp)
2056 {
2057 (void) gem_intr(dp);
2058
2059 	/* schedule the next call of gem_intr_watcher */
2060 dp->intr_watcher_id =
2061 timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
2062 }
2063
2064 /* ======================================================================== */
2065 /*
2066 * MII support routines
2067 */
2068 /* ======================================================================== */
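/*
 * gem_choose_forcedmode: choose the forced speed/duplex setting from
 * the locally advertised abilities, used when auto-negotiation is
 * disabled. Preference order is 1000 -> 100 -> 10 Mbps, taking the
 * full-duplex variant where it is advertised.
 */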
2069 static void
2070 gem_choose_forcedmode(struct gem_dev *dp)
2071 {
2072 /* choose media mode */
2073 if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
2074 dp->speed = GEM_SPD_1000;
2075 dp->full_duplex = dp->anadv_1000fdx;
2076 } else if (dp->anadv_100fdx || dp->anadv_100t4) {
2077 dp->speed = GEM_SPD_100;
2078 dp->full_duplex = B_TRUE;
2079 } else if (dp->anadv_100hdx) {
2080 dp->speed = GEM_SPD_100;
2081 dp->full_duplex = B_FALSE;
2082 } else {
2083 dp->speed = GEM_SPD_10;
2084 dp->full_duplex = dp->anadv_10fdx;
2085 }
2086 }
2087
2088 uint16_t
2089 gem_mii_read(struct gem_dev *dp, uint_t reg)
2090 {
2091 if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2092 (*dp->gc.gc_mii_sync)(dp);
2093 }
2094 return ((*dp->gc.gc_mii_read)(dp, reg));
2095 }
2096
2097 void
2098 gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
2099 {
2100 if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2101 (*dp->gc.gc_mii_sync)(dp);
2102 }
2103 (*dp->gc.gc_mii_write)(dp, reg, val);
2104 }
2105
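/*
 * Flow control capabilities are handled as a 2-bit code throughout
 * this file: bit 0 is the symmetric PAUSE ability and bit 1 is the
 * asymmetric ASM_PAUSE ability, i.e. 0:none, 1:symmetric, 2:tx-only,
 * 3:rx/symmetric. fc_cap_decode() extracts this code from an MII
 * ability register value; fc_cap_encode[] (in gem_mii_config_default)
 * and gem_fc_result[][] (further below) map it back to register bits
 * and to the negotiated FLOW_CONTROL_* mode respectively.
 */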
2106 #define fc_cap_decode(x) \
2107 ((((x) & MII_ABILITY_PAUSE) ? 1 : 0) | \
2108 (((x) & MII_ABILITY_ASMPAUSE) ? 2 : 0))
2109
2110 int
2111 gem_mii_config_default(struct gem_dev *dp)
2112 {
2113 uint16_t mii_stat;
2114 uint16_t val;
2115 static uint16_t fc_cap_encode[4] = {
2116 0, /* none */
2117 MII_ABILITY_PAUSE, /* symmetric */
2118 MII_ABILITY_ASMPAUSE, /* tx */
2119 MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE, /* rx-symmetric */
2120 };
2121
2122 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2123
2124 /*
2125 * Configure bits in advertisement register
2126 */
2127 mii_stat = dp->mii_status;
2128
2129 DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
2130 dp->name, __func__, mii_stat, MII_STATUS_BITS));
2131
2132 if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
2133 		/* this should not happen */
2134 cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
2135 dp->name, mii_stat, MII_STATUS_BITS);
2136 return (GEM_FAILURE);
2137 }
2138
2139 /* Do not change the rest of the ability bits in the advert reg */
2140 val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;
2141
2142 DPRINTF(0, (CE_CONT,
2143 "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
2144 dp->name, __func__,
2145 dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
2146 dp->anadv_10fdx, dp->anadv_10hdx));
2147
2148 if (dp->anadv_100t4) {
2149 val |= MII_ABILITY_100BASE_T4;
2150 }
2151 if (dp->anadv_100fdx) {
2152 val |= MII_ABILITY_100BASE_TX_FD;
2153 }
2154 if (dp->anadv_100hdx) {
2155 val |= MII_ABILITY_100BASE_TX;
2156 }
2157 if (dp->anadv_10fdx) {
2158 val |= MII_ABILITY_10BASE_T_FD;
2159 }
2160 if (dp->anadv_10hdx) {
2161 val |= MII_ABILITY_10BASE_T;
2162 }
2163
2164 /* set flow control capability */
2165 val |= fc_cap_encode[dp->anadv_flow_control];
2166
2167 DPRINTF(0, (CE_CONT,
2168 "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
2169 dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
2170 dp->anadv_flow_control));
2171
2172 gem_mii_write(dp, MII_AN_ADVERT, val);
2173
2174 if (mii_stat & MII_STATUS_XSTATUS) {
2175 /*
2176 * 1000Base-T GMII support
2177 */
2178 if (!dp->anadv_autoneg) {
2179 /* enable manual configuration */
2180 val = MII_1000TC_CFG_EN;
2181 } else {
2182 val = 0;
2183 if (dp->anadv_1000fdx) {
2184 val |= MII_1000TC_ADV_FULL;
2185 }
2186 if (dp->anadv_1000hdx) {
2187 val |= MII_1000TC_ADV_HALF;
2188 }
2189 }
2190 DPRINTF(0, (CE_CONT,
2191 "!%s: %s: setting MII_1000TC reg:%b",
2192 dp->name, __func__, val, MII_1000TC_BITS));
2193
2194 gem_mii_write(dp, MII_1000TC, val);
2195 }
2196
2197 return (GEM_SUCCESS);
2198 }
2199
2200 #define GEM_LINKUP(dp) mac_link_update((dp)->mh, LINK_STATE_UP)
2201 #define GEM_LINKDOWN(dp) mac_link_update((dp)->mh, LINK_STATE_DOWN)
2202
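/*
 * gem_fc_result[][] resolves the flow control mode to use from the
 * local and link-partner pause capabilities; it is indexed with
 * fc_cap_decode() of the advertised and lpable registers, and the
 * result is applied only on full-duplex links (half duplex always
 * ends up with FLOW_CONTROL_NONE).
 */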
2203 static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
2204 /* none symm tx rx/symm */
2205 /* none */
2206 {FLOW_CONTROL_NONE,
2207 FLOW_CONTROL_NONE,
2208 FLOW_CONTROL_NONE,
2209 FLOW_CONTROL_NONE},
2210 /* sym */
2211 {FLOW_CONTROL_NONE,
2212 FLOW_CONTROL_SYMMETRIC,
2213 FLOW_CONTROL_NONE,
2214 FLOW_CONTROL_SYMMETRIC},
2215 /* tx */
2216 {FLOW_CONTROL_NONE,
2217 FLOW_CONTROL_NONE,
2218 FLOW_CONTROL_NONE,
2219 FLOW_CONTROL_TX_PAUSE},
2220 /* rx/symm */
2221 {FLOW_CONTROL_NONE,
2222 FLOW_CONTROL_SYMMETRIC,
2223 FLOW_CONTROL_RX_PAUSE,
2224 FLOW_CONTROL_SYMMETRIC},
2225 };
2226
2227 static char *gem_fc_type[] = {
2228 "without",
2229 "with symmetric",
2230 "with tx",
2231 "with rx",
2232 };
2233
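/*
 * gem_mii_link_check: runs one step of the MII link state machine:
 *
 *   UNKNOWN -> RESETTING -> AUTONEGOTIATING -> AN_DONE
 *     -> MEDIA_SETUP -> LINKDOWN <-> LINKUP
 *
 * Failures and linkdown timeouts re-enter the machine through the
 * reset_phy and autonego labels below. The return value indicates
 * whether a blocked tx side should be rescheduled by the caller.
 */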
2234 boolean_t
2235 gem_mii_link_check(struct gem_dev *dp)
2236 {
2237 uint16_t old_mii_state;
2238 boolean_t tx_sched = B_FALSE;
2239 uint16_t status;
2240 uint16_t advert;
2241 uint16_t lpable;
2242 uint16_t exp;
2243 uint16_t ctl1000;
2244 uint16_t stat1000;
2245 uint16_t val;
2246 clock_t now;
2247 clock_t diff;
2248 int linkdown_action;
2249 boolean_t fix_phy = B_FALSE;
2250
2251 now = ddi_get_lbolt();
2252 old_mii_state = dp->mii_state;
2253
2254 DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
2255 dp->name, __func__, now, dp->mii_state));
2256
2257 diff = now - dp->mii_last_check;
2258 dp->mii_last_check = now;
2259
2260 /*
2261 * For NWAM, don't show linkdown state right
2262 * after the system boots
2263 */
2264 if (dp->linkup_delay > 0) {
2265 if (dp->linkup_delay > diff) {
2266 dp->linkup_delay -= diff;
2267 } else {
2268 /* link up timeout */
2269 dp->linkup_delay = -1;
2270 }
2271 }
2272
2273 next_nowait:
2274 switch (dp->mii_state) {
2275 case MII_STATE_UNKNOWN:
2276 /* power-up, DP83840 requires 32 sync bits */
2277 (*dp->gc.gc_mii_sync)(dp);
2278 goto reset_phy;
2279
2280 case MII_STATE_RESETTING:
2281 dp->mii_timer -= diff;
2282 if (dp->mii_timer > 0) {
2283 			/* don't read phy registers while resetting */
2284 dp->mii_interval = WATCH_INTERVAL_FAST;
2285 goto next;
2286 }
2287
2288 /* Timer expired, ensure reset bit is not set */
2289
2290 if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
2291 /* some phys need sync bits after reset */
2292 (*dp->gc.gc_mii_sync)(dp);
2293 }
2294 val = gem_mii_read(dp, MII_CONTROL);
2295 if (val & MII_CONTROL_RESET) {
2296 cmn_err(CE_NOTE,
2297 "!%s: time:%ld resetting phy not complete."
2298 " mii_control:0x%b",
2299 dp->name, ddi_get_lbolt(),
2300 val, MII_CONTROL_BITS);
2301 }
2302
2303 /* ensure neither isolated nor pwrdown nor auto-nego mode */
2304 /* XXX -- this operation is required for NS DP83840A. */
2305 gem_mii_write(dp, MII_CONTROL, 0);
2306
2307 		/* As the PHY reset has completed, configure the PHY registers */
2308 if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
2309 /* we failed to configure PHY. */
2310 goto reset_phy;
2311 }
2312
2313 		/* mii_config may disable autonegotiation */
2314 gem_choose_forcedmode(dp);
2315
2316 dp->mii_lpable = 0;
2317 dp->mii_advert = 0;
2318 dp->mii_exp = 0;
2319 dp->mii_ctl1000 = 0;
2320 dp->mii_stat1000 = 0;
2321 dp->flow_control = FLOW_CONTROL_NONE;
2322
2323 if (!dp->anadv_autoneg) {
2324 /* skip auto-negotiation phase */
2325 dp->mii_state = MII_STATE_MEDIA_SETUP;
2326 dp->mii_timer = 0;
2327 dp->mii_interval = 0;
2328 goto next_nowait;
2329 }
2330
2331 /* Issue auto-negotiation command */
2332 goto autonego;
2333
2334 case MII_STATE_AUTONEGOTIATING:
2335 /*
2336 * Autonegotiation is in progress
2337 */
2338 dp->mii_timer -= diff;
2339 if (dp->mii_timer -
2340 (dp->gc.gc_mii_an_timeout
2341 - dp->gc.gc_mii_an_wait) > 0) {
2342 /*
2343 * wait for a while, typically autonegotiation
2344 * completes in 2.3 - 2.5 sec.
2345 */
2346 dp->mii_interval = WATCH_INTERVAL_FAST;
2347 goto next;
2348 }
2349
2350 /* read PHY status */
2351 status = gem_mii_read(dp, MII_STATUS);
2352 DPRINTF(4, (CE_CONT,
2353 "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
2354 dp->name, __func__, dp->mii_state,
2355 status, MII_STATUS_BITS));
2356
2357 if (status & MII_STATUS_REMFAULT) {
2358 /*
2359 			 * The link partner told us that something wrong happened.
2360 * What do we do ?
2361 */
2362 cmn_err(CE_CONT,
2363 "!%s: auto-negotiation failed: remote fault",
2364 dp->name);
2365 goto autonego;
2366 }
2367
2368 if ((status & MII_STATUS_ANDONE) == 0) {
2369 if (dp->mii_timer <= 0) {
2370 /*
2371 				 * Auto-negotiation timed out;
2372 				 * try again without resetting the phy.
2373 */
2374 if (!dp->mii_supress_msg) {
2375 cmn_err(CE_WARN,
2376 "!%s: auto-negotiation failed: timeout",
2377 dp->name);
2378 dp->mii_supress_msg = B_TRUE;
2379 }
2380 goto autonego;
2381 }
2382 /*
2383 * Auto-negotiation is in progress. Wait.
2384 */
2385 dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2386 goto next;
2387 }
2388
2389 /*
2390 		 * Auto-negotiation has completed.
2391 * Assume linkdown and fall through.
2392 */
2393 dp->mii_supress_msg = B_FALSE;
2394 dp->mii_state = MII_STATE_AN_DONE;
2395 DPRINTF(0, (CE_CONT,
2396 "!%s: auto-negotiation completed, MII_STATUS:%b",
2397 dp->name, status, MII_STATUS_BITS));
2398
2399 if (dp->gc.gc_mii_an_delay > 0) {
2400 dp->mii_timer = dp->gc.gc_mii_an_delay;
2401 dp->mii_interval = drv_usectohz(20*1000);
2402 goto next;
2403 }
2404
2405 dp->mii_timer = 0;
2406 diff = 0;
2407 goto next_nowait;
2408
2409 case MII_STATE_AN_DONE:
2410 /*
2411 		 * Auto-negotiation has completed. Now we can set up the media.
2412 */
2413 dp->mii_timer -= diff;
2414 if (dp->mii_timer > 0) {
2415 /* wait for a while */
2416 dp->mii_interval = WATCH_INTERVAL_FAST;
2417 goto next;
2418 }
2419
2420 /*
2421 * set up the result of auto negotiation
2422 */
2423
2424 /*
2425 		 * Read the registers required to determine the current
2426 * duplex mode and media speed.
2427 */
2428 if (dp->gc.gc_mii_an_delay > 0) {
2429 /*
2430 * As the link watcher context has been suspended,
2431 			 * 'status' is invalid. We must re-read the status register here.
2432 */
2433 status = gem_mii_read(dp, MII_STATUS);
2434 }
2435 advert = gem_mii_read(dp, MII_AN_ADVERT);
2436 lpable = gem_mii_read(dp, MII_AN_LPABLE);
2437 exp = gem_mii_read(dp, MII_AN_EXPANSION);
2438 if (exp == 0xffff) {
2439 /* some phys don't have exp register */
2440 exp = 0;
2441 }
2442 ctl1000 = 0;
2443 stat1000 = 0;
2444 if (dp->mii_status & MII_STATUS_XSTATUS) {
2445 ctl1000 = gem_mii_read(dp, MII_1000TC);
2446 stat1000 = gem_mii_read(dp, MII_1000TS);
2447 }
2448 dp->mii_lpable = lpable;
2449 dp->mii_advert = advert;
2450 dp->mii_exp = exp;
2451 dp->mii_ctl1000 = ctl1000;
2452 dp->mii_stat1000 = stat1000;
2453
2454 cmn_err(CE_CONT,
2455 "!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
2456 dp->name,
2457 advert, MII_ABILITY_BITS,
2458 lpable, MII_ABILITY_BITS,
2459 exp, MII_AN_EXP_BITS);
2460
2461 if (dp->mii_status & MII_STATUS_XSTATUS) {
2462 cmn_err(CE_CONT,
2463 "! MII_1000TC:%b, MII_1000TS:%b",
2464 ctl1000, MII_1000TC_BITS,
2465 stat1000, MII_1000TS_BITS);
2466 }
2467
2468 if (gem_population(lpable) <= 1 &&
2469 (exp & MII_AN_EXP_LPCANAN) == 0) {
2470 if ((advert & MII_ABILITY_TECH) != lpable) {
2471 cmn_err(CE_WARN,
2472 				    "!%s: but the link partner doesn't seem"
2473 " to have auto-negotiation capability."
2474 " please check the link configuration.",
2475 dp->name);
2476 }
2477 /*
2478 			 * this should be the result of parallel detection, which
2479 * cannot detect duplex mode.
2480 */
2481 if (lpable & MII_ABILITY_100BASE_TX) {
2482 /*
2483 * we prefer full duplex mode for 100Mbps
2484 * connection, if we can.
2485 */
2486 lpable |= advert & MII_ABILITY_100BASE_TX_FD;
2487 }
2488
2489 if ((advert & lpable) == 0 &&
2490 lpable & MII_ABILITY_10BASE_T) {
2491 lpable |= advert & MII_ABILITY_10BASE_T_FD;
2492 }
2493 /*
2494 			 * as the link partner cannot auto-negotiate, use a
2495 			 * fixed mode temporarily.
2496 */
2497 fix_phy = B_TRUE;
2498 } else if (lpable == 0) {
2499 cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
2500 goto reset_phy;
2501 }
2502 /*
2503 * configure current link mode according to AN priority.
2504 */
2505 val = advert & lpable;
2506 if ((ctl1000 & MII_1000TC_ADV_FULL) &&
2507 (stat1000 & MII_1000TS_LP_FULL)) {
2508 /* 1000BaseT & full duplex */
2509 dp->speed = GEM_SPD_1000;
2510 dp->full_duplex = B_TRUE;
2511 } else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
2512 (stat1000 & MII_1000TS_LP_HALF)) {
2513 /* 1000BaseT & half duplex */
2514 dp->speed = GEM_SPD_1000;
2515 dp->full_duplex = B_FALSE;
2516 } else if (val & MII_ABILITY_100BASE_TX_FD) {
2517 /* 100BaseTx & full duplex */
2518 dp->speed = GEM_SPD_100;
2519 dp->full_duplex = B_TRUE;
2520 } else if (val & MII_ABILITY_100BASE_T4) {
2521 /* 100BaseT4 & full duplex */
2522 dp->speed = GEM_SPD_100;
2523 dp->full_duplex = B_TRUE;
2524 } else if (val & MII_ABILITY_100BASE_TX) {
2525 /* 100BaseTx & half duplex */
2526 dp->speed = GEM_SPD_100;
2527 dp->full_duplex = B_FALSE;
2528 } else if (val & MII_ABILITY_10BASE_T_FD) {
2529 /* 10BaseT & full duplex */
2530 dp->speed = GEM_SPD_10;
2531 dp->full_duplex = B_TRUE;
2532 } else if (val & MII_ABILITY_10BASE_T) {
2533 /* 10BaseT & half duplex */
2534 dp->speed = GEM_SPD_10;
2535 dp->full_duplex = B_FALSE;
2536 } else {
2537 /*
2538 			 * It seems that the link partner doesn't have
2539 * auto-negotiation capability and our PHY
2540 * could not report the correct current mode.
2541 * We guess current mode by mii_control register.
2542 */
2543 val = gem_mii_read(dp, MII_CONTROL);
2544
2545 /* select 100m full or 10m half */
2546 dp->speed = (val & MII_CONTROL_100MB) ?
2547 GEM_SPD_100 : GEM_SPD_10;
2548 dp->full_duplex = dp->speed != GEM_SPD_10;
2549 fix_phy = B_TRUE;
2550
2551 cmn_err(CE_NOTE,
2552 "!%s: auto-negotiation done but "
2553 "common ability not found.\n"
2554 "PHY state: control:%b advert:%b lpable:%b\n"
2555 "guessing %d Mbps %s duplex mode",
2556 dp->name,
2557 val, MII_CONTROL_BITS,
2558 advert, MII_ABILITY_BITS,
2559 lpable, MII_ABILITY_BITS,
2560 gem_speed_value[dp->speed],
2561 dp->full_duplex ? "full" : "half");
2562 }
2563
2564 if (dp->full_duplex) {
2565 dp->flow_control =
2566 gem_fc_result[fc_cap_decode(advert)]
2567 [fc_cap_decode(lpable)];
2568 } else {
2569 dp->flow_control = FLOW_CONTROL_NONE;
2570 }
2571 dp->mii_state = MII_STATE_MEDIA_SETUP;
2572 /* FALLTHROUGH */
2573
2574 case MII_STATE_MEDIA_SETUP:
2575 dp->mii_state = MII_STATE_LINKDOWN;
2576 dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2577 		DPRINTF(2, (CE_CONT, "!%s: media mode setup done", dp->name));
2578 dp->mii_supress_msg = B_FALSE;
2579
2580 /* use short interval */
2581 dp->mii_interval = WATCH_INTERVAL_FAST;
2582
2583 if ((!dp->anadv_autoneg) ||
2584 dp->gc.gc_mii_an_oneshot || fix_phy) {
2585
2586 /*
2587 * write specified mode to phy.
2588 */
2589 val = gem_mii_read(dp, MII_CONTROL);
2590 val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
2591 MII_CONTROL_ANE | MII_CONTROL_RSAN);
2592
2593 if (dp->full_duplex) {
2594 val |= MII_CONTROL_FDUPLEX;
2595 }
2596
2597 switch (dp->speed) {
2598 case GEM_SPD_1000:
2599 val |= MII_CONTROL_1000MB;
2600 break;
2601
2602 case GEM_SPD_100:
2603 val |= MII_CONTROL_100MB;
2604 break;
2605
2606 default:
2607 cmn_err(CE_WARN, "%s: unknown speed:%d",
2608 dp->name, dp->speed);
2609 /* FALLTHROUGH */
2610 case GEM_SPD_10:
2611 /* for GEM_SPD_10, do nothing */
2612 break;
2613 }
2614
2615 if (dp->mii_status & MII_STATUS_XSTATUS) {
2616 gem_mii_write(dp,
2617 MII_1000TC, MII_1000TC_CFG_EN);
2618 }
2619 gem_mii_write(dp, MII_CONTROL, val);
2620 }
2621
2622 if (dp->nic_state >= NIC_STATE_INITIALIZED) {
2623 /* notify the result of auto-negotiation to mac */
2624 (*dp->gc.gc_set_media)(dp);
2625 }
2626
2627 if ((void *)dp->gc.gc_mii_tune_phy) {
2628 /* for built-in sis900 */
2629 /* XXX - this code should be removed. */
2630 (*dp->gc.gc_mii_tune_phy)(dp);
2631 }
2632
2633 goto next_nowait;
2634
2635 case MII_STATE_LINKDOWN:
2636 status = gem_mii_read(dp, MII_STATUS);
2637 if (status & MII_STATUS_LINKUP) {
2638 /*
2639 * Link going up
2640 */
2641 dp->mii_state = MII_STATE_LINKUP;
2642 dp->mii_supress_msg = B_FALSE;
2643
2644 DPRINTF(0, (CE_CONT,
2645 "!%s: link up detected: mii_stat:%b",
2646 dp->name, status, MII_STATUS_BITS));
2647
2648 /*
2649 * MII_CONTROL_100MB and MII_CONTROL_FDUPLEX are
2650 * ignored when MII_CONTROL_ANE is set.
2651 */
2652 cmn_err(CE_CONT,
2653 "!%s: Link up: %d Mbps %s duplex %s flow control",
2654 dp->name,
2655 gem_speed_value[dp->speed],
2656 dp->full_duplex ? "full" : "half",
2657 gem_fc_type[dp->flow_control]);
2658
2659 dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2660
2661 			/* XXX - we need another timer to watch statistics */
2662 if (dp->gc.gc_mii_hw_link_detection &&
2663 dp->nic_state == NIC_STATE_ONLINE) {
2664 dp->mii_interval = 0;
2665 }
2666
2667 if (dp->nic_state == NIC_STATE_ONLINE) {
2668 if (!dp->mac_active) {
2669 (void) gem_mac_start(dp);
2670 }
2671 tx_sched = B_TRUE;
2672 }
2673 goto next;
2674 }
2675
2676 dp->mii_supress_msg = B_TRUE;
2677 if (dp->anadv_autoneg) {
2678 dp->mii_timer -= diff;
2679 if (dp->mii_timer <= 0) {
2680 /*
2681 * link down timer expired.
2682 * need to restart auto-negotiation.
2683 */
2684 linkdown_action =
2685 dp->gc.gc_mii_linkdown_timeout_action;
2686 goto restart_autonego;
2687 }
2688 }
2689 /* don't change mii_state */
2690 break;
2691
2692 case MII_STATE_LINKUP:
2693 status = gem_mii_read(dp, MII_STATUS);
2694 if ((status & MII_STATUS_LINKUP) == 0) {
2695 /*
2696 * Link going down
2697 */
2698 cmn_err(CE_NOTE,
2699 "!%s: link down detected: mii_stat:%b",
2700 dp->name, status, MII_STATUS_BITS);
2701
2702 if (dp->nic_state == NIC_STATE_ONLINE &&
2703 dp->mac_active &&
2704 dp->gc.gc_mii_stop_mac_on_linkdown) {
2705 (void) gem_mac_stop(dp, 0);
2706
2707 if (dp->tx_blocked) {
2708 /* drain tx */
2709 tx_sched = B_TRUE;
2710 }
2711 }
2712
2713 if (dp->anadv_autoneg) {
2714 /* need to restart auto-negotiation */
2715 linkdown_action = dp->gc.gc_mii_linkdown_action;
2716 goto restart_autonego;
2717 }
2718
2719 dp->mii_state = MII_STATE_LINKDOWN;
2720 dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2721
2722 if ((void *)dp->gc.gc_mii_tune_phy) {
2723 /* for built-in sis900 */
2724 (*dp->gc.gc_mii_tune_phy)(dp);
2725 }
2726 dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2727 goto next;
2728 }
2729
2730 /* don't change mii_state */
2731 if (dp->gc.gc_mii_hw_link_detection &&
2732 dp->nic_state == NIC_STATE_ONLINE) {
2733 dp->mii_interval = 0;
2734 goto next;
2735 }
2736 break;
2737 }
2738 dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2739 goto next;
2740
2741 /* Actions on the end of state routine */
2742
2743 restart_autonego:
2744 switch (linkdown_action) {
2745 case MII_ACTION_RESET:
2746 if (!dp->mii_supress_msg) {
2747 cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2748 }
2749 dp->mii_supress_msg = B_TRUE;
2750 goto reset_phy;
2751
2752 case MII_ACTION_NONE:
2753 dp->mii_supress_msg = B_TRUE;
2754 if (dp->gc.gc_mii_an_oneshot) {
2755 goto autonego;
2756 }
2757 /* PHY will restart autonego automatically */
2758 dp->mii_state = MII_STATE_AUTONEGOTIATING;
2759 dp->mii_timer = dp->gc.gc_mii_an_timeout;
2760 dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2761 goto next;
2762
2763 case MII_ACTION_RSA:
2764 if (!dp->mii_supress_msg) {
2765 cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
2766 dp->name);
2767 }
2768 dp->mii_supress_msg = B_TRUE;
2769 goto autonego;
2770
2771 default:
2772 		cmn_err(CE_WARN, "!%s: unknown linkdown action: %d",
2773 dp->name, dp->gc.gc_mii_linkdown_action);
2774 dp->mii_supress_msg = B_TRUE;
2775 }
2776 /* NOTREACHED */
2777
2778 reset_phy:
2779 if (!dp->mii_supress_msg) {
2780 cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2781 }
2782 dp->mii_state = MII_STATE_RESETTING;
2783 dp->mii_timer = dp->gc.gc_mii_reset_timeout;
2784 if (!dp->gc.gc_mii_dont_reset) {
2785 gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
2786 }
2787 dp->mii_interval = WATCH_INTERVAL_FAST;
2788 goto next;
2789
2790 autonego:
2791 if (!dp->mii_supress_msg) {
2792 cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
2793 }
2794 dp->mii_state = MII_STATE_AUTONEGOTIATING;
2795 dp->mii_timer = dp->gc.gc_mii_an_timeout;
2796
2797 /* start/restart auto nego */
2798 val = gem_mii_read(dp, MII_CONTROL) &
2799 ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);
2800
2801 gem_mii_write(dp, MII_CONTROL,
2802 val | MII_CONTROL_RSAN | MII_CONTROL_ANE);
2803
2804 dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2805
2806 next:
2807 if (dp->link_watcher_id == 0 && dp->mii_interval) {
2808 /* we must schedule next mii_watcher */
2809 dp->link_watcher_id =
2810 timeout((void (*)(void *))&gem_mii_link_watcher,
2811 (void *)dp, dp->mii_interval);
2812 }
2813
2814 if (old_mii_state != dp->mii_state) {
2815 /* notify new mii link state */
2816 if (dp->mii_state == MII_STATE_LINKUP) {
2817 dp->linkup_delay = 0;
2818 GEM_LINKUP(dp);
2819 } else if (dp->linkup_delay <= 0) {
2820 GEM_LINKDOWN(dp);
2821 }
2822 } else if (dp->linkup_delay < 0) {
2823 /* first linkup timeout */
2824 dp->linkup_delay = 0;
2825 GEM_LINKDOWN(dp);
2826 }
2827
2828 return (tx_sched);
2829 }
2830
2831 static void
2832 gem_mii_link_watcher(struct gem_dev *dp)
2833 {
2834 boolean_t tx_sched;
2835
2836 mutex_enter(&dp->intrlock);
2837
2838 dp->link_watcher_id = 0;
2839 tx_sched = gem_mii_link_check(dp);
2840 #if GEM_DEBUG_LEVEL > 2
2841 if (dp->link_watcher_id == 0) {
2842 cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
2843 }
2844 #endif
2845 mutex_exit(&dp->intrlock);
2846
2847 if (tx_sched) {
2848 /* kick potentially stopped downstream */
2849 mac_tx_update(dp->mh);
2850 }
2851 }
2852
2853 int
2854 gem_mii_probe_default(struct gem_dev *dp)
2855 {
2856 int8_t phy;
2857 uint16_t status;
2858 uint16_t adv;
2859 uint16_t adv_org;
2860
2861 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2862
2863 /*
2864 * Scan PHY
2865 */
2866 	/* ensure that sync bits are sent */
2867 dp->mii_status = 0;
2868
2869 /* Try default phy first */
2870 if (dp->mii_phy_addr) {
2871 status = gem_mii_read(dp, MII_STATUS);
2872 if (status != 0xffff && status != 0) {
2873 gem_mii_write(dp, MII_CONTROL, 0);
2874 goto PHY_found;
2875 }
2876
2877 if (dp->mii_phy_addr < 0) {
2878 cmn_err(CE_NOTE,
2879 "!%s: failed to probe default internal and/or non-MII PHY",
2880 dp->name);
2881 return (GEM_FAILURE);
2882 }
2883
2884 cmn_err(CE_NOTE,
2885 "!%s: failed to probe default MII PHY at %d",
2886 dp->name, dp->mii_phy_addr);
2887 }
2888
2889 	/* Try all possible addresses */
2890 for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2891 dp->mii_phy_addr = phy;
2892 status = gem_mii_read(dp, MII_STATUS);
2893
2894 if (status != 0xffff && status != 0) {
2895 gem_mii_write(dp, MII_CONTROL, 0);
2896 goto PHY_found;
2897 }
2898 }
2899
2900 for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2901 dp->mii_phy_addr = phy;
2902 gem_mii_write(dp, MII_CONTROL, 0);
2903 status = gem_mii_read(dp, MII_STATUS);
2904
2905 if (status != 0xffff && status != 0) {
2906 goto PHY_found;
2907 }
2908 }
2909
2910 cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
2911 dp->mii_phy_addr = -1;
2912
2913 return (GEM_FAILURE);
2914
2915 PHY_found:
2916 dp->mii_status = status;
2917 dp->mii_phy_id = (gem_mii_read(dp, MII_PHYIDH) << 16) |
2918 gem_mii_read(dp, MII_PHYIDL);
2919
2920 if (dp->mii_phy_addr < 0) {
2921 cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
2922 dp->name, dp->mii_phy_id);
2923 } else {
2924 cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
2925 dp->name, dp->mii_phy_id, dp->mii_phy_addr);
2926 }
2927
2928 cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
2929 dp->name,
2930 gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
2931 status, MII_STATUS_BITS,
2932 gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
2933 gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);
2934
2935 dp->mii_xstatus = 0;
2936 if (status & MII_STATUS_XSTATUS) {
2937 dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);
2938
2939 cmn_err(CE_CONT, "!%s: xstatus:%b",
2940 dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
2941 }
2942
2943 	/* check if the phy can advertise pause abilities */
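	/*
	 * Probe by writing both pause bits to the advertisement register
	 * and reading them back: bits the PHY cannot latch read back as
	 * zero, and the corresponding gc_flow_control capability bits are
	 * cleared. The original advertisement value is restored afterwards.
	 */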
2944 adv_org = gem_mii_read(dp, MII_AN_ADVERT);
2945
2946 gem_mii_write(dp, MII_AN_ADVERT,
2947 MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE);
2948
2949 adv = gem_mii_read(dp, MII_AN_ADVERT);
2950
2951 if ((adv & MII_ABILITY_PAUSE) == 0) {
2952 dp->gc.gc_flow_control &= ~1;
2953 }
2954
2955 if ((adv & MII_ABILITY_ASMPAUSE) == 0) {
2956 dp->gc.gc_flow_control &= ~2;
2957 }
2958
2959 gem_mii_write(dp, MII_AN_ADVERT, adv_org);
2960
2961 return (GEM_SUCCESS);
2962 }
2963
2964 static void
2965 gem_mii_start(struct gem_dev *dp)
2966 {
2967 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2968
2969 	/* make the first link-check call */
2970 dp->mii_state = MII_STATE_UNKNOWN;
2971 dp->mii_last_check = ddi_get_lbolt();
2972 dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
2973 (void) gem_mii_link_watcher(dp);
2974 }
2975
2976 static void
2977 gem_mii_stop(struct gem_dev *dp)
2978 {
2979 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2980
2981 /* Ensure timer routine stopped */
2982 mutex_enter(&dp->intrlock);
2983 if (dp->link_watcher_id) {
2984 while (untimeout(dp->link_watcher_id) == -1)
2985 ;
2986 dp->link_watcher_id = 0;
2987 }
2988 mutex_exit(&dp->intrlock);
2989 }
2990
2991 boolean_t
2992 gem_get_mac_addr_conf(struct gem_dev *dp)
2993 {
2994 char propname[32];
2995 char *valstr;
2996 uint8_t mac[ETHERADDRL];
2997 char *cp;
2998 int c;
2999 int i;
3000 int j;
3001 uint8_t v;
3002 uint8_t d;
3003 uint8_t ored;
3004
3005 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3006 /*
3007 * Get ethernet address from .conf file
3008 */
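	/*
	 * The property value must be six colon-separated hex octets, e.g.
	 * mac-addr="00:11:22:33:44:55" (the value shown is only an
	 * illustration); an all-zero address is rejected.
	 */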
3009 (void) sprintf(propname, "mac-addr");
3010 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
3011 DDI_PROP_DONTPASS, propname, &valstr)) !=
3012 DDI_PROP_SUCCESS) {
3013 return (B_FALSE);
3014 }
3015
3016 if (strlen(valstr) != ETHERADDRL*3-1) {
3017 goto syntax_err;
3018 }
3019
3020 cp = valstr;
3021 j = 0;
3022 ored = 0;
3023 for (;;) {
3024 v = 0;
3025 for (i = 0; i < 2; i++) {
3026 c = *cp++;
3027
3028 if (c >= 'a' && c <= 'f') {
3029 d = c - 'a' + 10;
3030 } else if (c >= 'A' && c <= 'F') {
3031 d = c - 'A' + 10;
3032 } else if (c >= '0' && c <= '9') {
3033 d = c - '0';
3034 } else {
3035 goto syntax_err;
3036 }
3037 v = (v << 4) | d;
3038 }
3039
3040 mac[j++] = v;
3041 ored |= v;
3042 if (j == ETHERADDRL) {
3043 /* done */
3044 break;
3045 }
3046
3047 c = *cp++;
3048 if (c != ':') {
3049 goto syntax_err;
3050 }
3051 }
3052
3053 if (ored == 0) {
3054 goto err;
3055 }
3056 for (i = 0; i < ETHERADDRL; i++) {
3057 dp->dev_addr.ether_addr_octet[i] = mac[i];
3058 }
3059 ddi_prop_free(valstr);
3060 return (B_TRUE);
3061
3062 syntax_err:
3063 cmn_err(CE_CONT,
3064 "!%s: read mac addr: trying .conf: syntax err %s",
3065 dp->name, valstr);
3066 err:
3067 ddi_prop_free(valstr);
3068
3069 return (B_FALSE);
3070 }
3071
3072
3073 /* ============================================================== */
3074 /*
3075 * internal start/stop interface
3076 */
3077 /* ============================================================== */
3078 static int
3079 gem_mac_set_rx_filter(struct gem_dev *dp)
3080 {
3081 return ((*dp->gc.gc_set_rx_filter)(dp));
3082 }
3083
3084 /*
3085 * gem_mac_init: cold start
3086 */
3087 static int
3088 gem_mac_init(struct gem_dev *dp)
3089 {
3090 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3091
3092 if (dp->mac_suspended) {
3093 return (GEM_FAILURE);
3094 }
3095
3096 dp->mac_active = B_FALSE;
3097
3098 gem_init_rx_ring(dp);
3099 gem_init_tx_ring(dp);
3100
3101 /* reset transmitter state */
3102 dp->tx_blocked = (clock_t)0;
3103 dp->tx_busy = 0;
3104 dp->tx_reclaim_busy = 0;
3105 dp->tx_max_packets = dp->gc.gc_tx_buf_limit;
3106
3107 if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
3108 return (GEM_FAILURE);
3109 }
3110
3111 gem_prepare_rx_buf(dp);
3112
3113 return (GEM_SUCCESS);
3114 }
3115 /*
3116 * gem_mac_start: warm start
3117 */
3118 static int
3119 gem_mac_start(struct gem_dev *dp)
3120 {
3121 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3122
3123 ASSERT(mutex_owned(&dp->intrlock));
3124 ASSERT(dp->nic_state == NIC_STATE_ONLINE);
3125 ASSERT(dp->mii_state == MII_STATE_LINKUP);
3126
3127 /* enable tx and rx */
3128 mutex_enter(&dp->xmitlock);
3129 if (dp->mac_suspended) {
3130 mutex_exit(&dp->xmitlock);
3131 return (GEM_FAILURE);
3132 }
3133 dp->mac_active = B_TRUE;
3134 mutex_exit(&dp->xmitlock);
3135
3136 /* setup rx buffers */
3137 (*dp->gc.gc_rx_start)(dp,
3138 SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
3139 dp->rx_active_tail - dp->rx_active_head);
3140
3141 if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
3142 cmn_err(CE_WARN, "%s: %s: start_chip: failed",
3143 dp->name, __func__);
3144 return (GEM_FAILURE);
3145 }
3146
3147 mutex_enter(&dp->xmitlock);
3148
3149 	/* load untransmitted packets into the nic */
3150 ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
3151 if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
3152 gem_tx_load_descs_oo(dp,
3153 dp->tx_softq_head, dp->tx_softq_tail,
3154 GEM_TXFLAG_HEAD);
3155 /* issue preloaded tx buffers */
3156 gem_tx_start_unit(dp);
3157 }
3158
3159 mutex_exit(&dp->xmitlock);
3160
3161 return (GEM_SUCCESS);
3162 }
3163
3164 static int
3165 gem_mac_stop(struct gem_dev *dp, uint_t flags)
3166 {
3167 int i;
3168 int wait_time; /* in uS */
3169 #ifdef GEM_DEBUG_LEVEL
3170 clock_t now;
3171 #endif
3172 int ret = GEM_SUCCESS;
3173
3174 DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
3175 dp->name, __func__, dp->rx_buf_freecnt));
3176
3177 ASSERT(mutex_owned(&dp->intrlock));
3178 ASSERT(!mutex_owned(&dp->xmitlock));
3179
3180 /*
3181 * Block transmits
3182 */
3183 mutex_enter(&dp->xmitlock);
3184 if (dp->mac_suspended) {
3185 mutex_exit(&dp->xmitlock);
3186 return (GEM_SUCCESS);
3187 }
3188 dp->mac_active = B_FALSE;
3189
3190 while (dp->tx_busy > 0) {
3191 cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
3192 }
3193 mutex_exit(&dp->xmitlock);
3194
3195 if ((flags & GEM_RESTART_NOWAIT) == 0) {
3196 /*
3197 		 * Wait until all tx buffers are sent.
3198 */
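		/*
		 * Estimate the worst-case drain time in microseconds:
		 * 8 bits/byte * max packet size / speed in Mbps gives the
		 * per-packet wire time in uS; multiply by the number of
		 * outstanding tx buffers and by 2 as a safety margin.
		 */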
3199 wait_time =
3200 2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
3201 (dp->tx_active_tail - dp->tx_active_head);
3202
3203 DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
3204 dp->name, __func__, wait_time));
3205 i = 0;
3206 #ifdef GEM_DEBUG_LEVEL
3207 now = ddi_get_lbolt();
3208 #endif
3209 while (dp->tx_active_tail != dp->tx_active_head) {
3210 if (i > wait_time) {
3211 /* timeout */
3212 cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
3213 dp->name, __func__);
3214 break;
3215 }
3216 (void) gem_reclaim_txbuf(dp);
3217 drv_usecwait(100);
3218 i += 100;
3219 }
3220 DPRINTF(0, (CE_NOTE,
3221 		    "!%s: %s: the nic has drained in %d uS, real %d mS",
3222 dp->name, __func__, i,
3223 10*((int)(ddi_get_lbolt() - now))));
3224 }
3225
3226 /*
3227 * Now we can stop the nic safely.
3228 */
3229 if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
3230 cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
3231 dp->name, __func__);
3232 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
3233 cmn_err(CE_WARN, "%s: %s: failed to reset chip",
3234 dp->name, __func__);
3235 }
3236 }
3237
3238 /*
3239 * Clear all rx buffers
3240 */
3241 if (flags & GEM_RESTART_KEEP_BUF) {
3242 (void) gem_receive(dp);
3243 }
3244 gem_clean_rx_buf(dp);
3245
3246 /*
3247 * Update final statistics
3248 */
3249 (*dp->gc.gc_get_stats)(dp);
3250
3251 /*
3252 	 * Clear all pending tx packets
3253 */
3254 ASSERT(dp->tx_active_tail == dp->tx_softq_head);
3255 ASSERT(dp->tx_softq_tail == dp->tx_free_head);
3256 if (flags & GEM_RESTART_KEEP_BUF) {
3257 /* restore active tx buffers */
3258 dp->tx_active_tail = dp->tx_active_head;
3259 dp->tx_softq_head = dp->tx_active_head;
3260 } else {
3261 gem_clean_tx_buf(dp);
3262 }
3263
3264 return (ret);
3265 }
3266
3267 static int
3268 gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
3269 {
3270 int cnt;
3271 int err;
3272
3273 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3274
3275 mutex_enter(&dp->intrlock);
3276 if (dp->mac_suspended) {
3277 mutex_exit(&dp->intrlock);
3278 return (GEM_FAILURE);
3279 }
3280
3281 if (dp->mc_count_req++ < GEM_MAXMC) {
3282 /* append the new address at the end of the mclist */
3283 cnt = dp->mc_count;
3284 bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
3285 ETHERADDRL);
3286 if (dp->gc.gc_multicast_hash) {
3287 dp->mc_list[cnt].hash =
3288 (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
3289 }
3290 dp->mc_count = cnt + 1;
3291 }
3292
3293 if (dp->mc_count_req != dp->mc_count) {
3294 /* multicast address list overflow */
3295 dp->rxmode |= RXMODE_MULTI_OVF;
3296 } else {
3297 dp->rxmode &= ~RXMODE_MULTI_OVF;
3298 }
3299
3300 	/* pass the new multicast list to the hardware */
3301 err = gem_mac_set_rx_filter(dp);
3302
3303 mutex_exit(&dp->intrlock);
3304
3305 return (err);
3306 }
3307
3308 static int
3309 gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
3310 {
3311 size_t len;
3312 int i;
3313 int cnt;
3314 int err;
3315
3316 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3317
3318 mutex_enter(&dp->intrlock);
3319 if (dp->mac_suspended) {
3320 mutex_exit(&dp->intrlock);
3321 return (GEM_FAILURE);
3322 }
3323
3324 dp->mc_count_req--;
3325 cnt = dp->mc_count;
3326 for (i = 0; i < cnt; i++) {
3327 if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
3328 continue;
3329 }
3330 /* shrink the mclist by copying forward */
3331 len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
3332 if (len > 0) {
3333 bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
3334 }
3335 dp->mc_count--;
3336 break;
3337 }
3338
3339 if (dp->mc_count_req != dp->mc_count) {
3340 /* multicast address list overflow */
3341 dp->rxmode |= RXMODE_MULTI_OVF;
3342 } else {
3343 dp->rxmode &= ~RXMODE_MULTI_OVF;
3344 }
3345 /* In gem v2, don't hold xmitlock on calling set_rx_filter */
3346 err = gem_mac_set_rx_filter(dp);
3347
3348 mutex_exit(&dp->intrlock);
3349
3350 return (err);
3351 }
3352
3353 /* ============================================================== */
3354 /*
3355 * ND interface
3356 */
3357 /* ============================================================== */
3358 enum {
3359 PARAM_AUTONEG_CAP,
3360 PARAM_PAUSE_CAP,
3361 PARAM_ASYM_PAUSE_CAP,
3362 PARAM_1000FDX_CAP,
3363 PARAM_1000HDX_CAP,
3364 PARAM_100T4_CAP,
3365 PARAM_100FDX_CAP,
3366 PARAM_100HDX_CAP,
3367 PARAM_10FDX_CAP,
3368 PARAM_10HDX_CAP,
3369
3370 PARAM_ADV_AUTONEG_CAP,
3371 PARAM_ADV_PAUSE_CAP,
3372 PARAM_ADV_ASYM_PAUSE_CAP,
3373 PARAM_ADV_1000FDX_CAP,
3374 PARAM_ADV_1000HDX_CAP,
3375 PARAM_ADV_100T4_CAP,
3376 PARAM_ADV_100FDX_CAP,
3377 PARAM_ADV_100HDX_CAP,
3378 PARAM_ADV_10FDX_CAP,
3379 PARAM_ADV_10HDX_CAP,
3380
3381 PARAM_LP_AUTONEG_CAP,
3382 PARAM_LP_PAUSE_CAP,
3383 PARAM_LP_ASYM_PAUSE_CAP,
3384 PARAM_LP_1000FDX_CAP,
3385 PARAM_LP_1000HDX_CAP,
3386 PARAM_LP_100T4_CAP,
3387 PARAM_LP_100FDX_CAP,
3388 PARAM_LP_100HDX_CAP,
3389 PARAM_LP_10FDX_CAP,
3390 PARAM_LP_10HDX_CAP,
3391
3392 PARAM_LINK_STATUS,
3393 PARAM_LINK_SPEED,
3394 PARAM_LINK_DUPLEX,
3395
3396 PARAM_LINK_AUTONEG,
3397 PARAM_LINK_RX_PAUSE,
3398 PARAM_LINK_TX_PAUSE,
3399
3400 PARAM_LOOP_MODE,
3401 PARAM_MSI_CNT,
3402
3403 #ifdef DEBUG_RESUME
3404 PARAM_RESUME_TEST,
3405 #endif
3406 PARAM_COUNT
3407 };
3408
3409 enum ioc_reply {
3410 IOC_INVAL = -1, /* bad, NAK with EINVAL */
3411 IOC_DONE, /* OK, reply sent */
3412 IOC_ACK, /* OK, just send ACK */
3413 IOC_REPLY, /* OK, just send reply */
3414 IOC_RESTART_ACK, /* OK, restart & ACK */
3415 IOC_RESTART_REPLY /* OK, restart & reply */
3416 };
3417
3418 struct gem_nd_arg {
3419 struct gem_dev *dp;
3420 int item;
3421 };
3422
3423 static int
3424 gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
3425 {
3426 struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3427 int item = ((struct gem_nd_arg *)(void *)arg)->item;
3428 long val;
3429
3430 DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d",
3431 dp->name, __func__, item));
3432
3433 switch (item) {
3434 case PARAM_AUTONEG_CAP:
3435 val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
3436 DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val));
3437 break;
3438
3439 case PARAM_PAUSE_CAP:
3440 val = BOOLEAN(dp->gc.gc_flow_control & 1);
3441 break;
3442
3443 case PARAM_ASYM_PAUSE_CAP:
3444 val = BOOLEAN(dp->gc.gc_flow_control & 2);
3445 break;
3446
3447 case PARAM_1000FDX_CAP:
3448 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
3449 (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
3450 break;
3451
3452 case PARAM_1000HDX_CAP:
3453 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
3454 (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
3455 break;
3456
3457 case PARAM_100T4_CAP:
3458 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
3459 break;
3460
3461 case PARAM_100FDX_CAP:
3462 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
3463 break;
3464
3465 case PARAM_100HDX_CAP:
3466 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
3467 break;
3468
3469 case PARAM_10FDX_CAP:
3470 val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
3471 break;
3472
3473 case PARAM_10HDX_CAP:
3474 val = BOOLEAN(dp->mii_status & MII_STATUS_10);
3475 break;
3476
3477 case PARAM_ADV_AUTONEG_CAP:
3478 val = dp->anadv_autoneg;
3479 break;
3480
3481 case PARAM_ADV_PAUSE_CAP:
3482 val = BOOLEAN(dp->anadv_flow_control & 1);
3483 break;
3484
3485 case PARAM_ADV_ASYM_PAUSE_CAP:
3486 val = BOOLEAN(dp->anadv_flow_control & 2);
3487 break;
3488
3489 case PARAM_ADV_1000FDX_CAP:
3490 val = dp->anadv_1000fdx;
3491 break;
3492
3493 case PARAM_ADV_1000HDX_CAP:
3494 val = dp->anadv_1000hdx;
3495 break;
3496
3497 case PARAM_ADV_100T4_CAP:
3498 val = dp->anadv_100t4;
3499 break;
3500
3501 case PARAM_ADV_100FDX_CAP:
3502 val = dp->anadv_100fdx;
3503 break;
3504
3505 case PARAM_ADV_100HDX_CAP:
3506 val = dp->anadv_100hdx;
3507 break;
3508
3509 case PARAM_ADV_10FDX_CAP:
3510 val = dp->anadv_10fdx;
3511 break;
3512
3513 case PARAM_ADV_10HDX_CAP:
3514 val = dp->anadv_10hdx;
3515 break;
3516
3517 case PARAM_LP_AUTONEG_CAP:
3518 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3519 break;
3520
3521 case PARAM_LP_PAUSE_CAP:
3522 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
3523 break;
3524
3525 case PARAM_LP_ASYM_PAUSE_CAP:
3526 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
3527 break;
3528
3529 case PARAM_LP_1000FDX_CAP:
3530 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
3531 break;
3532
3533 case PARAM_LP_1000HDX_CAP:
3534 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
3535 break;
3536
3537 case PARAM_LP_100T4_CAP:
3538 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
3539 break;
3540
3541 case PARAM_LP_100FDX_CAP:
3542 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
3543 break;
3544
3545 case PARAM_LP_100HDX_CAP:
3546 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
3547 break;
3548
3549 case PARAM_LP_10FDX_CAP:
3550 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
3551 break;
3552
3553 case PARAM_LP_10HDX_CAP:
3554 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
3555 break;
3556
3557 case PARAM_LINK_STATUS:
3558 val = (dp->mii_state == MII_STATE_LINKUP);
3559 break;
3560
3561 case PARAM_LINK_SPEED:
3562 val = gem_speed_value[dp->speed];
3563 break;
3564
3565 case PARAM_LINK_DUPLEX:
3566 val = 0;
3567 if (dp->mii_state == MII_STATE_LINKUP) {
3568 val = dp->full_duplex ? 2 : 1;
3569 }
3570 break;
3571
3572 case PARAM_LINK_AUTONEG:
3573 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3574 break;
3575
3576 case PARAM_LINK_RX_PAUSE:
3577 val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3578 (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
3579 break;
3580
3581 case PARAM_LINK_TX_PAUSE:
3582 val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3583 (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
3584 break;
3585
3586 #ifdef DEBUG_RESUME
3587 case PARAM_RESUME_TEST:
3588 val = 0;
3589 break;
3590 #endif
3591 default:
3592 cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
3593 dp->name, item);
3594 break;
3595 }
3596
3597 (void) mi_mpprintf(mp, "%ld", val);
3598
3599 return (0);
3600 }
3601
3602 static int
3603 gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
3604 {
3605 struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3606 int item = ((struct gem_nd_arg *)(void *)arg)->item;
3607 long val;
3608 char *end;
3609
3610 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3611 if (ddi_strtol(value, &end, 10, &val)) {
3612 return (EINVAL);
3613 }
3614 if (end == value) {
3615 return (EINVAL);
3616 }
3617
3618 switch (item) {
3619 case PARAM_ADV_AUTONEG_CAP:
3620 if (val != 0 && val != 1) {
3621 goto err;
3622 }
3623 if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
3624 goto err;
3625 }
3626 dp->anadv_autoneg = (int)val;
3627 break;
3628
3629 case PARAM_ADV_PAUSE_CAP:
3630 if (val != 0 && val != 1) {
3631 goto err;
3632 }
3633 if (val) {
3634 dp->anadv_flow_control |= 1;
3635 } else {
3636 dp->anadv_flow_control &= ~1;
3637 }
3638 break;
3639
3640 case PARAM_ADV_ASYM_PAUSE_CAP:
3641 if (val != 0 && val != 1) {
3642 goto err;
3643 }
3644 if (val) {
3645 dp->anadv_flow_control |= 2;
3646 } else {
3647 dp->anadv_flow_control &= ~2;
3648 }
3649 break;
3650
3651 case PARAM_ADV_1000FDX_CAP:
3652 if (val != 0 && val != 1) {
3653 goto err;
3654 }
3655 if (val && (dp->mii_xstatus &
3656 (MII_XSTATUS_1000BASET_FD |
3657 MII_XSTATUS_1000BASEX_FD)) == 0) {
3658 goto err;
3659 }
3660 dp->anadv_1000fdx = (int)val;
3661 break;
3662
3663 case PARAM_ADV_1000HDX_CAP:
3664 if (val != 0 && val != 1) {
3665 goto err;
3666 }
3667 if (val && (dp->mii_xstatus &
3668 (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
3669 goto err;
3670 }
3671 dp->anadv_1000hdx = (int)val;
3672 break;
3673
3674 case PARAM_ADV_100T4_CAP:
3675 if (val != 0 && val != 1) {
3676 goto err;
3677 }
3678 if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
3679 goto err;
3680 }
3681 dp->anadv_100t4 = (int)val;
3682 break;
3683
3684 case PARAM_ADV_100FDX_CAP:
3685 if (val != 0 && val != 1) {
3686 goto err;
3687 }
3688 if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
3689 goto err;
3690 }
3691 dp->anadv_100fdx = (int)val;
3692 break;
3693
3694 case PARAM_ADV_100HDX_CAP:
3695 if (val != 0 && val != 1) {
3696 goto err;
3697 }
3698 if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
3699 goto err;
3700 }
3701 dp->anadv_100hdx = (int)val;
3702 break;
3703
3704 case PARAM_ADV_10FDX_CAP:
3705 if (val != 0 && val != 1) {
3706 goto err;
3707 }
3708 if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
3709 goto err;
3710 }
3711 dp->anadv_10fdx = (int)val;
3712 break;
3713
3714 case PARAM_ADV_10HDX_CAP:
3715 if (val != 0 && val != 1) {
3716 goto err;
3717 }
3718 if (val && (dp->mii_status & MII_STATUS_10) == 0) {
3719 goto err;
3720 }
3721 dp->anadv_10hdx = (int)val;
3722 break;
3723 }
3724
3725 /* sync with PHY */
3726 gem_choose_forcedmode(dp);
3727
3728 dp->mii_state = MII_STATE_UNKNOWN;
3729 if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
3730 /* XXX - Can we ignore the return code ? */
3731 (void) gem_mii_link_check(dp);
3732 }
3733
3734 return (0);
3735 err:
3736 return (EINVAL);
3737 }
3738
3739 static void
3740 gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
3741 {
3742 struct gem_nd_arg *arg;
3743
3744 ASSERT(item >= 0);
3745 ASSERT(item < PARAM_COUNT);
3746
3747 arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
3748 arg->dp = dp;
3749 arg->item = item;
3750
3751 DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
3752 dp->name, __func__, name, item));
3753 (void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
3754 }
3755
3756 static void
3757 gem_nd_setup(struct gem_dev *dp)
3758 {
3759 DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
3760 dp->name, __func__, dp->mii_status, MII_STATUS_BITS));
3761
3762 ASSERT(dp->nd_arg_p == NULL);
3763
3764 dp->nd_arg_p =
3765 kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP);
3766
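	/*
	 * SETFUNC(cap) yields gem_param_set only when the corresponding
	 * capability is present; otherwise the ndd parameter is registered
	 * without a set routine and is effectively read-only.
	 */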
3767 #define SETFUNC(x) ((x) ? gem_param_set : NULL)
3768
3769 gem_nd_load(dp, "autoneg_cap",
3770 gem_param_get, NULL, PARAM_AUTONEG_CAP);
3771 gem_nd_load(dp, "pause_cap",
3772 gem_param_get, NULL, PARAM_PAUSE_CAP);
3773 gem_nd_load(dp, "asym_pause_cap",
3774 gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
3775 gem_nd_load(dp, "1000fdx_cap",
3776 gem_param_get, NULL, PARAM_1000FDX_CAP);
3777 gem_nd_load(dp, "1000hdx_cap",
3778 gem_param_get, NULL, PARAM_1000HDX_CAP);
3779 gem_nd_load(dp, "100T4_cap",
3780 gem_param_get, NULL, PARAM_100T4_CAP);
3781 gem_nd_load(dp, "100fdx_cap",
3782 gem_param_get, NULL, PARAM_100FDX_CAP);
3783 gem_nd_load(dp, "100hdx_cap",
3784 gem_param_get, NULL, PARAM_100HDX_CAP);
3785 gem_nd_load(dp, "10fdx_cap",
3786 gem_param_get, NULL, PARAM_10FDX_CAP);
3787 gem_nd_load(dp, "10hdx_cap",
3788 gem_param_get, NULL, PARAM_10HDX_CAP);
3789
3790 /* Our advertised capabilities */
3791 gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
3792 SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
3793 PARAM_ADV_AUTONEG_CAP);
3794 gem_nd_load(dp, "adv_pause_cap", gem_param_get,
3795 SETFUNC(dp->gc.gc_flow_control & 1),
3796 PARAM_ADV_PAUSE_CAP);
3797 gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
3798 SETFUNC(dp->gc.gc_flow_control & 2),
3799 PARAM_ADV_ASYM_PAUSE_CAP);
3800 gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
3801 SETFUNC(dp->mii_xstatus &
3802 (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
3803 PARAM_ADV_1000FDX_CAP);
3804 gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
3805 SETFUNC(dp->mii_xstatus &
3806 (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
3807 PARAM_ADV_1000HDX_CAP);
3808 gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
3809 SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
3810 !dp->mii_advert_ro),
3811 PARAM_ADV_100T4_CAP);
3812 gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
3813 SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
3814 !dp->mii_advert_ro),
3815 PARAM_ADV_100FDX_CAP);
3816 gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
3817 SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
3818 !dp->mii_advert_ro),
3819 PARAM_ADV_100HDX_CAP);
3820 gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
3821 SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
3822 !dp->mii_advert_ro),
3823 PARAM_ADV_10FDX_CAP);
3824 gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
3825 SETFUNC((dp->mii_status & MII_STATUS_10) &&
3826 !dp->mii_advert_ro),
3827 PARAM_ADV_10HDX_CAP);
3828
3829 /* Partner's advertised capabilities */
3830 gem_nd_load(dp, "lp_autoneg_cap",
3831 gem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
3832 gem_nd_load(dp, "lp_pause_cap",
3833 gem_param_get, NULL, PARAM_LP_PAUSE_CAP);
3834 gem_nd_load(dp, "lp_asym_pause_cap",
3835 gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
3836 gem_nd_load(dp, "lp_1000fdx_cap",
3837 gem_param_get, NULL, PARAM_LP_1000FDX_CAP);
3838 gem_nd_load(dp, "lp_1000hdx_cap",
3839 gem_param_get, NULL, PARAM_LP_1000HDX_CAP);
3840 gem_nd_load(dp, "lp_100T4_cap",
3841 gem_param_get, NULL, PARAM_LP_100T4_CAP);
3842 gem_nd_load(dp, "lp_100fdx_cap",
3843 gem_param_get, NULL, PARAM_LP_100FDX_CAP);
3844 gem_nd_load(dp, "lp_100hdx_cap",
3845 gem_param_get, NULL, PARAM_LP_100HDX_CAP);
3846 gem_nd_load(dp, "lp_10fdx_cap",
3847 gem_param_get, NULL, PARAM_LP_10FDX_CAP);
3848 gem_nd_load(dp, "lp_10hdx_cap",
3849 gem_param_get, NULL, PARAM_LP_10HDX_CAP);
3850
3851 /* Current operating modes */
3852 gem_nd_load(dp, "link_status",
3853 gem_param_get, NULL, PARAM_LINK_STATUS);
3854 gem_nd_load(dp, "link_speed",
3855 gem_param_get, NULL, PARAM_LINK_SPEED);
3856 gem_nd_load(dp, "link_duplex",
3857 gem_param_get, NULL, PARAM_LINK_DUPLEX);
3858 gem_nd_load(dp, "link_autoneg",
3859 gem_param_get, NULL, PARAM_LINK_AUTONEG);
3860 gem_nd_load(dp, "link_rx_pause",
3861 gem_param_get, NULL, PARAM_LINK_RX_PAUSE);
3862 gem_nd_load(dp, "link_tx_pause",
3863 gem_param_get, NULL, PARAM_LINK_TX_PAUSE);
3864 #ifdef DEBUG_RESUME
3865 gem_nd_load(dp, "resume_test",
3866 gem_param_get, NULL, PARAM_RESUME_TEST);
3867 #endif
3868 #undef SETFUNC
3869 }
3870
3871 static
3872 enum ioc_reply
3873 gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
3874 {
3875 boolean_t ok;
3876
3877 ASSERT(mutex_owned(&dp->intrlock));
3878
3879 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3880
3881 switch (iocp->ioc_cmd) {
3882 case ND_GET:
3883 ok = nd_getset(wq, dp->nd_data_p, mp);
3884 DPRINTF(0, (CE_CONT,
3885 "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
3886 return (ok ? IOC_REPLY : IOC_INVAL);
3887
3888 case ND_SET:
3889 ok = nd_getset(wq, dp->nd_data_p, mp);
3890
3891 DPRINTF(0, (CE_CONT, "%s: set %s err %d",
3892 dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
3893
3894 if (!ok) {
3895 return (IOC_INVAL);
3896 }
3897
3898 if (iocp->ioc_error) {
3899 return (IOC_REPLY);
3900 }
3901
3902 return (IOC_RESTART_REPLY);
3903 }
3904
3905 cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
3906
3907 return (IOC_INVAL);
3908 }
3909
3910 static void
3911 gem_nd_cleanup(struct gem_dev *dp)
3912 {
3913 ASSERT(dp->nd_data_p != NULL);
3914 ASSERT(dp->nd_arg_p != NULL);
3915
3916 nd_free(&dp->nd_data_p);
3917
3918 kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
3919 dp->nd_arg_p = NULL;
3920 }
3921
3922 static void
3923 gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
3924 {
3925 struct iocblk *iocp;
3926 enum ioc_reply status;
3927 int cmd;
3928
3929 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3930
3931 /*
3932 * Validate the command before bothering with the mutex ...
3933 */
3934 iocp = (void *)mp->b_rptr;
3935 iocp->ioc_error = 0;
3936 cmd = iocp->ioc_cmd;
3937
3938 DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));
3939
3940 mutex_enter(&dp->intrlock);
3941 mutex_enter(&dp->xmitlock);
3942
3943 switch (cmd) {
3944 default:
3945 _NOTE(NOTREACHED)
3946 status = IOC_INVAL;
3947 break;
3948
3949 case ND_GET:
3950 case ND_SET:
3951 status = gem_nd_ioctl(dp, wq, mp, iocp);
3952 break;
3953 }
3954
3955 mutex_exit(&dp->xmitlock);
3956 mutex_exit(&dp->intrlock);
3957
3958 #ifdef DEBUG_RESUME
3959 if (cmd == ND_GET) {
3960 gem_suspend(dp->dip);
3961 gem_resume(dp->dip);
3962 }
3963 #endif
3964 /*
3965 * Finally, decide how to reply
3966 */
3967 switch (status) {
3968 default:
3969 case IOC_INVAL:
3970 /*
3971 * Error, reply with a NAK and EINVAL or the specified error
3972 */
3973 miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
3974 EINVAL : iocp->ioc_error);
3975 break;
3976
3977 case IOC_DONE:
3978 /*
3979 * OK, reply already sent
3980 */
3981 break;
3982
3983 case IOC_RESTART_ACK:
3984 case IOC_ACK:
3985 /*
3986 * OK, reply with an ACK
3987 */
3988 miocack(wq, mp, 0, 0);
3989 break;
3990
3991 case IOC_RESTART_REPLY:
3992 case IOC_REPLY:
3993 /*
3994 * OK, send prepared reply as ACK or NAK
3995 */
3996 mp->b_datap->db_type =
3997 iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
3998 qreply(wq, mp);
3999 break;
4000 }
4001 }
4002
4003 #ifndef SYS_MAC_H
4004 #define XCVR_UNDEFINED 0
4005 #define XCVR_NONE 1
4006 #define XCVR_10 2
4007 #define XCVR_100T4 3
4008 #define XCVR_100X 4
4009 #define XCVR_100T2 5
4010 #define XCVR_1000X 6
4011 #define XCVR_1000T 7
4012 #endif
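/*
 * gem_mac_xcvr_inuse: derive the transceiver type (XCVR_*) currently
 * in use from the PHY's basic and extended status ability bits.
 */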
4013 static int
4014 gem_mac_xcvr_inuse(struct gem_dev *dp)
4015 {
4016 int val = XCVR_UNDEFINED;
4017
4018 if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
4019 if (dp->mii_status & MII_STATUS_100_BASE_T4) {
4020 val = XCVR_100T4;
4021 } else if (dp->mii_status &
4022 (MII_STATUS_100_BASEX_FD |
4023 MII_STATUS_100_BASEX)) {
4024 val = XCVR_100X;
4025 } else if (dp->mii_status &
4026 (MII_STATUS_100_BASE_T2_FD |
4027 MII_STATUS_100_BASE_T2)) {
4028 val = XCVR_100T2;
4029 } else if (dp->mii_status &
4030 (MII_STATUS_10_FD | MII_STATUS_10)) {
4031 val = XCVR_10;
4032 }
4033 } else if (dp->mii_xstatus &
4034 (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
4035 val = XCVR_1000T;
4036 } else if (dp->mii_xstatus &
4037 (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
4038 val = XCVR_1000X;
4039 }
4040
4041 return (val);
4042 }
4043
4044 /* ============================================================== */
4045 /*
4046 * GLDv3 interface
4047 */
4048 /* ============================================================== */
4049 static int gem_m_getstat(void *, uint_t, uint64_t *);
4050 static int gem_m_start(void *);
4051 static void gem_m_stop(void *);
4052 static int gem_m_setpromisc(void *, boolean_t);
4053 static int gem_m_multicst(void *, boolean_t, const uint8_t *);
4054 static int gem_m_unicst(void *, const uint8_t *);
4055 static mblk_t *gem_m_tx(void *, mblk_t *);
4056 static void gem_m_ioctl(void *, queue_t *, mblk_t *);
4057 static boolean_t gem_m_getcapab(void *, mac_capab_t, void *);
4058
4059 #define GEM_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB)
4060
4061 static mac_callbacks_t gem_m_callbacks = {
4062 GEM_M_CALLBACK_FLAGS,
4063 gem_m_getstat,
4064 gem_m_start,
4065 gem_m_stop,
4066 gem_m_setpromisc,
4067 gem_m_multicst,
4068 gem_m_unicst,
4069 gem_m_tx,
4070 NULL,
4071 gem_m_ioctl,
4072 gem_m_getcapab,
4073 };
4074
4075 static int
4076 gem_m_start(void *arg)
4077 {
4078 int err = 0;
4079 struct gem_dev *dp = arg;
4080
4081 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4082
4083 mutex_enter(&dp->intrlock);
4084 if (dp->mac_suspended) {
4085 err = EIO;
4086 goto x;
4087 }
4088 if (gem_mac_init(dp) != GEM_SUCCESS) {
4089 err = EIO;
4090 goto x;
4091 }
4092 dp->nic_state = NIC_STATE_INITIALIZED;
4093
4094 /* reset rx filter state */
4095 dp->mc_count = 0;
4096 dp->mc_count_req = 0;
4097
4098 	/* set up the media mode if the link has come up */
4099 if (dp->mii_state == MII_STATE_LINKUP) {
4100 (dp->gc.gc_set_media)(dp);
4101 }
4102
4103 /* setup initial rx filter */
4104 bcopy(dp->dev_addr.ether_addr_octet,
4105 dp->cur_addr.ether_addr_octet, ETHERADDRL);
4106 dp->rxmode |= RXMODE_ENABLE;
4107
4108 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4109 err = EIO;
4110 goto x;
4111 }
4112
4113 dp->nic_state = NIC_STATE_ONLINE;
4114 if (dp->mii_state == MII_STATE_LINKUP) {
4115 if (gem_mac_start(dp) != GEM_SUCCESS) {
4116 err = EIO;
4117 goto x;
4118 }
4119 }
4120
4121 dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
4122 (void *)dp, dp->gc.gc_tx_timeout_interval);
4123 mutex_exit(&dp->intrlock);
4124
4125 return (0);
4126 x:
4127 dp->nic_state = NIC_STATE_STOPPED;
4128 mutex_exit(&dp->intrlock);
4129 return (err);
4130 }
4131
4132 static void
4133 gem_m_stop(void *arg)
4134 {
4135 struct gem_dev *dp = arg;
4136
4137 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4138
4139 /* stop rx */
4140 mutex_enter(&dp->intrlock);
4141 if (dp->mac_suspended) {
4142 mutex_exit(&dp->intrlock);
4143 return;
4144 }
4145 dp->rxmode &= ~RXMODE_ENABLE;
4146 (void) gem_mac_set_rx_filter(dp);
4147 mutex_exit(&dp->intrlock);
4148
4149 /* stop tx timeout watcher */
4150 if (dp->timeout_id) {
4151 while (untimeout(dp->timeout_id) == -1)
4152 ;
4153 dp->timeout_id = 0;
4154 }
4155
4156 /* make the nic state inactive */
4157 mutex_enter(&dp->intrlock);
4158 if (dp->mac_suspended) {
4159 mutex_exit(&dp->intrlock);
4160 return;
4161 }
4162 dp->nic_state = NIC_STATE_STOPPED;
4163
4164 /* deassert mac_active to block the interrupt handler */
4165 mutex_enter(&dp->xmitlock);
4166 dp->mac_active = B_FALSE;
4167 mutex_exit(&dp->xmitlock);
4168
4169 /* wait for any in-flight interrupt handler to finish */
4170 while (dp->intr_busy) {
4171 cv_wait(&dp->tx_drain_cv, &dp->intrlock);
4172 }
4173 (void) gem_mac_stop(dp, 0);
4174 mutex_exit(&dp->intrlock);
4175 }
4176
4177 static int
4178 gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
4179 {
4180 int err;
4181 int ret;
4182 struct gem_dev *dp = arg;
4183
4184 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4185
4186 if (add) {
4187 ret = gem_add_multicast(dp, ep);
4188 } else {
4189 ret = gem_remove_multicast(dp, ep);
4190 }
4191
4192 err = 0;
4193 if (ret != GEM_SUCCESS) {
4194 err = EIO;
4195 }
4196
4197 return (err);
4198 }
4199
4200 static int
4201 gem_m_setpromisc(void *arg, boolean_t on)
4202 {
4203 int err = 0; /* no error */
4204 struct gem_dev *dp = arg;
4205
4206 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4207
4208 mutex_enter(&dp->intrlock);
4209 if (dp->mac_suspended) {
4210 mutex_exit(&dp->intrlock);
4211 return (EIO);
4212 }
4213 if (on) {
4214 dp->rxmode |= RXMODE_PROMISC;
4215 } else {
4216 dp->rxmode &= ~RXMODE_PROMISC;
4217 }
4218
4219 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4220 err = EIO;
4221 }
4222 mutex_exit(&dp->intrlock);
4223
4224 return (err);
4225 }
4226
4227 int
4228 gem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
4229 {
4230 struct gem_dev *dp = arg;
4231 struct gem_stats *gstp = &dp->stats;
4232 uint64_t val = 0;
4233
4234 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4235
4236 if (mutex_owned(&dp->intrlock)) {
4237 if (dp->mac_suspended) {
4238 return (EIO);
4239 }
4240 } else {
4241 mutex_enter(&dp->intrlock);
4242 if (dp->mac_suspended) {
4243 mutex_exit(&dp->intrlock);
4244 return (EIO);
4245 }
4246 mutex_exit(&dp->intrlock);
4247 }
4248
4249 if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
4250 return (EIO);
4251 }
4252
4253 switch (stat) {
4254 case MAC_STAT_IFSPEED:
4255 val = gem_speed_value[dp->speed] *1000000ull;
4256 break;
4257
4258 case MAC_STAT_MULTIRCV:
4259 val = gstp->rmcast;
4260 break;
4261
4262 case MAC_STAT_BRDCSTRCV:
4263 val = gstp->rbcast;
4264 break;
4265
4266 case MAC_STAT_MULTIXMT:
4267 val = gstp->omcast;
4268 break;
4269
4270 case MAC_STAT_BRDCSTXMT:
4271 val = gstp->obcast;
4272 break;
4273
4274 case MAC_STAT_NORCVBUF:
4275 val = gstp->norcvbuf + gstp->missed;
4276 break;
4277
4278 case MAC_STAT_IERRORS:
4279 val = gstp->errrcv;
4280 break;
4281
4282 case MAC_STAT_NOXMTBUF:
4283 val = gstp->noxmtbuf;
4284 break;
4285
4286 case MAC_STAT_OERRORS:
4287 val = gstp->errxmt;
4288 break;
4289
4290 case MAC_STAT_COLLISIONS:
4291 val = gstp->collisions;
4292 break;
4293
4294 case MAC_STAT_RBYTES:
4295 val = gstp->rbytes;
4296 break;
4297
4298 case MAC_STAT_IPACKETS:
4299 val = gstp->rpackets;
4300 break;
4301
4302 case MAC_STAT_OBYTES:
4303 val = gstp->obytes;
4304 break;
4305
4306 case MAC_STAT_OPACKETS:
4307 val = gstp->opackets;
4308 break;
4309
4310 case MAC_STAT_UNDERFLOWS:
4311 val = gstp->underflow;
4312 break;
4313
4314 case MAC_STAT_OVERFLOWS:
4315 val = gstp->overflow;
4316 break;
4317
4318 case ETHER_STAT_ALIGN_ERRORS:
4319 val = gstp->frame;
4320 break;
4321
4322 case ETHER_STAT_FCS_ERRORS:
4323 val = gstp->crc;
4324 break;
4325
4326 case ETHER_STAT_FIRST_COLLISIONS:
4327 val = gstp->first_coll;
4328 break;
4329
4330 case ETHER_STAT_MULTI_COLLISIONS:
4331 val = gstp->multi_coll;
4332 break;
4333
4334 case ETHER_STAT_SQE_ERRORS:
4335 val = gstp->sqe;
4336 break;
4337
4338 case ETHER_STAT_DEFER_XMTS:
4339 val = gstp->defer;
4340 break;
4341
4342 case ETHER_STAT_TX_LATE_COLLISIONS:
4343 val = gstp->xmtlatecoll;
4344 break;
4345
4346 case ETHER_STAT_EX_COLLISIONS:
4347 val = gstp->excoll;
4348 break;
4349
4350 case ETHER_STAT_MACXMT_ERRORS:
4351 val = gstp->xmit_internal_err;
4352 break;
4353
4354 case ETHER_STAT_CARRIER_ERRORS:
4355 val = gstp->nocarrier;
4356 break;
4357
4358 case ETHER_STAT_TOOLONG_ERRORS:
4359 val = gstp->frame_too_long;
4360 break;
4361
4362 case ETHER_STAT_MACRCV_ERRORS:
4363 val = gstp->rcv_internal_err;
4364 break;
4365
4366 case ETHER_STAT_XCVR_ADDR:
4367 val = dp->mii_phy_addr;
4368 break;
4369
4370 case ETHER_STAT_XCVR_ID:
4371 val = dp->mii_phy_id;
4372 break;
4373
4374 case ETHER_STAT_XCVR_INUSE:
4375 val = gem_mac_xcvr_inuse(dp);
4376 break;
4377
4378 case ETHER_STAT_CAP_1000FDX:
4379 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
4380 (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
4381 break;
4382
4383 case ETHER_STAT_CAP_1000HDX:
4384 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
4385 (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
4386 break;
4387
4388 case ETHER_STAT_CAP_100FDX:
4389 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4390 break;
4391
4392 case ETHER_STAT_CAP_100HDX:
4393 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4394 break;
4395
4396 case ETHER_STAT_CAP_10FDX:
4397 val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4398 break;
4399
4400 case ETHER_STAT_CAP_10HDX:
4401 val = BOOLEAN(dp->mii_status & MII_STATUS_10);
4402 break;
4403
4404 case ETHER_STAT_CAP_ASMPAUSE:
4405 val = BOOLEAN(dp->gc.gc_flow_control & 2);
4406 break;
4407
4408 case ETHER_STAT_CAP_PAUSE:
4409 val = BOOLEAN(dp->gc.gc_flow_control & 1);
4410 break;
4411
4412 case ETHER_STAT_CAP_AUTONEG:
4413 val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4414 break;
4415
4416 case ETHER_STAT_ADV_CAP_1000FDX:
4417 val = dp->anadv_1000fdx;
4418 break;
4419
4420 case ETHER_STAT_ADV_CAP_1000HDX:
4421 val = dp->anadv_1000hdx;
4422 break;
4423
4424 case ETHER_STAT_ADV_CAP_100FDX:
4425 val = dp->anadv_100fdx;
4426 break;
4427
4428 case ETHER_STAT_ADV_CAP_100HDX:
4429 val = dp->anadv_100hdx;
4430 break;
4431
4432 case ETHER_STAT_ADV_CAP_10FDX:
4433 val = dp->anadv_10fdx;
4434 break;
4435
4436 case ETHER_STAT_ADV_CAP_10HDX:
4437 val = dp->anadv_10hdx;
4438 break;
4439
4440 case ETHER_STAT_ADV_CAP_ASMPAUSE:
4441 val = BOOLEAN(dp->anadv_flow_control & 2);
4442 break;
4443
4444 case ETHER_STAT_ADV_CAP_PAUSE:
4445 val = BOOLEAN(dp->anadv_flow_control & 1);
4446 break;
4447
4448 case ETHER_STAT_ADV_CAP_AUTONEG:
4449 val = dp->anadv_autoneg;
4450 break;
4451
4452 case ETHER_STAT_LP_CAP_1000FDX:
4453 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
4454 break;
4455
4456 case ETHER_STAT_LP_CAP_1000HDX:
4457 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
4458 break;
4459
4460 case ETHER_STAT_LP_CAP_100FDX:
4461 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
4462 break;
4463
4464 case ETHER_STAT_LP_CAP_100HDX:
4465 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
4466 break;
4467
4468 case ETHER_STAT_LP_CAP_10FDX:
4469 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
4470 break;
4471
4472 case ETHER_STAT_LP_CAP_10HDX:
4473 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
4474 break;
4475
4476 case ETHER_STAT_LP_CAP_ASMPAUSE:
4477 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
4478 break;
4479
4480 case ETHER_STAT_LP_CAP_PAUSE:
4481 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
4482 break;
4483
4484 case ETHER_STAT_LP_CAP_AUTONEG:
4485 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4486 break;
4487
4488 case ETHER_STAT_LINK_ASMPAUSE:
4489 val = BOOLEAN(dp->flow_control & 2);
4490 break;
4491
4492 case ETHER_STAT_LINK_PAUSE:
4493 val = BOOLEAN(dp->flow_control & 1);
4494 break;
4495
4496 case ETHER_STAT_LINK_AUTONEG:
4497 val = dp->anadv_autoneg &&
4498 BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4499 break;
4500
4501 case ETHER_STAT_LINK_DUPLEX:
4502 val = (dp->mii_state == MII_STATE_LINKUP) ?
4503 (dp->full_duplex ? 2 : 1) : 0;
4504 break;
4505
4506 case ETHER_STAT_TOOSHORT_ERRORS:
4507 val = gstp->runt;
4508 break;
4509 case ETHER_STAT_LP_REMFAULT:
4510 val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
4511 break;
4512
4513 case ETHER_STAT_JABBER_ERRORS:
4514 val = gstp->jabber;
4515 break;
4516
4517 case ETHER_STAT_CAP_100T4:
4518 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4519 break;
4520
4521 case ETHER_STAT_ADV_CAP_100T4:
4522 val = dp->anadv_100t4;
4523 break;
4524
4525 case ETHER_STAT_LP_CAP_100T4:
4526 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
4527 break;
4528
4529 default:
4530 #if GEM_DEBUG_LEVEL > 2
4531 cmn_err(CE_WARN,
4532 "%s: unrecognized parameter value = %d",
4533 __func__, stat);
4534 #endif
4535 return (ENOTSUP);
4536 }
4537
4538 *valp = val;
4539
4540 return (0);
4541 }
4542
4543 static int
4544 gem_m_unicst(void *arg, const uint8_t *mac)
4545 {
4546 int err = 0;
4547 struct gem_dev *dp = arg;
4548
4549 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4550
4551 mutex_enter(&dp->intrlock);
4552 if (dp->mac_suspended) {
4553 mutex_exit(&dp->intrlock);
4554 return (EIO);
4555 }
4556 bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
4557 dp->rxmode |= RXMODE_ENABLE;
4558
4559 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4560 err = EIO;
4561 }
4562 mutex_exit(&dp->intrlock);
4563
4564 return (err);
4565 }
4566
4567 /*
4568 * gem_m_tx is used only for sending data packets onto the ethernet wire.
4569 */
4570 static mblk_t *
4571 gem_m_tx(void *arg, mblk_t *mp)
4572 {
4573 uint32_t flags = 0;
4574 struct gem_dev *dp = arg;
4575 mblk_t *tp;
4576
4577 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4578
4579 ASSERT(dp->nic_state == NIC_STATE_ONLINE);
4580 if (dp->mii_state != MII_STATE_LINKUP) {
4581 /* Some NICs cannot send packets while the link is down. */
4582 while (mp) {
4583 tp = mp->b_next;
4584 mp->b_next = NULL;
4585 freemsg(mp);
4586 mp = tp;
4587 }
4588 return (NULL);
4589 }
4590
4591 return (gem_send_common(dp, mp, flags));
4592 }
4593
4594 static void
4595 gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4596 {
4597 DPRINTF(0, (CE_CONT, "!%s: %s: called",
4598 ((struct gem_dev *)arg)->name, __func__));
4599
4600 gem_mac_ioctl((struct gem_dev *)arg, wq, mp);
4601 }
4602
4603 /* ARGSUSED */
4604 static boolean_t
4605 gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4606 {
4607 return (B_FALSE);
4608 }
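/*
 * Returning B_FALSE for every capability tells the MAC layer that the
 * driver offers no optional hardware capabilities such as checksum
 * offload (MAC_CAPAB_HCKSUM) or LSO, so those features are handled in
 * software above the driver.
 */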
4609
4610 static void
4611 gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
4612 {
4613 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4614 macp->m_driver = dp;
4615 macp->m_dip = dp->dip;
4616 macp->m_src_addr = dp->dev_addr.ether_addr_octet;
4617 macp->m_callbacks = &gem_m_callbacks;
4618 macp->m_min_sdu = 0;
4619 macp->m_max_sdu = dp->mtu;
4620
4621 if (dp->misc_flag & GEM_VLAN) {
4622 macp->m_margin = VTAG_SIZE;
4623 }
4624 }
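/*
 * Note on m_margin: for VLAN-capable hardware, the extra VTAG_SIZE bytes
 * tell the MAC layer that received frames may exceed m_max_sdu by the
 * size of an 802.1Q tag without being treated as oversized.
 */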
4625
4626 /* ======================================================================== */
4627 /*
4628 * attach/detach support
4629 */
4630 /* ======================================================================== */
4631 static void
4632 gem_read_conf(struct gem_dev *dp)
4633 {
4634 int val;
4635
4636 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4637
4638 /*
4639 * Get media mode information from the .conf file
4640 */
4641 dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
4642 dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
4643 dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
4644 dp->anadv_100t4 = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
4645 dp->anadv_100fdx = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
4646 dp->anadv_100hdx = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
4647 dp->anadv_10fdx = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
4648 dp->anadv_10hdx = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
4649
4650 if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
4651 DDI_PROP_DONTPASS, "full-duplex"))) {
4652 dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
4653 dp->anadv_autoneg = B_FALSE;
4654 if (dp->full_duplex) {
4655 dp->anadv_1000hdx = B_FALSE;
4656 dp->anadv_100hdx = B_FALSE;
4657 dp->anadv_10hdx = B_FALSE;
4658 } else {
4659 dp->anadv_1000fdx = B_FALSE;
4660 dp->anadv_100fdx = B_FALSE;
4661 dp->anadv_10fdx = B_FALSE;
4662 }
4663 }
4664
4665 if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
4666 dp->anadv_autoneg = B_FALSE;
4667 switch (val) {
4668 case 1000:
4669 dp->speed = GEM_SPD_1000;
4670 dp->anadv_100t4 = B_FALSE;
4671 dp->anadv_100fdx = B_FALSE;
4672 dp->anadv_100hdx = B_FALSE;
4673 dp->anadv_10fdx = B_FALSE;
4674 dp->anadv_10hdx = B_FALSE;
4675 break;
4676 case 100:
4677 dp->speed = GEM_SPD_100;
4678 dp->anadv_1000fdx = B_FALSE;
4679 dp->anadv_1000hdx = B_FALSE;
4680 dp->anadv_10fdx = B_FALSE;
4681 dp->anadv_10hdx = B_FALSE;
4682 break;
4683 case 10:
4684 dp->speed = GEM_SPD_10;
4685 dp->anadv_1000fdx = B_FALSE;
4686 dp->anadv_1000hdx = B_FALSE;
4687 dp->anadv_100t4 = B_FALSE;
4688 dp->anadv_100fdx = B_FALSE;
4689 dp->anadv_100hdx = B_FALSE;
4690 break;
4691 default:
4692 cmn_err(CE_WARN,
4693 "!%s: property %s: illegal value:%d",
4694 dp->name, "speed", val);
4695 dp->anadv_autoneg = B_TRUE;
4696 break;
4697 }
4698 }
4699
4700 val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
4701 if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) {
4702 cmn_err(CE_WARN,
4703 "!%s: property %s: illegal value:%d",
4704 dp->name, "flow-control", val);
4705 } else {
4706 val = min(val, dp->gc.gc_flow_control);
4707 }
4708 dp->anadv_flow_control = val;
4709
4710 if (gem_prop_get_int(dp, "nointr", 0)) {
4711 dp->misc_flag |= GEM_NOINTR;
4712 cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
4713 }
4714
4715 dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
4716 dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
4717 dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
4718 dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
4719 dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
4720 }
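/*
 * Illustrative driver.conf fragment exercising some of the properties
 * parsed above (example values only; the property names are exactly
 * those read by gem_read_conf):
 *
 *	# force 100Mbps full duplex, no autonegotiation
 *	adv_autoneg_cap=0 speed=100 full-duplex=1;
 *
 *	# run in polling mode (no interrupts)
 *	nointr=1;
 */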
4721
4722
4723 /*
4724 * Gem kstat support
4725 */
4726
4727 #define GEM_LOCAL_DATA_SIZE(gc) \
4728 (sizeof (struct gem_dev) + \
4729 sizeof (struct mcast_addr) * GEM_MAXMC + \
4730 sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \
4731 sizeof (void *) * ((gc)->gc_tx_buf_size))
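/*
 * GEM_LOCAL_DATA_SIZE() sizes the single kmem_zalloc'ed region that
 * gem_do_attach() carves up in place: the gem_dev structure itself,
 * followed by GEM_MAXMC multicast entries (dp->mc_list), the array of
 * gc_tx_buf_size txbuf structures (dp->tx_buf), and a trailing array of
 * gc_tx_buf_size pointers.
 */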
4732
4733 struct gem_dev *
4734 gem_do_attach(dev_info_t *dip, int port,
4735 struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep,
4736 void *lp, int lmsize)
4737 {
4738 struct gem_dev *dp;
4739 int i;
4740 ddi_iblock_cookie_t c;
4741 mac_register_t *macp = NULL;
4742 int ret;
4743 int unit;
4744 int nports;
4745
4746 unit = ddi_get_instance(dip);
4747 if ((nports = gc->gc_nports) == 0) {
4748 nports = 1;
4749 }
4750 if (nports == 1) {
4751 ddi_set_driver_private(dip, NULL);
4752 }
4753
4754 DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH",
4755 unit));
4756
4757 /*
4758 * Allocate soft data structure
4759 */
4760 dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
4761
4762 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
4763 cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed",
4764 unit, __func__);
4765 return (NULL);
4766 }
4767 /* ddi_set_driver_private(dip, dp); */
4768
4769 /* link to private area */
4770 dp->private = lp;
4771 dp->priv_size = lmsize;
4772 dp->mc_list = (struct mcast_addr *)&dp[1];
4773
4774 dp->dip = dip;
4775 (void) sprintf(dp->name, gc->gc_name, nports * unit + port);
4776
4777 /*
4778 * Get iblock cookie
4779 */
4780 if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) {
4781 cmn_err(CE_CONT,
4782 "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
4783 dp->name);
4784 goto err_free_private;
4785 }
4786 dp->iblock_cookie = c;
4787
4788 /*
4789 * Initialize mutexes for this device.
4790 */
4791 mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
4792 mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
4793 cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
4794
4795 /*
4796 * configure gem parameters
4797 */
4798 dp->base_addr = base;
4799 dp->regs_handle = *regs_handlep;
4800 dp->gc = *gc;
4801 gc = &dp->gc;
4802 /* patch to simplify dma resource management */
4803 gc->gc_tx_max_frags = 1;
4804 gc->gc_tx_max_descs_per_pkt = 1;
4805 gc->gc_tx_ring_size = gc->gc_tx_buf_size;
4806 gc->gc_tx_ring_limit = gc->gc_tx_buf_limit;
4807 gc->gc_tx_desc_write_oo = B_TRUE;
4808
4809 gc->gc_nports = nports; /* fix nports */
4810
4811 /* fix copy thresholds */
4812 gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh);
4813 gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh);
4814
4815 /* fix rx buffer boundary for iocache line size */
4816 ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align);
4817 ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align);
4818 gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1);
4819 gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1;
4820
4821 /* fix descriptor boundary for cache line size */
4822 gc->gc_dma_attr_desc.dma_attr_align =
4823 max(gc->gc_dma_attr_desc.dma_attr_align, IOC_LINESIZE);
4824
4825 /* patch get_packet method */
4826 if (gc->gc_get_packet == NULL) {
4827 gc->gc_get_packet = &gem_get_packet_default;
4828 }
4829
4830 /* patch get_rx_start method */
4831 if (gc->gc_rx_start == NULL) {
4832 gc->gc_rx_start = &gem_rx_start_default;
4833 }
4834
4835 /* calculate descriptor area */
4836 if (gc->gc_rx_desc_unit_shift >= 0) {
4837 dp->rx_desc_size =
4838 ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift,
4839 gc->gc_dma_attr_desc.dma_attr_align);
4840 }
4841 if (gc->gc_tx_desc_unit_shift >= 0) {
4842 dp->tx_desc_size =
4843 ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift,
4844 gc->gc_dma_attr_desc.dma_attr_align);
4845 }
4846
4847 dp->mtu = ETHERMTU;
4848 dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
4849 /* link tx buffers */
4850 for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
4851 dp->tx_buf[i].txb_next =
4852 &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
4853 }
4854
4855 dp->rxmode = 0;
4856 dp->speed = GEM_SPD_10; /* default is 10Mbps */
4857 dp->full_duplex = B_FALSE; /* default is half */
4858 dp->flow_control = FLOW_CONTROL_NONE;
4859 dp->poll_pkt_delay = 8; /* typical rx packet coalescing delay */
4860
4861 /* performance tuning parameters */
4862 dp->txthr = ETHERMAX; /* tx fifo threshold */
4863 dp->txmaxdma = 16*4; /* tx max dma burst size */
4864 dp->rxthr = 128; /* rx fifo threshold */
4865 dp->rxmaxdma = 16*4; /* rx max dma burst size */
4866
4867 /*
4868 * Get media mode information from .conf file
4869 */
4870 gem_read_conf(dp);
4871
4872 /* rx_buf_len is the required buffer length, excluding padding for alignment */
4873 dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
4874
4875 /*
4876 * Reset the chip
4877 */
4878 mutex_enter(&dp->intrlock);
4879 dp->nic_state = NIC_STATE_STOPPED;
4880 ret = (*dp->gc.gc_reset_chip)(dp);
4881 mutex_exit(&dp->intrlock);
4882 if (ret != GEM_SUCCESS) {
4883 goto err_free_regs;
4884 }
4885
4886 /*
4887 * HW dependent parameter initialization
4888 */
4889 mutex_enter(&dp->intrlock);
4890 ret = (*dp->gc.gc_attach_chip)(dp);
4891 mutex_exit(&dp->intrlock);
4892 if (ret != GEM_SUCCESS) {
4893 goto err_free_regs;
4894 }
4895
4896 #ifdef DEBUG_MULTIFRAGS
4897 dp->gc.gc_tx_copy_thresh = dp->mtu;
4898 #endif
4899 /* allocate tx and rx resources */
4900 if (gem_alloc_memory(dp)) {
4901 goto err_free_regs;
4902 }
4903
4904 DPRINTF(0, (CE_CONT,
4905 "!%s: at %p, %02x:%02x:%02x:%02x:%02x:%02x",
4906 dp->name, (void *)dp->base_addr,
4907 dp->dev_addr.ether_addr_octet[0],
4908 dp->dev_addr.ether_addr_octet[1],
4909 dp->dev_addr.ether_addr_octet[2],
4910 dp->dev_addr.ether_addr_octet[3],
4911 dp->dev_addr.ether_addr_octet[4],
4912 dp->dev_addr.ether_addr_octet[5]));
4913
4914 /* copy mac address */
4915 dp->cur_addr = dp->dev_addr;
4916
4917 gem_gld3_init(dp, macp);
4918
4919 /* Probe MII phy (scan phy) */
4920 dp->mii_lpable = 0;
4921 dp->mii_advert = 0;
4922 dp->mii_exp = 0;
4923 dp->mii_ctl1000 = 0;
4924 dp->mii_stat1000 = 0;
4925 if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
4926 goto err_free_ring;
4927 }
4928
4929 /* mask unsupported abilities */
4930 dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4931 dp->anadv_1000fdx &=
4932 BOOLEAN(dp->mii_xstatus &
4933 (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
4934 dp->anadv_1000hdx &=
4935 BOOLEAN(dp->mii_xstatus &
4936 (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
4937 dp->anadv_100t4 &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4938 dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4939 dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4940 dp->anadv_10fdx &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4941 dp->anadv_10hdx &= BOOLEAN(dp->mii_status & MII_STATUS_10);
4942
4943 gem_choose_forcedmode(dp);
4944
4945 /* initialize MII phy if required */
4946 if (dp->gc.gc_mii_init) {
4947 if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
4948 goto err_free_ring;
4949 }
4950 }
4951
4952 /*
4953 * initialize kstats including mii statistics
4954 */
4955 gem_nd_setup(dp);
4956
4957 /*
4958 * Add interrupt to system.
4959 */
4960 if (ret = mac_register(macp, &dp->mh)) {
4961 cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
4962 dp->name, ret);
4963 goto err_release_stats;
4964 }
4965 mac_free(macp);
4966 macp = NULL;
4967
4968 if (dp->misc_flag & GEM_SOFTINTR) {
4969 if (ddi_add_softintr(dip,
4970 DDI_SOFTINT_LOW, &dp->soft_id,
4971 NULL, NULL,
4972 (uint_t (*)(caddr_t))gem_intr,
4973 (caddr_t)dp) != DDI_SUCCESS) {
4974 cmn_err(CE_WARN, "!%s: ddi_add_softintr failed",
4975 dp->name);
4976 goto err_unregister;
4977 }
4978 } else if ((dp->misc_flag & GEM_NOINTR) == 0) {
4979 if (ddi_add_intr(dip, 0, NULL, NULL,
4980 (uint_t (*)(caddr_t))gem_intr,
4981 (caddr_t)dp) != DDI_SUCCESS) {
4982 cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
4983 goto err_unregister;
4984 }
4985 } else {
4986 /*
4987 * Don't use interrupts;
4988 * schedule the first call of gem_intr_watcher instead.
4989 */
4990 dp->intr_watcher_id =
4991 timeout((void (*)(void *))gem_intr_watcher,
4992 (void *)dp, drv_usectohz(3*1000000));
4993 }
4994
4995 /* link this device to dev_info */
4996 dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
4997 dp->port = port;
4998 ddi_set_driver_private(dip, (caddr_t)dp);
4999
5000 /* reset mii phy and start mii link watcher */
5001 gem_mii_start(dp);
5002
5003 DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success"));
5004 return (dp);
5005
5006 err_unregister:
5007 (void) mac_unregister(dp->mh);
5008 err_release_stats:
5009 /* release NDD resources */
5010 gem_nd_cleanup(dp);
5011
5012 err_free_ring:
5013 gem_free_memory(dp);
5014 err_free_regs:
5015 ddi_regs_map_free(&dp->regs_handle);
5016 err_free_locks:
5017 mutex_destroy(&dp->xmitlock);
5018 mutex_destroy(&dp->intrlock);
5019 cv_destroy(&dp->tx_drain_cv);
5020 err_free_private:
5021 if (macp) {
5022 mac_free(macp);
5023 }
5024 kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
5025
5026 return (NULL);
5027 }
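/*
 * A minimal sketch of how a leaf driver ties gem_do_attach() and
 * gem_do_detach() into its attach(9E)/detach(9E) entry points.
 * xx_attach, xx_config and the chip callbacks are hypothetical names;
 * register mapping and error handling are omitted:
 *
 *	static int
 *	xx_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 *	{
 *		if (cmd == DDI_ATTACH) {
 *			struct gem_conf *gcp = &xx_config;
 *
 *			gcp->gc_reset_chip = &xx_reset_chip;
 *			gcp->gc_attach_chip = &xx_attach_chip;
 *			... map registers, fill the remaining fields ...
 *			return (gem_do_attach(dip, 0, gcp, base,
 *			    &regs_ha, lp, sizeof (*lp)) != NULL ?
 *			    DDI_SUCCESS : DDI_FAILURE);
 *		}
 *		return (DDI_FAILURE);
 *	}
 *
 *	static int
 *	xx_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
 *	{
 *		if (cmd == DDI_DETACH) {
 *			return (gem_do_detach(dip));
 *		}
 *		return (DDI_FAILURE);
 *	}
 */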
5028
5029 int
5030 gem_do_detach(dev_info_t *dip)
5031 {
5032 struct gem_dev *dp;
5033 struct gem_dev *tmp;
5034 caddr_t private;
5035 int priv_size;
5036 ddi_acc_handle_t rh;
5037
5038 dp = GEM_GET_DEV(dip);
5039 if (dp == NULL) {
5040 return (DDI_SUCCESS);
5041 }
5042
5043 rh = dp->regs_handle;
5044 private = dp->private;
5045 priv_size = dp->priv_size;
5046
5047 while (dp) {
5048 /* unregister with gld v3 */
5049 if (mac_unregister(dp->mh) != 0) {
5050 return (DDI_FAILURE);
5051 }
5052
5053 /* ensure that no rx buffers are still in use */
5054 if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
5055 /* resource is busy */
5056 cmn_err(CE_PANIC,
5057 "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
5058 dp->name, __func__,
5059 dp->rx_buf_allocated, dp->rx_buf_freecnt);
5060 /* NOT REACHED */
5061 }
5062
5063 /* stop mii link watcher */
5064 gem_mii_stop(dp);
5065
5066 /* unregister interrupt handler */
5067 if (dp->misc_flag & GEM_SOFTINTR) {
5068 ddi_remove_softintr(dp->soft_id);
5069 } else if ((dp->misc_flag & GEM_NOINTR) == 0) {
5070 ddi_remove_intr(dip, 0, dp->iblock_cookie);
5071 } else {
5072 /* stop interrupt watcher */
5073 if (dp->intr_watcher_id) {
5074 while (untimeout(dp->intr_watcher_id) == -1)
5075 ;
5076 dp->intr_watcher_id = 0;
5077 }
5078 }
5079
5080 /* release NDD resources */
5081 gem_nd_cleanup(dp);
5082 /* release buffers, descriptors and dma resources */
5083 gem_free_memory(dp);
5084
5085 /* release locks and condition variables */
5086 mutex_destroy(&dp->xmitlock);
5087 mutex_destroy(&dp->intrlock);
5088 cv_destroy(&dp->tx_drain_cv);
5089
5090 /* release basic memory resources */
5091 tmp = dp->next;
5092 kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
5093 dp = tmp;
5094 }
5095
5096 /* release common private memory for the nic */
5097 kmem_free(private, priv_size);
5098
5099 /* release register mapping resources */
5100 ddi_regs_map_free(&rh);
5101
5102 DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success",
5103 ddi_driver_name(dip), ddi_get_instance(dip)));
5104
5105 return (DDI_SUCCESS);
5106 }
5107
5108 int
5109 gem_suspend(dev_info_t *dip)
5110 {
5111 struct gem_dev *dp;
5112
5113 /*
5114 * stop the device
5115 */
5116 dp = GEM_GET_DEV(dip);
5117 ASSERT(dp);
5118
5119 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5120
5121 for (; dp; dp = dp->next) {
5122
5123 /* stop mii link watcher */
5124 gem_mii_stop(dp);
5125
5126 /* stop interrupt watcher for no-intr mode */
5127 if (dp->misc_flag & GEM_NOINTR) {
5128 if (dp->intr_watcher_id) {
5129 while (untimeout(dp->intr_watcher_id) == -1)
5130 ;
5131 }
5132 dp->intr_watcher_id = 0;
5133 }
5134
5135 /* stop tx timeout watcher */
5136 if (dp->timeout_id) {
5137 while (untimeout(dp->timeout_id) == -1)
5138 ;
5139 dp->timeout_id = 0;
5140 }
5141
5142 /* make the nic state inactive */
5143 mutex_enter(&dp->intrlock);
5144 (void) gem_mac_stop(dp, 0);
5145 ASSERT(!dp->mac_active);
5146
5147 /* no further register access */
5148 dp->mac_suspended = B_TRUE;
5149 mutex_exit(&dp->intrlock);
5150 }
5151
5152 /* XXX - power down the nic */
5153
5154 return (DDI_SUCCESS);
5155 }
5156
5157 int
5158 gem_resume(dev_info_t *dip)
5159 {
5160 struct gem_dev *dp;
5161
5162 /*
5163 * restart the device
5164 */
5165 dp = GEM_GET_DEV(dip);
5166 ASSERT(dp);
5167
5168 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5169
5170 for (; dp; dp = dp->next) {
5171
5172 /*
5173 * Bring up the nic after power up
5174 */
5175
5176 /* the gem_xxx.c layer is responsible for setting up the power management state. */
5177 ASSERT(!dp->mac_active);
5178
5179 /* reset the chip, because we are just after power up. */
5180 mutex_enter(&dp->intrlock);
5181
5182 dp->mac_suspended = B_FALSE;
5183 dp->nic_state = NIC_STATE_STOPPED;
5184
5185 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
5186 cmn_err(CE_WARN, "%s: %s: failed to reset chip",
5187 dp->name, __func__);
5188 mutex_exit(&dp->intrlock);
5189 goto err;
5190 }
5191 mutex_exit(&dp->intrlock);
5192
5193 /* initialize mii phy because we are just after power up */
5194 if (dp->gc.gc_mii_init) {
5195 (void) (*dp->gc.gc_mii_init)(dp);
5196 }
5197
5198 if (dp->misc_flag & GEM_NOINTR) {
5199 /*
5200 * schedule first call of gem_intr_watcher
5201 * instead of interrupts.
5202 */
5203 dp->intr_watcher_id =
5204 timeout((void (*)(void *))gem_intr_watcher,
5205 (void *)dp, drv_usectohz(3*1000000));
5206 }
5207
5208 /* restart mii link watcher */
5209 gem_mii_start(dp);
5210
5211 /* restart mac */
5212 mutex_enter(&dp->intrlock);
5213
5214 if (gem_mac_init(dp) != GEM_SUCCESS) {
5215 mutex_exit(&dp->intrlock);
5216 goto err_reset;
5217 }
5218 dp->nic_state = NIC_STATE_INITIALIZED;
5219
5220 /* set up the media mode if the link has already come up */
5221 if (dp->mii_state == MII_STATE_LINKUP) {
5222 if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
5223 mutex_exit(&dp->intrlock);
5224 goto err_reset;
5225 }
5226 }
5227
5228 /* enable mac address and rx filter */
5229 dp->rxmode |= RXMODE_ENABLE;
5230 if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
5231 mutex_exit(&dp->intrlock);
5232 goto err_reset;
5233 }
5234 dp->nic_state = NIC_STATE_ONLINE;
5235
5236 /* restart tx timeout watcher */
5237 dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
5238 (void *)dp,
5239 dp->gc.gc_tx_timeout_interval);
5240
5241 /* now the nic is fully functional */
5242 if (dp->mii_state == MII_STATE_LINKUP) {
5243 if (gem_mac_start(dp) != GEM_SUCCESS) {
5244 mutex_exit(&dp->intrlock);
5245 goto err_reset;
5246 }
5247 }
5248 mutex_exit(&dp->intrlock);
5249 }
5250
5251 return (DDI_SUCCESS);
5252
5253 err_reset:
5254 if (dp->intr_watcher_id) {
5255 while (untimeout(dp->intr_watcher_id) == -1)
5256 ;
5257 dp->intr_watcher_id = 0;
5258 }
5259 mutex_enter(&dp->intrlock);
5260 (*dp->gc.gc_reset_chip)(dp);
5261 dp->nic_state = NIC_STATE_STOPPED;
5262 mutex_exit(&dp->intrlock);
5263
5264 err:
5265 return (DDI_FAILURE);
5266 }
5267
5268 /*
5269 * misc routines for PCI
5270 */
5271 uint8_t
5272 gem_search_pci_cap(dev_info_t *dip,
5273 ddi_acc_handle_t conf_handle, uint8_t target)
5274 {
5275 uint8_t pci_cap_ptr;
5276 uint32_t pci_cap;
5277
5278 /* walk the pci capability list looking for the target capability */
5279 pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR);
5280 while (pci_cap_ptr) {
5281 /* read pci capability header */
5282 pci_cap = pci_config_get32(conf_handle, pci_cap_ptr);
5283 if ((pci_cap & 0xff) == target) {
5284 /* found */
5285 break;
5286 }
5287 /* get next_ptr */
5288 pci_cap_ptr = (pci_cap >> 8) & 0xff;
5289 }
5290 return (pci_cap_ptr);
5291 }
5292
5293 int
5294 gem_pci_set_power_state(dev_info_t *dip,
5295 ddi_acc_handle_t conf_handle, uint_t new_mode)
5296 {
5297 uint8_t pci_cap_ptr;
5298 uint32_t pmcsr;
5299 uint_t unit;
5300 const char *drv_name;
5301
5302 ASSERT(new_mode < 4);
5303
5304 unit = ddi_get_instance(dip);
5305 drv_name = ddi_driver_name(dip);
5306
5307 /* search for the power management capability */
5308 pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);
5309
5310 if (pci_cap_ptr == 0) {
5311 cmn_err(CE_CONT,
5312 "!%s%d: doesn't have pci power management capability",
5313 drv_name, unit);
5314 return (DDI_FAILURE);
5315 }
5316
5317 /* read power management capabilities */
5318 pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR);
5319
5320 DPRINTF(0, (CE_CONT,
5321 "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
5322 drv_name, unit, pci_cap_ptr, pmcsr));
5323
5324 /*
5325 * Is the requested power mode supported?
5326 */
5327 /* not yet */
5328
5329 /*
5330 * move to new mode
5331 */
5332 pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode;
5333 pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr);
5334
5335 return (DDI_SUCCESS);
5336 }
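/*
 * For example, a leaf driver would normally move the chip to the fully
 * powered D0 state (new_mode 0) before touching any device register
 * during attach:
 *
 *	(void) gem_pci_set_power_state(dip, conf_handle, 0);
 */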
5337
5338 /*
5339 * select and map the register set specified by address space type or by
5340 * register offset in PCI config space
5341 */
5342 int
5343 gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask,
5344 struct ddi_device_acc_attr *attrp,
5345 caddr_t *basep, ddi_acc_handle_t *hp)
5346 {
5347 struct pci_phys_spec *regs;
5348 uint_t len;
5349 uint_t unit;
5350 uint_t n;
5351 uint_t i;
5352 int ret;
5353 const char *drv_name;
5354
5355 unit = ddi_get_instance(dip);
5356 drv_name = ddi_driver_name(dip);
5357
5358 /* Search IO-range or memory-range to be mapped */
5359 regs = NULL;
5360 len = 0;
5361
5362 if ((ret = ddi_prop_lookup_int_array(
5363 DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5364 "reg", (void *)®s, &len)) != DDI_PROP_SUCCESS) {
5365 cmn_err(CE_WARN,
5366 "!%s%d: failed to get reg property (ret:%d)",
5367 drv_name, unit, ret);
5368 return (DDI_FAILURE);
5369 }
5370 n = len / (sizeof (struct pci_phys_spec) / sizeof (int));
5371
5372 ASSERT(regs != NULL && len > 0);
5373
5374 #if GEM_DEBUG_LEVEL > 0
5375 for (i = 0; i < n; i++) {
5376 cmn_err(CE_CONT,
5377 "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
5378 drv_name, unit, i,
5379 regs[i].pci_phys_hi,
5380 regs[i].pci_phys_mid,
5381 regs[i].pci_phys_low,
5382 regs[i].pci_size_hi,
5383 regs[i].pci_size_low);
5384 }
5385 #endif
5386 for (i = 0; i < n; i++) {
5387 if ((regs[i].pci_phys_hi & mask) == which) {
5388 /* it's the requested space */
5389 ddi_prop_free(regs);
5390 goto address_range_found;
5391 }
5392 }
5393 ddi_prop_free(regs);
5394 return (DDI_FAILURE);
5395
5396 address_range_found:
5397 if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp))
5398 != DDI_SUCCESS) {
5399 cmn_err(CE_CONT,
5400 "!%s%d: ddi_regs_map_setup failed (ret:%d)",
5401 drv_name, unit, ret);
5402 }
5403
5404 return (ret);
5405 }
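/*
 * For example, mapping a 32-bit memory-space entry of the device
 * (illustrative only; xx_dev_attr is a driver-supplied
 * ddi_device_acc_attr_t):
 *
 *	caddr_t base;
 *	ddi_acc_handle_t regs_ha;
 *
 *	if (gem_pci_regs_map_setup(dip, PCI_ADDR_MEM32, PCI_ADDR_MASK,
 *	    &xx_dev_attr, &base, &regs_ha) != DDI_SUCCESS) {
 *		return (DDI_FAILURE);
 *	}
 */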
5406
5407 void
5408 gem_mod_init(struct dev_ops *dop, char *name)
5409 {
5410 mac_init_ops(dop, name);
5411 }
5412
5413 void
5414 gem_mod_fini(struct dev_ops *dop)
5415 {
5416 mac_fini_ops(dop);
5417 }
5418