1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Copyright (c) 2001-2006 Advanced Micro Devices, Inc.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are met:
34  *
35  * + Redistributions of source code must retain the above copyright notice,
36  * + this list of conditions and the following disclaimer.
37  *
38  * + Redistributions in binary form must reproduce the above copyright
39  * + notice, this list of conditions and the following disclaimer in the
40  * + documentation and/or other materials provided with the distribution.
41  *
42  * + Neither the name of Advanced Micro Devices, Inc. nor the names of its
43  * + contributors may be used to endorse or promote products derived from
44  * + this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
47  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
48  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
49  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
50  * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. OR
51  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
53  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
54  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
56  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
57  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
58  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  *
60  * Import/Export/Re-Export/Use/Release/Transfer Restrictions and
61  * Compliance with Applicable Laws.  Notice is hereby given that
62  * the software may be subject to restrictions on use, release,
63  * transfer, importation, exportation and/or re-exportation under
64  * the laws and regulations of the United States or other
65  * countries ("Applicable Laws"), which include but are not
66  * limited to U.S. export control laws such as the Export
67  * Administration Regulations and national security controls as
68  * defined thereunder, as well as State Department controls under
69  * the U.S. Munitions List.  Permission to use and/or
70  * redistribute the software is conditioned upon compliance with
71  * all Applicable Laws, including U.S. export control laws
72  * regarding specifically designated persons, countries and
73  * nationals of countries subject to national security controls.
74  */
75 
76 
77 #pragma ident "@(#)$RCSfile: solaris_odl.c,v $ $Revision: 1.3 $ " \
78 " $Date: 2004/04/22 15:22:54 $ AMD"
79 
80 
81 /* include files */
82 #include <sys/disp.h>
83 #include <sys/atomic.h>
84 #include <sys/vlan.h>
85 #include "amd8111s_main.h"
86 
/* Global macro definitions */
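/*
 * ROUNDUP(x, a) rounds x up to the next multiple of a, assuming a is
 * a power of two; e.g. ROUNDUP(100, 128) yields 128.
 */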
88 #define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
89 #define	INTERFACE_NAME "amd8111s"
90 #define	AMD8111S_SPLIT	128
91 #define	AMD8111S_SEND_MAX	64
92 
93 static char ident[] = "AMD8111 10/100M Ethernet 1.0";
94 
95 /*
96  * Driver Entry Points
97  */
98 static int amd8111s_attach(dev_info_t *, ddi_attach_cmd_t);
99 static int amd8111s_detach(dev_info_t *, ddi_detach_cmd_t);
100 
101 /*
102  * GLD Entry points prototype
103  */
104 static int amd8111s_m_unicst(void *, const uint8_t *);
105 static int amd8111s_m_promisc(void *, boolean_t);
106 static int amd8111s_m_stat(void *, uint_t, uint64_t *);
107 static void amd8111s_m_resources(void *arg);
108 static void amd8111s_m_ioctl(void *, queue_t *, mblk_t *);
109 static int amd8111s_m_multicst(void *, boolean_t, const uint8_t *addr);
110 static int amd8111s_m_start(void *);
111 static void amd8111s_m_stop(void *);
112 static mblk_t *amd8111s_m_tx(void *, mblk_t *mp);
113 static uint_t amd8111s_intr(caddr_t);
114 
115 static int amd8111s_unattach(dev_info_t *, struct LayerPointers *);
116 
117 static boolean_t amd8111s_allocate_buffers(struct LayerPointers *);
118 static int amd8111s_odlInit(struct LayerPointers *);
119 static boolean_t amd8111s_allocate_descriptors(struct LayerPointers *);
120 static void amd8111s_free_descriptors(struct LayerPointers *);
121 static boolean_t amd8111s_alloc_dma_ringbuf(struct LayerPointers *,
122 		struct amd8111s_dma_ringbuf *, uint32_t, uint32_t);
123 static void amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *);
124 
125 
126 static void amd8111s_log(struct LayerPointers *adapter, int level,
127     char *fmt, ...);
128 
129 static struct cb_ops amd8111s_cb_ops = {
	nulldev,	/* cb_open */
	nulldev,	/* cb_close */
	nodev,		/* cb_strategy */
	nodev,		/* cb_print */
	nodev,		/* cb_dump */
	nodev,		/* cb_read */
	nodev,		/* cb_write */
	nodev,		/* cb_ioctl */
	nodev,		/* cb_devmap */
	nodev,		/* cb_mmap */
	nodev,		/* cb_segmap */
	nochpoll,	/* cb_chpoll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,		/* cb_str */
	D_NEW | D_MP,	/* cb_flag */
	CB_REV,		/* cb_rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
148 };
149 
150 static struct dev_ops amd8111s_dev_ops = {
151 	DEVO_REV,		/* devo_rev */
152 	0,			/* devo_refcnt */
153 	NULL,			/* devo_getinfo */
154 	nulldev,		/* devo_identify */
155 	nulldev,		/* devo_probe */
156 	amd8111s_attach,	/* devo_attach */
157 	amd8111s_detach,	/* devo_detach */
158 	nodev,			/* devo_reset */
159 	&amd8111s_cb_ops,	/* devo_cb_ops */
160 	NULL,			/* devo_bus_ops */
	nodev			/* devo_power */
162 };
163 
164 struct modldrv amd8111s_modldrv = {
165 	&mod_driverops,		/* Type of module. This one is a driver */
166 	ident,			/* short description */
167 	&amd8111s_dev_ops	/* driver specific ops */
168 };
169 
170 struct modlinkage amd8111s_modlinkage = {
171 	MODREV_1, (void *)&amd8111s_modldrv, NULL
172 };
173 
174 /*
175  * Global Variables
176  */
177 struct LayerPointers *amd8111sadapter;
178 
179 static ddi_dma_attr_t pcn_buff_dma_attr_t = {
180 	DMA_ATTR_V0,	/* dma_attr_version */
181 	(uint64_t)0,		/* dma_attr_addr_lo */
182 	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
183 	(uint64_t)0xFFFFFFFF,	/* dma_attr_count_max */
184 	(uint64_t)1,		/* dma_attr_align */
185 	(uint_t)0x7F,		/* dma_attr_burstsizes */
186 	(uint32_t)1,		/* dma_attr_minxfer */
187 	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
188 	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
189 	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* dma_attr_granular */
191 	(uint_t)0		/* dma_attr_flags */
192 };
193 
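/*
 * DMA attributes for the Rx/Tx descriptor rings. These differ from the
 * buffer attributes above mainly in the 16-byte alignment
 * (dma_attr_align = 0x10) that the descriptor rings require (see the
 * ALIGNMENT rounding in amd8111s_allocate_descriptors()); both sets
 * confine DMA to the low 32-bit address space.
 */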
194 static ddi_dma_attr_t pcn_desc_dma_attr_t = {
195 	DMA_ATTR_V0,		/* dma_attr_version */
196 	(uint64_t)0,		/* dma_attr_addr_lo */
197 	(uint64_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
198 	(uint64_t)0x7FFFFFFF,	/* dma_attr_count_max */
199 	(uint64_t)0x10,		/* dma_attr_align */
200 	(uint_t)0xFFFFFFFFU,	/* dma_attr_burstsizes */
201 	(uint32_t)1,		/* dma_attr_minxfer */
202 	(uint64_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
203 	(uint64_t)0xFFFFFFFF,	/* dma_attr_seg */
204 	(int)1,			/* dma_attr_sgllen */
	(uint32_t)1,		/* dma_attr_granular */
206 	(uint_t)0		/* dma_attr_flags */
207 };
208 
209 /* PIO access attributes for registers */
210 static ddi_device_acc_attr_t pcn_acc_attr = {
211 	DDI_DEVICE_ATTR_V0,
212 	DDI_STRUCTURE_LE_ACC,
213 	DDI_STRICTORDER_ACC
214 };
215 
216 #define	AMD8111S_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL)
217 
218 
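/*
 * GLD callback vector. The first field is the callback-flags word
 * (MC_RESOURCES | MC_IOCTL here), telling the MAC layer that the
 * optional m_resources and m_ioctl entry points below are implemented.
 */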
219 static mac_callbacks_t amd8111s_m_callbacks = {
220 	AMD8111S_M_CALLBACK_FLAGS,
221 	amd8111s_m_stat,
222 	amd8111s_m_start,
223 	amd8111s_m_stop,
224 	amd8111s_m_promisc,
225 	amd8111s_m_multicst,
226 	amd8111s_m_unicst,
227 	amd8111s_m_tx,
228 	amd8111s_m_resources,
229 	amd8111s_m_ioctl
230 };
231 
232 
233 /*
 * Standard driver load entry point.
 * Called when the driver module is loaded.
236  */
237 int
238 _init()
239 {
240 	int status;
241 	mac_init_ops(&amd8111s_dev_ops, "amd8111s");
242 
243 	status = mod_install(&amd8111s_modlinkage);
244 	if (status != DDI_SUCCESS) {
245 		mac_fini_ops(&amd8111s_dev_ops);
246 	}
247 
248 	return (status);
249 }
250 
251 /*
 * Standard driver entry point for queries.
 * Called at any time to report module information.
254  */
255 int
256 _info(struct modinfo *modinfop)
257 {
258 	return (mod_info(&amd8111s_modlinkage, modinfop));
259 }
260 
261 /*
262  *	Standard Driver Entry Point for Unload.
263  *	It will be called at unload time of driver.
264  */
265 int
266 _fini()
267 {
268 	int status;
269 
270 	status = mod_remove(&amd8111s_modlinkage);
271 	if (status == DDI_SUCCESS) {
272 		mac_fini_ops(&amd8111s_dev_ops);
273 	}
274 
275 	return (status);
276 }
277 
/*
 * GLD blanking callback for the Interrupt Coalescing Register. This
 * driver leaves the hardware coalescing settings alone, so the
 * callback is an intentional no-op.
 */
279 static void
280 amd8111s_m_blank(void *arg, time_t ticks, uint32_t count)
281 {
282 	_NOTE(ARGUNUSED(arg, ticks, count));
283 }
284 
285 static void
286 amd8111s_m_resources(void *arg)
287 {
288 	struct LayerPointers *adapter = arg;
289 	mac_rx_fifo_t mrf;
290 
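	/*
	 * Register a single Rx FIFO resource with the MAC layer. The
	 * blank-time/packet-count values below (128 ticks, 8 packets)
	 * are interrupt-blanking hints; amd8111s_m_blank() above
	 * ignores any runtime updates to them.
	 */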
291 	mrf.mrf_type = MAC_RX_FIFO;
292 	mrf.mrf_blank = amd8111s_m_blank;
293 	mrf.mrf_arg = (void *)adapter;
294 	mrf.mrf_normal_blank_time = 128;
295 	mrf.mrf_normal_pkt_count = 8;
296 
297 	adapter->pOdl->mrh = mac_resource_add(adapter->pOdl->mh,
298 	    (mac_resource_t *)&mrf);
299 }
300 
301 /*
302  * Loopback Support
303  */
304 static lb_property_t loopmodes[] = {
305 	{ normal,	"normal",	AMD8111S_LB_NONE		},
306 	{ external,	"100Mbps",	AMD8111S_LB_EXTERNAL_100	},
307 	{ external,	"10Mbps",	AMD8111S_LB_EXTERNAL_10		},
308 	{ internal,	"MAC",		AMD8111S_LB_INTERNAL_MAC	}
309 };
310 
311 static void
312 amd8111s_set_loop_mode(struct LayerPointers *adapter, uint32_t mode)
313 {
314 
315 	/*
316 	 * If the mode isn't being changed, there's nothing to do ...
317 	 */
318 	if (mode == adapter->pOdl->loopback_mode)
319 		return;
320 
321 	/*
322 	 * Validate the requested mode and prepare a suitable message
323 	 * to explain the link down/up cycle that the change will
324 	 * probably induce ...
325 	 */
326 	switch (mode) {
327 	default:
328 		return;
329 
330 	case AMD8111S_LB_NONE:
331 		mdlStopChip(adapter);
332 		if (adapter->pOdl->loopback_mode == AMD8111S_LB_INTERNAL_MAC) {
			cmn_err(CE_NOTE, "LB_NONE restored from Internal LB");
334 			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
335 			    INLOOP);
336 			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
337 			    FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
338 		} else {
			cmn_err(CE_NOTE, "LB_NONE restored from External LB");
340 			WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
341 			    EXLOOP);
342 		}
343 
344 		amd8111s_reset(adapter);
345 		adapter->pOdl->LinkStatus = LINK_STATE_DOWN;
346 		adapter->pOdl->rx_fcs_stripped = B_FALSE;
347 		mdlStartChip(adapter);
348 		break;
349 
350 	case AMD8111S_LB_EXTERNAL_100:
351 		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_100");
352 		mdlStopChip(adapter);
353 		amd8111s_reset(adapter);
354 		SetIntrCoalesc(adapter, B_FALSE);
355 		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_100);
356 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
357 		    VAL0 | EXLOOP);
358 		adapter->pOdl->LinkStatus = LINK_STATE_UP;
359 		adapter->pMdl->Speed = 100;
360 		adapter->pMdl->FullDuplex = B_TRUE;
361 		/* Tell GLD the state of the physical link. */
362 		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);
363 
364 		adapter->pOdl->rx_fcs_stripped = B_TRUE;
365 
366 		mdlStartChip(adapter);
367 		break;
368 
369 	case AMD8111S_LB_EXTERNAL_10:
370 		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_EXTERNAL_10");
371 		mdlStopChip(adapter);
372 		amd8111s_reset(adapter);
373 		SetIntrCoalesc(adapter, B_FALSE);
374 		mdlPHYAutoNegotiation(adapter, PHY_FORCE_FD_10);
375 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
376 		    VAL0 | EXLOOP);
377 		adapter->pOdl->LinkStatus = LINK_STATE_UP;
378 		adapter->pMdl->Speed = 10;
379 		adapter->pMdl->FullDuplex = B_TRUE;
380 		/* Tell GLD the state of the physical link. */
381 		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);
382 
383 		adapter->pOdl->rx_fcs_stripped = B_TRUE;
384 
385 		mdlStartChip(adapter);
386 		break;
387 
388 	case AMD8111S_LB_INTERNAL_MAC:
389 		cmn_err(CE_NOTE, "amd8111s_set_loop_mode LB_INTERNAL_MAC");
390 		mdlStopChip(adapter);
391 		amd8111s_reset(adapter);
392 		SetIntrCoalesc(adapter, B_FALSE);
393 		/* Disable Port Manager */
394 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
395 		    EN_PMGR);
396 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD2,
397 		    VAL0 | INLOOP);
398 
399 		WRITE_REG32(adapter, adapter->pMdl->Mem_Address + CMD3,
400 		    VAL1 | FORCE_FULL_DUPLEX | FORCE_LINK_STATUS);
401 
402 		adapter->pOdl->LinkStatus = LINK_STATE_UP;
403 		adapter->pMdl->FullDuplex = B_TRUE;
404 		/* Tell GLD the state of the physical link. */
405 		mac_link_update(adapter->pOdl->mh, LINK_STATE_UP);
406 
407 		adapter->pOdl->rx_fcs_stripped = B_TRUE;
408 
409 		mdlStartChip(adapter);
410 		break;
411 	}
412 
413 	/*
414 	 * All OK; tell the caller to reprogram
415 	 * the PHY and/or MAC for the new mode ...
416 	 */
417 	adapter->pOdl->loopback_mode = mode;
418 }
419 
420 static enum ioc_reply
421 amd8111s_loopback_ioctl(struct LayerPointers *adapter, struct iocblk *iocp,
422     mblk_t *mp)
423 {
424 	lb_info_sz_t *lbsp;
425 	lb_property_t *lbpp;
426 	uint32_t *lbmp;
427 	int cmd;
428 
429 	/*
430 	 * Validate format of ioctl
431 	 */
432 	if (mp->b_cont == NULL)
433 		return (IOC_INVAL);
434 
435 	cmd = iocp->ioc_cmd;
436 	switch (cmd) {
437 	default:
		/* unknown command */
439 		amd8111s_log(adapter, CE_NOTE,
440 		    "amd8111s_loop_ioctl: invalid cmd 0x%x", cmd);
441 		return (IOC_INVAL);
442 
443 	case LB_GET_INFO_SIZE:
444 		if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
445 			amd8111s_log(adapter, CE_NOTE,
446 			    "wrong LB_GET_INFO_SIZE size");
447 			return (IOC_INVAL);
448 		}
449 		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
450 		*lbsp = sizeof (loopmodes);
451 		break;
452 
453 	case LB_GET_INFO:
454 		if (iocp->ioc_count != sizeof (loopmodes)) {
455 			amd8111s_log(adapter, CE_NOTE,
456 			    "Wrong LB_GET_INFO size");
457 			return (IOC_INVAL);
458 		}
459 		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
460 		bcopy(loopmodes, lbpp, sizeof (loopmodes));
461 		break;
462 
463 	case LB_GET_MODE:
464 		if (iocp->ioc_count != sizeof (uint32_t)) {
465 			amd8111s_log(adapter, CE_NOTE,
466 			    "Wrong LB_GET_MODE size");
467 			return (IOC_INVAL);
468 		}
469 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
470 		*lbmp = adapter->pOdl->loopback_mode;
471 		break;
472 
473 	case LB_SET_MODE:
474 		if (iocp->ioc_count != sizeof (uint32_t)) {
475 			amd8111s_log(adapter, CE_NOTE,
476 			    "Wrong LB_SET_MODE size");
477 			return (IOC_INVAL);
478 		}
479 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
480 		amd8111s_set_loop_mode(adapter, *lbmp);
481 		break;
482 	}
483 	return (IOC_REPLY);
484 }
485 
486 static void
487 amd8111s_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
488 {
489 	struct iocblk *iocp;
490 	struct LayerPointers *adapter;
491 	enum ioc_reply status;
492 
493 	iocp = (struct iocblk *)mp->b_rptr;
494 	iocp->ioc_error = 0;
495 	adapter = (struct LayerPointers *)arg;
496 
497 	ASSERT(adapter);
498 	if (adapter == NULL) {
499 		miocnak(q, mp, 0, EINVAL);
500 		return;
501 	}
502 
503 	switch (iocp->ioc_cmd) {
504 
505 	case LB_GET_INFO_SIZE:
506 	case LB_GET_INFO:
507 	case LB_GET_MODE:
508 	case LB_SET_MODE:
509 		status = amd8111s_loopback_ioctl(adapter, iocp, mp);
510 		break;
511 
512 	default:
513 		status = IOC_INVAL;
514 		break;
515 	}
516 
517 	/*
518 	 * Decide how to reply
519 	 */
520 	switch (status) {
521 	default:
522 	case IOC_INVAL:
523 		/*
524 		 * Error, reply with a NAK and EINVAL or the specified error
525 		 */
526 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
527 		    EINVAL : iocp->ioc_error);
528 		break;
529 
530 	case IOC_DONE:
531 		/*
532 		 * OK, reply already sent
533 		 */
534 		break;
535 
536 	case IOC_ACK:
537 		/*
538 		 * OK, reply with an ACK
539 		 */
540 		miocack(q, mp, 0, 0);
541 		break;
542 
543 	case IOC_REPLY:
544 		/*
545 		 * OK, send prepared reply as ACK or NAK
546 		 */
547 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
548 		    M_IOCACK : M_IOCNAK;
549 		qreply(q, mp);
550 		break;
551 	}
552 }
553 
554 /*
 * Copy one packet from DMA memory into an mblk and advance the DMA
 * descriptor pointer.
556  */
557 static boolean_t
558 amd8111s_recv_copy(struct LayerPointers *pLayerPointers, mblk_t **last_mp)
559 {
560 	int length = 0;
561 	mblk_t *mp;
562 	struct rx_desc *descriptor;
563 	struct odl *pOdl = pLayerPointers->pOdl;
564 	struct amd8111s_statistics *statistics = &pOdl->statistics;
565 	struct nonphysical *pNonphysical = pLayerPointers->pMil
566 	    ->pNonphysical;
567 
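	/*
	 * The controller clears Rx_OWN in a descriptor once it has
	 * filled the corresponding buffer, so sync just that descriptor
	 * from DMA memory and test the OWN bit before touching the data.
	 */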
568 	mutex_enter(&pOdl->mdlRcvLock);
569 	descriptor = pNonphysical->RxBufDescQRead->descriptor;
	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle,
	    (pNonphysical->RxBufDescQRead->descriptor -
	    pNonphysical->RxBufDescQStart->descriptor) *
	    sizeof (struct rx_desc),
	    sizeof (struct rx_desc), DDI_DMA_SYNC_FORCPU);
	if ((descriptor->Rx_OWN) == 0) {
		/*
		 * If the frame was received with errors, set the MCNT
		 * of that packet in ReceiveArray to 0. Such a packet is
		 * discarded later and never indicated to the OS.
		 */
		if (descriptor->Rx_ERR) {
			statistics->rx_desc_err++;
			descriptor->Rx_ERR = 0;
			if (descriptor->Rx_FRAM == 1) {
				statistics->rx_desc_err_FRAM++;
				descriptor->Rx_FRAM = 0;
			}
			if (descriptor->Rx_OFLO == 1) {
				statistics->rx_desc_err_OFLO++;
				descriptor->Rx_OFLO = 0;
				pOdl->rx_overflow_counter++;
				if ((pOdl->rx_overflow_counter > 5) &&
				    (pOdl->pause_interval == 0)) {
					statistics->rx_double_overflow++;
					mdlSendPause(pLayerPointers);
					pOdl->rx_overflow_counter = 0;
					pOdl->pause_interval = 25;
				}
			}
			if (descriptor->Rx_CRC == 1) {
				statistics->rx_desc_err_CRC++;
				descriptor->Rx_CRC = 0;
			}
			if (descriptor->Rx_BUFF == 1) {
				statistics->rx_desc_err_BUFF++;
				descriptor->Rx_BUFF = 0;
			}
			goto Next_Descriptor;
		}
608 		}
609 
		/* Length of incoming packet */
		if (pOdl->rx_fcs_stripped) {
			length = descriptor->Rx_MCNT - 4;
		} else {
			length = descriptor->Rx_MCNT;
		}
		if (length < 62) {
			statistics->rx_error_zerosize++;
		}
619 
620 		if ((mp = allocb(length, BPRI_MED)) == NULL) {
			statistics->rx_allocfail++;
622 			goto failed;
623 		}
624 		/* Copy from virtual address of incoming packet */
625 		bcopy((long *)*(pNonphysical->RxBufDescQRead->USpaceMap),
626 		    mp->b_rptr, length);
627 		mp->b_wptr = mp->b_rptr + length;
		statistics->rx_ok_packets++;
629 		if (*last_mp == NULL) {
630 			*last_mp = mp;
631 		} else {
632 			(*last_mp)->b_next = mp;
633 			*last_mp = mp;
634 		}
635 
636 Next_Descriptor:
637 		descriptor->Rx_MCNT = 0;
638 		descriptor->Rx_SOP = 0;
639 		descriptor->Rx_EOP = 0;
640 		descriptor->Rx_PAM = 0;
641 		descriptor->Rx_BAM = 0;
642 		descriptor->TT = 0;
643 		descriptor->Rx_OWN = 1;
644 		pNonphysical->RxBufDescQRead->descriptor++;
645 		pNonphysical->RxBufDescQRead->USpaceMap++;
646 		if (pNonphysical->RxBufDescQRead->descriptor >
647 		    pNonphysical->RxBufDescQEnd->descriptor) {
648 			pNonphysical->RxBufDescQRead->descriptor =
649 			    pNonphysical->RxBufDescQStart->descriptor;
650 			pNonphysical->RxBufDescQRead->USpaceMap =
651 			    pNonphysical->RxBufDescQStart->USpaceMap;
652 		}
653 		mutex_exit(&pOdl->mdlRcvLock);
654 
655 		return (B_TRUE);
656 	}
657 
658 failed:
659 	mutex_exit(&pOdl->mdlRcvLock);
660 	return (B_FALSE);
661 }
662 
663 /*
664  * Get the received packets from NIC card and send them to GLD.
665  */
666 static void
667 amd8111s_receive(struct LayerPointers *pLayerPointers)
668 {
669 	int numOfPkts = 0;
670 	struct odl *pOdl;
671 	mblk_t *ret_mp = NULL, *last_mp = NULL;
672 
673 	pOdl = pLayerPointers->pOdl;
674 
675 	rw_enter(&pOdl->chip_lock, RW_READER);
676 	if (!pLayerPointers->run) {
677 		rw_exit(&pOdl->chip_lock);
678 		return;
679 	}
680 
681 	if (pOdl->pause_interval > 0)
		pOdl->pause_interval--;
683 
684 	while (numOfPkts < RX_RING_SIZE) {
685 
686 		if (!amd8111s_recv_copy(pLayerPointers, &last_mp)) {
687 			break;
688 		}
689 		if (ret_mp == NULL)
690 			ret_mp = last_mp;
691 		numOfPkts++;
692 	}
693 
694 	if (ret_mp) {
695 		mac_rx(pOdl->mh, pOdl->mrh, ret_mp);
696 	}
697 
698 	(void) ddi_dma_sync(pOdl->rx_desc_dma_handle, 0, 0,
699 	    DDI_DMA_SYNC_FORDEV);
700 
701 	mdlReceive(pLayerPointers);
702 
703 	rw_exit(&pOdl->chip_lock);
704 
705 }
706 
707 /*
708  * Print message in release-version driver.
709  */
710 static void
711 amd8111s_log(struct LayerPointers *adapter, int level, char *fmt, ...)
712 {
	char name[32];
	char buf[256];
715 	va_list ap;
716 
	if (adapter != NULL) {
		(void) snprintf(name, sizeof (name), "amd8111s%d",
		    ddi_get_instance(adapter->pOdl->devinfo));
	} else {
		(void) snprintf(name, sizeof (name), "amd8111s");
	}
	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
725 	va_end(ap);
726 	cmn_err(level, "%s: %s", name, buf);
727 }
728 
729 /*
 * Allocate and initialize all resources.
731  * Called by amd8111s_attach().
732  */
733 static int
734 amd8111s_odlInit(struct LayerPointers *pLayerPointers)
735 {
736 	unsigned long mem_req_array[MEM_REQ_MAX];
737 	unsigned long mem_set_array[MEM_REQ_MAX];
738 	unsigned long *pmem_req_array;
739 	unsigned long *pmem_set_array;
740 	int i, size;
741 
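	/*
	 * mem_req_array is a tag/value list filled in by
	 * milRequestResources(): a VIRTUAL tag is followed by the
	 * requested size, and a zero tag terminates the list.
	 * mem_set_array echoes each tag and size, followed by the
	 * address actually allocated.
	 */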
742 	for (i = 0; i < MEM_REQ_MAX; i++) {
743 		mem_req_array[i] = 0;
744 		mem_set_array[i] = 0;
745 	}
746 
747 	milRequestResources(mem_req_array);
748 
749 	pmem_req_array = mem_req_array;
750 	pmem_set_array = mem_set_array;
751 	while (*pmem_req_array) {
752 		switch (*pmem_req_array) {
753 		case VIRTUAL:
754 			*pmem_set_array = VIRTUAL;
755 			pmem_req_array++;
756 			pmem_set_array++;
757 			*(pmem_set_array) = *(pmem_req_array);
758 			pmem_set_array++;
			*(pmem_set_array) = (unsigned long) kmem_zalloc(
			    *(pmem_req_array), KM_NOSLEEP);
			if (*pmem_set_array == 0)
				goto odl_init_failure;
763 			break;
764 		}
765 		pmem_req_array++;
766 		pmem_set_array++;
767 	}
768 
769 	/*
	 * Initialize memory on the lower layers
771 	 */
772 	milSetResources(pLayerPointers, mem_set_array);
773 
774 	/* Allocate Rx/Tx descriptors */
775 	if (amd8111s_allocate_descriptors(pLayerPointers) != B_TRUE) {
776 		*pmem_set_array = NULL;
777 		goto odl_init_failure;
778 	}
779 
780 	/*
781 	 * Allocate Rx buffer for each Rx descriptor. Then call mil layer
782 	 * routine to fill physical address of Rx buffer into Rx descriptor.
783 	 */
784 	if (amd8111s_allocate_buffers(pLayerPointers) == B_FALSE) {
785 		amd8111s_free_descriptors(pLayerPointers);
786 		*pmem_set_array = NULL;
787 		goto odl_init_failure;
788 	}
789 	milInitGlbds(pLayerPointers);
790 
791 	return (0);
792 
793 odl_init_failure:
794 	/*
795 	 * Free All memory allocated so far
796 	 */
797 	pmem_req_array = mem_set_array;
798 	while ((*pmem_req_array) && (pmem_req_array != pmem_set_array)) {
799 		switch (*pmem_req_array) {
800 		case VIRTUAL:
801 			pmem_req_array++;	/* Size */
802 			size = *(pmem_req_array);
			pmem_req_array++;	/* Virtual Address */
			if (*pmem_req_array == 0)
				return (1);
			kmem_free((void *)*pmem_req_array, size);
807 			break;
808 		}
809 		pmem_req_array++;
810 	}
811 	return (1);
812 }
813 
814 /*
815  * Allocate and initialize Tx/Rx descriptors
816  */
817 static boolean_t
818 amd8111s_allocate_descriptors(struct LayerPointers *pLayerPointers)
819 {
820 	struct odl *pOdl = pLayerPointers->pOdl;
821 	struct mil *pMil = pLayerPointers->pMil;
822 	dev_info_t *devinfo = pOdl->devinfo;
823 	uint_t length, count, i;
824 	size_t real_length;
825 
826 	/*
827 	 * Allocate Rx descriptors
828 	 */
829 	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
830 	    NULL, &pOdl->rx_desc_dma_handle) != DDI_SUCCESS) {
831 		amd8111s_log(pLayerPointers, CE_WARN,
832 		    "ddi_dma_alloc_handle for Rx desc failed");
833 		pOdl->rx_desc_dma_handle = NULL;
834 		return (B_FALSE);
835 	}
836 
837 	length = sizeof (struct rx_desc) * RX_RING_SIZE + ALIGNMENT;
838 	if (ddi_dma_mem_alloc(pOdl->rx_desc_dma_handle, length,
839 	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
840 	    NULL, (caddr_t *)&pMil->Rx_desc_original, &real_length,
841 	    &pOdl->rx_desc_acc_handle) != DDI_SUCCESS) {
842 
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Rx desc failed");
845 		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
846 		pOdl->rx_desc_dma_handle = NULL;
847 		return (B_FALSE);
848 	}
849 
850 	if (ddi_dma_addr_bind_handle(pOdl->rx_desc_dma_handle,
851 	    NULL, (caddr_t)pMil->Rx_desc_original, real_length,
852 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
853 	    NULL, &pOdl->rx_desc_dma_cookie,
854 	    &count) != DDI_SUCCESS) {
855 
856 		amd8111s_log(pLayerPointers, CE_WARN,
857 		    "ddi_dma_addr_bind_handle for Rx desc failed");
858 		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
859 		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
860 		pOdl->rx_desc_dma_handle = NULL;
861 		return (B_FALSE);
862 	}
863 	ASSERT(count == 1);
864 
865 	/* Initialize Rx descriptors related variables */
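	/*
	 * ALIGNMENT is used as a mask here: adding it and then clearing
	 * its bits rounds the address up to the next aligned boundary
	 * (assuming ALIGNMENT is a power of two minus one, e.g. 0x0f
	 * for 16-byte alignment). The same trick is applied to the Tx
	 * descriptors below.
	 */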
866 	pMil->Rx_desc = (struct rx_desc *)
867 	    ((pMil->Rx_desc_original + ALIGNMENT) & ~ALIGNMENT);
868 	pMil->Rx_desc_pa = (unsigned int)
869 	    ((pOdl->rx_desc_dma_cookie.dmac_laddress + ALIGNMENT) & ~ALIGNMENT);
870 
871 	pLayerPointers->pMdl->init_blk->RDRA = pMil->Rx_desc_pa;
872 
873 
874 	/*
875 	 * Allocate Tx descriptors
876 	 */
877 	if (ddi_dma_alloc_handle(devinfo, &pcn_desc_dma_attr_t, DDI_DMA_SLEEP,
878 	    NULL, &pOdl->tx_desc_dma_handle) != DDI_SUCCESS) {
879 		amd8111s_log(pLayerPointers, CE_WARN,
880 		    "ddi_dma_alloc_handle for Tx desc failed");
881 		goto allocate_desc_fail;
882 	}
883 
884 	length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
885 	if (ddi_dma_mem_alloc(pOdl->tx_desc_dma_handle, length,
886 	    &pcn_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
887 	    NULL, (caddr_t *)&pMil->Tx_desc_original, &real_length,
888 	    &pOdl->tx_desc_acc_handle) != DDI_SUCCESS) {
889 
		amd8111s_log(pLayerPointers, CE_WARN,
		    "ddi_dma_mem_alloc for Tx desc failed");
892 		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
893 		goto allocate_desc_fail;
894 	}
895 
896 	if (ddi_dma_addr_bind_handle(pOdl->tx_desc_dma_handle,
897 	    NULL, (caddr_t)pMil->Tx_desc_original, real_length,
898 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
899 	    NULL, &pOdl->tx_desc_dma_cookie,
900 	    &count) != DDI_SUCCESS) {
901 
902 		amd8111s_log(pLayerPointers, CE_WARN,
903 		    "ddi_dma_addr_bind_handle for Tx desc failed");
904 		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
905 		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
906 		goto allocate_desc_fail;
907 	}
908 	ASSERT(count == 1);
909 	/* Set the DMA area to all zeros */
910 	bzero((caddr_t)pMil->Tx_desc_original, length);
911 
912 	/* Initialize Tx descriptors related variables */
913 	pMil->Tx_desc = (struct tx_desc *)
914 	    ((pMil->Tx_desc_original + ALIGNMENT) & ~ALIGNMENT);
915 	pMil->pNonphysical->TxDescQRead = pMil->Tx_desc;
916 	pMil->pNonphysical->TxDescQWrite = pMil->Tx_desc;
917 	pMil->pNonphysical->TxDescQStart = pMil->Tx_desc;
	pMil->pNonphysical->TxDescQEnd = &(pMil->Tx_desc[TX_RING_SIZE - 1]);
919 
920 	/* Physical Addr of Tx_desc_original & Tx_desc */
921 	pLayerPointers->pMil->Tx_desc_pa =
922 	    ((pOdl->tx_desc_dma_cookie.dmac_laddress + ALIGNMENT) &
923 	    ~ALIGNMENT);
924 
925 	/* Setting the reserved bits in the tx descriptors */
926 	for (i = 0; i < TX_RING_SIZE; i++) {
927 		pMil->pNonphysical->TxDescQWrite->Tx_RES0 = 0x0f;
928 		pMil->pNonphysical->TxDescQWrite->Tx_OWN = 0;
929 		pMil->pNonphysical->TxDescQWrite++;
930 	}
931 	pMil->pNonphysical->TxDescQWrite = pMil->pNonphysical->TxDescQStart;
932 
933 	pLayerPointers->pMdl->init_blk->TDRA = pMil->Tx_desc_pa;
934 
935 	return (B_TRUE);
936 
937 allocate_desc_fail:
938 	pOdl->tx_desc_dma_handle = NULL;
939 	(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
940 	ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
941 	ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
942 	pOdl->rx_desc_dma_handle = NULL;
943 	return (B_FALSE);
944 }
945 
946 /*
947  * Free Tx/Rx descriptors
948  */
949 static void
950 amd8111s_free_descriptors(struct LayerPointers *pLayerPointers)
951 {
952 	struct odl *pOdl = pLayerPointers->pOdl;
953 
954 	/* Free Rx descriptors */
955 	if (pOdl->rx_desc_dma_handle) {
956 		(void) ddi_dma_unbind_handle(pOdl->rx_desc_dma_handle);
957 		ddi_dma_mem_free(&pOdl->rx_desc_acc_handle);
958 		ddi_dma_free_handle(&pOdl->rx_desc_dma_handle);
959 		pOdl->rx_desc_dma_handle = NULL;
960 	}
961 
	/* Free Tx descriptors */
963 	if (pOdl->tx_desc_dma_handle) {
964 		(void) ddi_dma_unbind_handle(pOdl->tx_desc_dma_handle);
965 		ddi_dma_mem_free(&pOdl->tx_desc_acc_handle);
966 		ddi_dma_free_handle(&pOdl->tx_desc_dma_handle);
967 		pOdl->tx_desc_dma_handle = NULL;
968 	}
969 }
970 
971 /*
972  * Allocate Tx/Rx Ring buffer
973  */
974 static boolean_t
975 amd8111s_alloc_dma_ringbuf(struct LayerPointers *pLayerPointers,
976 			struct amd8111s_dma_ringbuf *pRing,
977 			uint32_t ring_size, uint32_t msg_size)
978 {
979 	uint32_t idx, msg_idx = 0, msg_acc;
980 	dev_info_t *devinfo = pLayerPointers->pOdl->devinfo;
981 	size_t real_length;
982 	uint_t count = 0;
983 
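	/*
	 * The ring buffer is carved out of up to AMD8111S_SPLIT (128)
	 * DMA "trunks", capped at ring_size. Each trunk is one
	 * contiguous DMA allocation, and every msg_buf entry records
	 * the virtual/physical address of its msg_size slice within a
	 * trunk.
	 */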
984 	ASSERT(pcn_buff_dma_attr_t.dma_attr_align == 1);
985 	pRing->dma_buf_sz = msg_size;
986 	pRing->ring_size = ring_size;
987 	pRing->trunk_num = AMD8111S_SPLIT;
988 	pRing->buf_sz = msg_size * ring_size;
989 	if (ring_size < pRing->trunk_num)
990 		pRing->trunk_num = ring_size;
991 	ASSERT((pRing->buf_sz % pRing->trunk_num) == 0);
992 
993 	pRing->trunk_sz = pRing->buf_sz / pRing->trunk_num;
994 	ASSERT((pRing->trunk_sz % pRing->dma_buf_sz) == 0);
995 
996 	pRing->msg_buf = kmem_zalloc(sizeof (struct amd8111s_msgbuf) *
997 	    ring_size, KM_NOSLEEP);
998 	pRing->dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
999 	    pRing->trunk_num, KM_NOSLEEP);
1000 	pRing->acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
1001 	    pRing->trunk_num, KM_NOSLEEP);
1002 	pRing->dma_cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) *
1003 	    pRing->trunk_num, KM_NOSLEEP);
1004 	pRing->trunk_addr = kmem_zalloc(sizeof (caddr_t) *
1005 	    pRing->trunk_num, KM_NOSLEEP);
1006 	if (pRing->msg_buf == NULL || pRing->dma_hdl == NULL ||
1007 	    pRing->acc_hdl == NULL || pRing->trunk_addr == NULL ||
1008 	    pRing->dma_cookie == NULL) {
1009 		amd8111s_log(pLayerPointers, CE_NOTE,
1010 		    "kmem_zalloc failed");
1011 		goto failed;
1012 	}
1013 
1014 	for (idx = 0; idx < pRing->trunk_num; ++idx) {
1015 		if (ddi_dma_alloc_handle(devinfo, &pcn_buff_dma_attr_t,
1016 		    DDI_DMA_SLEEP, NULL, &(pRing->dma_hdl[idx]))
1017 		    != DDI_SUCCESS) {
1018 
1019 			amd8111s_log(pLayerPointers, CE_WARN,
1020 			    "ddi_dma_alloc_handle failed");
1021 			goto failed;
1022 		} else if (ddi_dma_mem_alloc(pRing->dma_hdl[idx],
1023 		    pRing->trunk_sz, &pcn_acc_attr, DDI_DMA_STREAMING,
1024 		    DDI_DMA_SLEEP, NULL,
1025 		    (caddr_t *)&(pRing->trunk_addr[idx]),
1026 		    (size_t *)(&real_length), &pRing->acc_hdl[idx])
1027 		    != DDI_SUCCESS) {
1028 
1029 			amd8111s_log(pLayerPointers, CE_WARN,
1030 			    "ddi_dma_mem_alloc failed");
1031 			goto failed;
1032 		} else if (real_length != pRing->trunk_sz) {
			amd8111s_log(pLayerPointers, CE_WARN,
			    "ddi_dma_mem_alloc returned a short buffer");
1035 			goto failed;
1036 		} else if (ddi_dma_addr_bind_handle(pRing->dma_hdl[idx],
1037 		    NULL, (caddr_t)pRing->trunk_addr[idx], real_length,
1038 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
1039 		    &pRing->dma_cookie[idx], &count) != DDI_DMA_MAPPED) {
1040 
1041 			amd8111s_log(pLayerPointers, CE_WARN,
1042 			    "ddi_dma_addr_bind_handle failed");
1043 			goto failed;
1044 		} else {
1045 			for (msg_acc = 0;
1046 			    msg_acc < pRing->trunk_sz / pRing->dma_buf_sz;
			    msg_acc++) {
1048 				pRing->msg_buf[msg_idx].offset =
1049 				    msg_acc * pRing->dma_buf_sz;
1050 				pRing->msg_buf[msg_idx].vir_addr =
1051 				    pRing->trunk_addr[idx] +
1052 				    pRing->msg_buf[msg_idx].offset;
1053 				pRing->msg_buf[msg_idx].phy_addr =
1054 				    pRing->dma_cookie[idx].dmac_laddress +
1055 				    pRing->msg_buf[msg_idx].offset;
1056 				pRing->msg_buf[msg_idx].p_hdl =
1057 				    pRing->dma_hdl[idx];
				msg_idx++;
1059 			}
1060 		}
1061 	}
1062 
1063 	pRing->free = pRing->msg_buf;
1064 	pRing->next = pRing->msg_buf;
1065 	pRing->curr = pRing->msg_buf;
1066 
1067 	return (B_TRUE);
1068 failed:
1069 	amd8111s_free_dma_ringbuf(pRing);
1070 	return (B_FALSE);
1071 }
1072 
1073 /*
1074  * Free Tx/Rx ring buffer
1075  */
1076 static void
1077 amd8111s_free_dma_ringbuf(struct amd8111s_dma_ringbuf *pRing)
1078 {
1079 	int idx;
1080 
1081 	if (pRing->dma_cookie != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
1083 			if (pRing->dma_cookie[idx].dmac_laddress == 0) {
1084 				break;
1085 			}
1086 			(void) ddi_dma_unbind_handle(pRing->dma_hdl[idx]);
1087 		}
1088 		kmem_free(pRing->dma_cookie,
1089 		    sizeof (ddi_dma_cookie_t) * pRing->trunk_num);
1090 	}
1091 
1092 	if (pRing->acc_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
1094 			if (pRing->acc_hdl[idx] == NULL)
1095 				break;
1096 			ddi_dma_mem_free(&pRing->acc_hdl[idx]);
1097 		}
1098 		kmem_free(pRing->acc_hdl,
1099 		    sizeof (ddi_acc_handle_t) * pRing->trunk_num);
1100 	}
1101 
1102 	if (pRing->dma_hdl != NULL) {
		for (idx = 0; idx < pRing->trunk_num; idx++) {
			if (pRing->dma_hdl[idx] == NULL) {
1105 				break;
1106 			}
1107 			ddi_dma_free_handle(&pRing->dma_hdl[idx]);
1108 		}
1109 		kmem_free(pRing->dma_hdl,
1110 		    sizeof (ddi_dma_handle_t) * pRing->trunk_num);
1111 	}
1112 
1113 	if (pRing->msg_buf != NULL) {
1114 		kmem_free(pRing->msg_buf,
1115 		    sizeof (struct amd8111s_msgbuf) * pRing->ring_size);
1116 	}
1117 
1118 	if (pRing->trunk_addr != NULL) {
1119 		kmem_free(pRing->trunk_addr,
1120 		    sizeof (caddr_t) * pRing->trunk_num);
1121 	}
1122 
1123 	bzero(pRing, sizeof (*pRing));
1124 }
1125 
1126 
1127 /*
 * Allocate all Tx buffers.
 * Allocate an Rx buffer for each Rx descriptor, then call the mil
 * routine to fill the physical address of each Rx buffer into its
 * Rx descriptor.
1132  */
1133 static boolean_t
1134 amd8111s_allocate_buffers(struct LayerPointers *pLayerPointers)
1135 {
1136 	struct odl *pOdl = pLayerPointers->pOdl;
1137 
1138 	/*
1139 	 * Allocate rx Buffers
1140 	 */
1141 	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->rx_buf,
1142 	    RX_RING_SIZE, RX_BUF_SIZE) == B_FALSE) {
		amd8111s_log(pLayerPointers, CE_WARN,
		    "amd8111s_alloc_dma_ringbuf for rx failed");
1145 		goto allocate_buf_fail;
1146 	}
1147 
1148 	/*
1149 	 * Allocate Tx buffers
1150 	 */
1151 	if (amd8111s_alloc_dma_ringbuf(pLayerPointers, &pOdl->tx_buf,
1152 	    TX_COALESC_SIZE, TX_BUF_SIZE) == B_FALSE) {
1153 		amd8111s_log(pLayerPointers, CE_WARN,
1154 		    "amd8111s_alloc_dma_ringbuf for tx failed");
1155 		goto allocate_buf_fail;
1156 	}
1157 
1158 	/*
	 * Initialize the mil queues
1160 	 */
1161 	milInitGlbds(pLayerPointers);
1162 
1163 	milInitRxQ(pLayerPointers);
1164 
1165 	return (B_TRUE);
1166 
1167 allocate_buf_fail:
1168 
1169 	amd8111s_log(pLayerPointers, CE_WARN,
1170 	    "amd8111s_allocate_buffers failed");
1171 	return (B_FALSE);
1172 }
1173 
1174 /*
 * Free all Rx/Tx buffers
1176  */
1177 
1178 static void
1179 amd8111s_free_buffers(struct LayerPointers *pLayerPointers)
1180 {
1181 	/* Free Tx buffers */
1182 	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->tx_buf);
1183 
1184 	/* Free Rx Buffers */
1185 	amd8111s_free_dma_ringbuf(&pLayerPointers->pOdl->rx_buf);
1186 }
1187 
1188 /*
1189  * Try to recycle all the descriptors and Tx buffers
1190  * which are already freed by hardware.
1191  */
1192 static int
1193 amd8111s_recycle_tx(struct LayerPointers *pLayerPointers)
1194 {
1195 	struct nonphysical *pNonphysical;
1196 	uint32_t count = 0;
1197 
1198 	pNonphysical = pLayerPointers->pMil->pNonphysical;
1199 	while (pNonphysical->TxDescQRead->Tx_OWN == 0 &&
1200 	    pNonphysical->TxDescQRead != pNonphysical->TxDescQWrite) {
1201 		pLayerPointers->pOdl->tx_buf.free =
1202 		    NEXT(pLayerPointers->pOdl->tx_buf, free);
1203 		pNonphysical->TxDescQRead++;
1204 		if (pNonphysical->TxDescQRead > pNonphysical->TxDescQEnd) {
1205 			pNonphysical->TxDescQRead = pNonphysical->TxDescQStart;
1206 		}
		count++;
1208 	}
1209 
1210 	if (pLayerPointers->pMil->tx_reschedule)
1211 		ddi_trigger_softintr(pLayerPointers->pOdl->drain_id);
1212 
1213 	return (count);
1214 }
1215 
1216 /*
1217  * Get packets in the Tx buffer, then copy them to the send buffer.
1218  * Trigger hardware to send out packets.
1219  */
1220 static void
1221 amd8111s_send_serial(struct LayerPointers *pLayerPointers)
1222 {
1223 	struct nonphysical *pNonphysical;
1224 	uint32_t count;
1225 
1226 	pNonphysical = pLayerPointers->pMil->pNonphysical;
1227 
1228 	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
1229 
	for (count = 0; count < AMD8111S_SEND_MAX; count++) {
1231 		if (pLayerPointers->pOdl->tx_buf.curr ==
1232 		    pLayerPointers->pOdl->tx_buf.next) {
1233 			break;
1234 		}
		/* Check whether the Tx descriptor ring needs recycling */
		if (((pNonphysical->TxDescQWrite + 1 >
		    pNonphysical->TxDescQEnd) ? pNonphysical->TxDescQStart :
		    (pNonphysical->TxDescQWrite + 1)) ==
		    pNonphysical->TxDescQRead)
			if (amd8111s_recycle_tx(pLayerPointers) == 0) {
				pLayerPointers->pOdl
				    ->statistics.tx_no_descriptor++;
				break;
			}
1245 
1246 		/* Fill packet length */
1247 		pNonphysical->TxDescQWrite->Tx_BCNT = (uint16_t)pLayerPointers
1248 		    ->pOdl->tx_buf.curr->msg_size;
1249 
1250 		/* Fill physical buffer address */
1251 		pNonphysical->TxDescQWrite->Tx_Base_Addr = (unsigned int)
1252 		    pLayerPointers->pOdl->tx_buf.curr->phy_addr;
1253 
1254 		pNonphysical->TxDescQWrite->Tx_SOP = 1;
1255 		pNonphysical->TxDescQWrite->Tx_EOP = 1;
1256 		pNonphysical->TxDescQWrite->Tx_ADD_FCS = 1;
1257 		pNonphysical->TxDescQWrite->Tx_LTINT = 1;
1258 		pNonphysical->TxDescQWrite->Tx_USPACE = 0;
1259 		pNonphysical->TxDescQWrite->Tx_OWN = 1;
1260 
1261 		pNonphysical->TxDescQWrite++;
1262 		if (pNonphysical->TxDescQWrite > pNonphysical->TxDescQEnd) {
1263 			pNonphysical->TxDescQWrite = pNonphysical->TxDescQStart;
1264 		}
1265 
1266 		pLayerPointers->pOdl->tx_buf.curr =
1267 		    NEXT(pLayerPointers->pOdl->tx_buf, curr);
1268 
1269 	}
1270 
1271 	pLayerPointers->pOdl->statistics.tx_ok_packets += count;
1272 
1273 	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);
1274 
1275 	/* Call mdlTransmit to send the pkt out on the network */
1276 	mdlTransmit(pLayerPointers);
1277 
1278 }
1279 
1280 /*
 * Soft interrupt entry point. Try to send out the packets queued in
 * the Tx buffer. If a reschedule is pending and buffer space is
 * available again, call mac_tx_update() to re-enable transmission.
1284  */
1285 static uint_t
1286 amd8111s_send_drain(caddr_t arg)
1287 {
1288 	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
1289 
1290 	amd8111s_send_serial(pLayerPointers);
1291 
1292 	if (pLayerPointers->pMil->tx_reschedule &&
1293 	    NEXT(pLayerPointers->pOdl->tx_buf, next) !=
1294 	    pLayerPointers->pOdl->tx_buf.free) {
1295 		mac_tx_update(pLayerPointers->pOdl->mh);
1296 		pLayerPointers->pMil->tx_reschedule = B_FALSE;
1297 	}
1298 
1299 	return (DDI_INTR_CLAIMED);
1300 }
1301 
1302 /*
1303  * Get a Tx buffer
1304  */
1305 static struct amd8111s_msgbuf *
1306 amd8111s_getTxbuf(struct LayerPointers *pLayerPointers)
1307 {
1308 	struct amd8111s_msgbuf *tmp, *next;
1309 
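	/*
	 * tx_buf keeps three ring cursors: next (where producers claim
	 * a buffer), curr (buffers being handed to Tx descriptors) and
	 * free (buffers recycled after transmission). NEXT(), from
	 * amd8111s_main.h, advances a cursor with wraparound; the ring
	 * is full when advancing next would collide with free.
	 */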
1310 	mutex_enter(&pLayerPointers->pOdl->mdlSendLock);
1311 	next = NEXT(pLayerPointers->pOdl->tx_buf, next);
1312 	if (next == pLayerPointers->pOdl->tx_buf.free) {
1313 		tmp = NULL;
1314 	} else {
1315 		tmp = pLayerPointers->pOdl->tx_buf.next;
1316 		pLayerPointers->pOdl->tx_buf.next = next;
1317 	}
1318 	mutex_exit(&pLayerPointers->pOdl->mdlSendLock);
1319 
1320 	return (tmp);
1321 }
1322 
1323 static boolean_t
1324 amd8111s_send(struct LayerPointers *pLayerPointers, mblk_t *mp)
1325 {
1326 	struct odl *pOdl;
1327 	size_t frag_len;
1328 	mblk_t *tmp;
1329 	struct amd8111s_msgbuf *txBuf;
1330 	uint8_t *pMsg;
1331 
1332 	pOdl = pLayerPointers->pOdl;
1333 
1334 	/* alloc send buffer */
1335 	txBuf = amd8111s_getTxbuf(pLayerPointers);
1336 	if (txBuf == NULL) {
		pOdl->statistics.tx_no_buffer++;
1338 		pLayerPointers->pMil->tx_reschedule = B_TRUE;
1339 		amd8111s_send_serial(pLayerPointers);
1340 		return (B_FALSE);
1341 	}
1342 
1343 	/* copy packet to send buffer */
1344 	txBuf->msg_size = 0;
1345 	pMsg = (uint8_t *)txBuf->vir_addr;
1346 	for (tmp = mp; tmp; tmp = tmp->b_cont) {
1347 		frag_len = MBLKL(tmp);
1348 		bcopy(tmp->b_rptr, pMsg, frag_len);
1349 		txBuf->msg_size += frag_len;
1350 		pMsg += frag_len;
1351 	}
1352 	freemsg(mp);
1353 
1354 	amd8111s_send_serial(pLayerPointers);
1355 
1356 	return (B_TRUE);
1357 }
1358 
1359 /*
1360  * (GLD Entry Point) Send the message block to lower layer
1361  */
1362 static mblk_t *
1363 amd8111s_m_tx(void *arg, mblk_t *mp)
1364 {
1365 	struct LayerPointers *pLayerPointers = arg;
1366 	mblk_t *next;
1367 
1368 	rw_enter(&pLayerPointers->pOdl->chip_lock, RW_READER);
1369 	if (!pLayerPointers->run) {
		pLayerPointers->pOdl->statistics.tx_afterunplumb++;
1371 		freemsgchain(mp);
1372 		mp = NULL;
1373 	}
1374 
1375 	while (mp != NULL) {
1376 		next = mp->b_next;
1377 		mp->b_next = NULL;
1378 		if (!amd8111s_send(pLayerPointers, mp)) {
1379 			/* Send fail */
1380 			mp->b_next = next;
1381 			break;
1382 		}
1383 		mp = next;
1384 	}
1385 
1386 	rw_exit(&pLayerPointers->pOdl->chip_lock);
1387 	return (mp);
1388 }
1389 
1390 /*
1391  * (GLD Entry Point) Interrupt Service Routine
1392  */
1393 static uint_t
1394 amd8111s_intr(caddr_t arg)
1395 {
1396 	unsigned int intrCauses;
1397 	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
1398 
1399 	/* Read the interrupt status from mdl */
1400 	intrCauses = mdlReadInterrupt(pLayerPointers);
1401 
1402 	if (intrCauses == 0) {
		pLayerPointers->pOdl->statistics.intr_OTHER++;
1404 		return (DDI_INTR_UNCLAIMED);
1405 	}
1406 
1407 	if (intrCauses & LCINT) {
1408 		if (mdlReadLink(pLayerPointers) == LINK_UP) {
1409 			mdlGetActiveMediaInfo(pLayerPointers);
1410 			/* Link status changed */
1411 			if (pLayerPointers->pOdl->LinkStatus !=
1412 			    LINK_STATE_UP) {
1413 				pLayerPointers->pOdl->LinkStatus =
1414 				    LINK_STATE_UP;
1415 				mac_link_update(pLayerPointers->pOdl->mh,
1416 				    LINK_STATE_UP);
1417 			}
1418 		} else {
1419 			if (pLayerPointers->pOdl->LinkStatus !=
1420 			    LINK_STATE_DOWN) {
1421 				pLayerPointers->pOdl->LinkStatus =
1422 				    LINK_STATE_DOWN;
1423 				mac_link_update(pLayerPointers->pOdl->mh,
1424 				    LINK_STATE_DOWN);
1425 			}
1426 		}
1427 	}
1428 	/*
1429 	 * RINT0: Receive Interrupt is set by the controller after the last
1430 	 * descriptor of a receive frame for this ring has been updated by
1431 	 * writing a 0 to the OWNership bit.
1432 	 */
1433 	if (intrCauses & RINT0) {
		pLayerPointers->pOdl->statistics.intr_RINT0++;
1435 		amd8111s_receive(pLayerPointers);
1436 	}
1437 
1438 	/*
1439 	 * TINT0: Transmit Interrupt is set by the controller after the OWN bit
1440 	 * in the last descriptor of a transmit frame in this particular ring
1441 	 * has been cleared to indicate the frame has been copied to the
1442 	 * transmit FIFO.
1443 	 */
1444 	if (intrCauses & TINT0) {
		pLayerPointers->pOdl->statistics.intr_TINT0++;
		/*
		 * The Tx descriptor ring may have free entries again
		 * while packets are still queued in the Tx buffer, so
		 * drain the Tx buffer here.
		 */
1450 		amd8111s_send_serial(pLayerPointers);
1451 	}
1452 
1453 	if (intrCauses & STINT) {
		pLayerPointers->pOdl->statistics.intr_STINT++;
1455 	}
1456 
1457 
1458 	return (DDI_INTR_CLAIMED);
1459 }
1460 
1461 /*
 * Re-initialize driver data structures.
1463  */
1464 static void
1465 amd8111s_sw_reset(struct LayerPointers *pLayerPointers)
1466 {
1467 	/* Reset all Tx/Rx queues and descriptors */
1468 	milResetTxQ(pLayerPointers);
1469 	milInitRxQ(pLayerPointers);
1470 }
1471 
1472 /*
 * Wait for all pending Tx packets to be sent out
1474  */
1475 static void
1476 amd8111s_tx_drain(struct LayerPointers *adapter)
1477 {
1478 	struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
1479 	int i, desc_count = 0;
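	/*
	 * Poll the descriptor ring for up to 30 ms (30 x 1 ms) waiting
	 * for the hardware to hand back every Tx descriptor (Tx_OWN
	 * cleared) before the chip is stopped.
	 */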
1480 	for (i = 0; i < 30; i++) {
		while ((pTx_desc->Tx_OWN == 0) && (desc_count < TX_RING_SIZE)) {
			/* This packet has been transmitted */
			pTx_desc++;
			desc_count++;
		}
1486 		if (desc_count == TX_RING_SIZE) {
1487 			break;
1488 		}
1489 		/* Wait 1 ms */
1490 		drv_usecwait(1000);
1491 	}
1492 	adapter->pOdl->statistics.tx_draintime = i;
1493 }
1494 
1495 /*
 * (GLD Entry Point) Start the card. Called at
 * "ifconfig plumb" time.
1498  */
1499 static int
1500 amd8111s_m_start(void *arg)
1501 {
1502 	struct LayerPointers *pLayerPointers = arg;
1503 	struct odl *pOdl = pLayerPointers->pOdl;
1504 
1505 	amd8111s_sw_reset(pLayerPointers);
1506 	mdlHWReset(pLayerPointers);
1507 	rw_enter(&pOdl->chip_lock, RW_WRITER);
1508 	pLayerPointers->run = B_TRUE;
1509 	rw_exit(&pOdl->chip_lock);
1510 	return (0);
1511 }
1512 
1513 /*
 * (GLD Entry Point) Stop the card. Called at
 * "ifconfig unplumb" time.
1516  */
1517 static void
1518 amd8111s_m_stop(void *arg)
1519 {
1520 	struct LayerPointers *pLayerPointers = (struct LayerPointers *)arg;
1521 	struct odl *pOdl = pLayerPointers->pOdl;
1522 
	/* Wait for all pending Tx packets to drain */
1524 	amd8111s_tx_drain(pLayerPointers);
1525 	/*
1526 	 * Stop the controller and disable the controller interrupt
1527 	 */
1528 	rw_enter(&pOdl->chip_lock, RW_WRITER);
1529 	mdlStopChip(pLayerPointers);
1530 	pLayerPointers->run = B_FALSE;
1531 	rw_exit(&pOdl->chip_lock);
1532 }
1533 
1534 /*
1535  *	To clean up all
1536  */
1537 static void
1538 amd8111s_free_resource(struct LayerPointers *pLayerPointers)
1539 {
1540 	unsigned long mem_free_array[100];
1541 	unsigned long *pmem_free_array, size;
1542 
1543 	/* Free Rx/Tx descriptors */
1544 	amd8111s_free_descriptors(pLayerPointers);
1545 
1546 	/* Free memory on lower layers */
1547 	milFreeResources(pLayerPointers, mem_free_array);
1548 	pmem_free_array = mem_free_array;
1549 	while (*pmem_free_array) {
1550 		switch (*pmem_free_array) {
1551 		case VIRTUAL:
1552 			size = *(++pmem_free_array);
1553 			pmem_free_array++;
1554 			kmem_free((void *)*(pmem_free_array), size);
1555 			break;
1556 		}
1557 		pmem_free_array++;
1558 	}
1559 
1560 	amd8111s_free_buffers(pLayerPointers);
1561 }
1562 
1563 /*
 * (GLD Entry Point) Add or delete multicast addresses
1565  *
1566  */
1567 static int
1568 amd8111s_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
1569 {
1570 	struct LayerPointers *pLayerPointers = arg;
1571 
1572 	if (add) {
1573 		/* Add a multicast entry */
1574 		mdlAddMulticastAddress(pLayerPointers, (UCHAR *)addr);
1575 	} else {
1576 		/* Delete a multicast entry */
1577 		mdlDeleteMulticastAddress(pLayerPointers, (UCHAR *)addr);
1578 	}
1579 
1580 	return (0);
1581 }
1582 
1583 #ifdef AMD8111S_DEBUG
1584 /*
1585  * The size of MIB registers is only 32 bits. Dump them before one
1586  * of them overflows.
1587  */
1588 static void
1589 amd8111s_dump_mib(struct LayerPointers *pLayerPointers)
1590 {
1591 	struct amd8111s_statistics *adapterStat;
1592 
1593 	adapterStat = &pLayerPointers->pOdl->statistics;
1594 
	adapterStat->mib_dump_counter++;
1596 
1597 	/*
1598 	 * Rx Counters
1599 	 */
1600 	adapterStat->rx_mib_unicst_packets +=
1601 	    mdlReadMib(pLayerPointers, RcvUniCastPkts);
1602 	adapterStat->rx_mib_multicst_packets +=
1603 	    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
1604 	adapterStat->rx_mib_broadcst_packets +=
1605 	    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
1606 	adapterStat->rx_mib_macctrl_packets +=
1607 	    mdlReadMib(pLayerPointers, RcvMACCtrl);
1608 	adapterStat->rx_mib_flowctrl_packets +=
1609 	    mdlReadMib(pLayerPointers, RcvFlowCtrl);
1610 
1611 	adapterStat->rx_mib_bytes +=
1612 	    mdlReadMib(pLayerPointers, RcvOctets);
1613 	adapterStat->rx_mib_good_bytes +=
1614 	    mdlReadMib(pLayerPointers, RcvGoodOctets);
1615 
1616 	adapterStat->rx_mib_undersize_packets +=
1617 	    mdlReadMib(pLayerPointers, RcvUndersizePkts);
1618 	adapterStat->rx_mib_oversize_packets +=
1619 	    mdlReadMib(pLayerPointers, RcvOversizePkts);
1620 
1621 	adapterStat->rx_mib_drop_packets +=
1622 	    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
1623 	adapterStat->rx_mib_align_err_packets +=
1624 	    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
1625 	adapterStat->rx_mib_fcs_err_packets +=
1626 	    mdlReadMib(pLayerPointers, RcvFCSErrors);
1627 	adapterStat->rx_mib_symbol_err_packets +=
1628 	    mdlReadMib(pLayerPointers, RcvSymbolErrors);
1629 	adapterStat->rx_mib_miss_packets +=
1630 	    mdlReadMib(pLayerPointers, RcvMissPkts);
1631 
1632 	/*
1633 	 * Tx Counters
1634 	 */
1635 	adapterStat->tx_mib_packets +=
1636 	    mdlReadMib(pLayerPointers, XmtPackets);
1637 	adapterStat->tx_mib_multicst_packets +=
1638 	    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
1639 	adapterStat->tx_mib_broadcst_packets +=
1640 	    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
1641 	adapterStat->tx_mib_flowctrl_packets +=
1642 	    mdlReadMib(pLayerPointers, XmtFlowCtrl);
1643 
1644 	adapterStat->tx_mib_bytes +=
1645 	    mdlReadMib(pLayerPointers, XmtOctets);
1646 
1647 	adapterStat->tx_mib_defer_trans_packets +=
1648 	    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
1649 	adapterStat->tx_mib_collision_packets +=
1650 	    mdlReadMib(pLayerPointers, XmtCollisions);
1651 	adapterStat->tx_mib_one_coll_packets +=
1652 	    mdlReadMib(pLayerPointers, XmtOneCollision);
1653 	adapterStat->tx_mib_multi_coll_packets +=
1654 	    mdlReadMib(pLayerPointers, XmtMultipleCollision);
1655 	adapterStat->tx_mib_late_coll_packets +=
1656 	    mdlReadMib(pLayerPointers, XmtLateCollision);
1657 	adapterStat->tx_mib_ex_coll_packets +=
1658 	    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
1659 
1660 
1661 	/* Clear all MIB registers */
1662 	WRITE_REG16(pLayerPointers, pLayerPointers->pMdl->Mem_Address
1663 	    + MIB_ADDR, MIB_CLEAR);
1664 }
1665 #endif
1666 
1667 /*
 * (GLD Entry Point) Set or clear promiscuous mode
1669  */
1670 static int
1671 amd8111s_m_promisc(void *arg, boolean_t on)
1672 {
1673 	struct LayerPointers *pLayerPointers = arg;
1674 
1675 	if (on) {
1676 		mdlSetPromiscuous(pLayerPointers);
1677 	} else {
1678 		mdlDisablePromiscuous(pLayerPointers);
1679 	}
1680 
1681 	return (0);
1682 }
1683 
1684 /*
 * (GLD Entry Point) Change the MAC address of the card
1686  */
1687 static int
1688 amd8111s_m_unicst(void *arg, const uint8_t *macaddr)
1689 {
1690 	struct LayerPointers *pLayerPointers = arg;
1691 
1692 	mdlDisableInterrupt(pLayerPointers);
1693 	mdlSetMacAddress(pLayerPointers, (unsigned char *)macaddr);
1694 	mdlEnableInterrupt(pLayerPointers);
1695 
1696 	return (0);
1697 }
1698 
1699 /*
1700  * Reset the card
1701  */
1702 void
1703 amd8111s_reset(struct LayerPointers *pLayerPointers)
1704 {
1705 	amd8111s_sw_reset(pLayerPointers);
1706 	mdlHWReset(pLayerPointers);
1707 }
1708 
1709 /*
1710  * attach(9E) -- Attach a device to the system
1711  *
 * Called once for each board after it has been successfully probed.
 * This routine will:
 *	a. create a minor device node for the instance;
 *	b. allocate and initialize the four layers (via odlInit);
 *	c. get the MAC address;
 *	d. save pLayerPointers as the GLD private pointer;
 *	e. register with the GLD framework.
 * If any step fails, it cleans up and returns DDI_FAILURE;
 * otherwise it returns DDI_SUCCESS.
1721  */
1722 static int
1723 amd8111s_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1724 {
1725 	mac_register_t *macp;
1726 	struct LayerPointers *pLayerPointers;
1727 	struct odl *pOdl;
1728 	ddi_acc_handle_t *pci_handle;
1729 	ddi_device_acc_attr_t dev_attr;
1730 	caddr_t addrp = NULL;
1731 
1732 	switch (cmd) {
1733 	case DDI_ATTACH:
1734 		break;
1735 	default:
1736 		return (DDI_FAILURE);
1737 	}
1738 
1739 	pLayerPointers = (struct LayerPointers *)
1740 	    kmem_zalloc(sizeof (struct LayerPointers), KM_SLEEP);
1741 	amd8111sadapter = pLayerPointers;
1742 
1743 	/* Get device instance number */
1744 	pLayerPointers->instance = ddi_get_instance(devinfo);
1745 	ddi_set_driver_private(devinfo, (caddr_t)pLayerPointers);
1746 
1747 	pOdl = (struct odl *)kmem_zalloc(sizeof (struct odl), KM_SLEEP);
1748 	pLayerPointers->pOdl = pOdl;
1749 
1750 	pOdl->devinfo = devinfo;
1751 
1752 	/*
	 * So far we have only allocated memory for struct odl and
	 * initialized it. All other memory allocation and
	 * initialization is done in odlInit later in this routine.
1756 	 */
1757 	if (ddi_get_iblock_cookie(devinfo, 0, &pLayerPointers->pOdl->iblock)
1758 	    != DDI_SUCCESS) {
1759 		amd8111s_log(pLayerPointers, CE_NOTE,
1760 		    "attach: get iblock cookies failed");
1761 		goto attach_failure;
1762 	}
1763 
1764 	rw_init(&pOdl->chip_lock, NULL, RW_DRIVER, (void *)pOdl->iblock);
1765 	mutex_init(&pOdl->mdlSendLock, "amd8111s Send Protection Lock",
1766 	    MUTEX_DRIVER, (void *)pOdl->iblock);
1767 	mutex_init(&pOdl->mdlRcvLock, "amd8111s Rcv Protection Lock",
1768 	    MUTEX_DRIVER, (void *)pOdl->iblock);
1769 
1770 	/* Setup PCI space */
1771 	if (pci_config_setup(devinfo, &pOdl->pci_handle) != DDI_SUCCESS) {
1772 		return (DDI_FAILURE);
1773 	}
1774 	pLayerPointers->attach_progress = AMD8111S_ATTACH_PCI;
1775 	pci_handle = &pOdl->pci_handle;
1776 
1777 	pOdl->vendor_id = pci_config_get16(*pci_handle, PCI_CONF_VENID);
1778 	pOdl->device_id = pci_config_get16(*pci_handle, PCI_CONF_DEVID);
1779 
1780 	/*
1781 	 * Allocate and initialize all resource and map device registers.
1782 	 * If failed, it returns a non-zero value.
1783 	 */
1784 	if (amd8111s_odlInit(pLayerPointers) != 0) {
1785 		goto attach_failure;
1786 	}
1787 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESOURCE;
1788 
1789 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1790 	dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1791 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1792 
	if (ddi_regs_map_setup(devinfo, 1, &addrp, 0, 4096, &dev_attr,
1794 	    &(pLayerPointers->pOdl->MemBasehandle)) != 0) {
1795 		amd8111s_log(pLayerPointers, CE_NOTE,
1796 		    "attach: ddi_regs_map_setup failed");
1797 		goto attach_failure;
1798 	}
1799 	pLayerPointers->pMdl->Mem_Address = (unsigned long)addrp;
1800 
1801 	/* Initialize HW */
1802 	mdlOpen(pLayerPointers);
1803 	mdlGetActiveMediaInfo(pLayerPointers);
1804 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_REGS;
1805 
1806 	/*
1807 	 * Setup the interrupt
1808 	 */
1809 	if (ddi_add_intr(devinfo, 0, &pOdl->iblock, 0, amd8111s_intr,
1810 	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
1811 		goto attach_failure;
1812 	}
1813 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_INTRADDED;
1814 
1815 	/*
1816 	 * Setup soft intr
1817 	 */
1818 	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &pOdl->drain_id,
1819 	    NULL, NULL, amd8111s_send_drain,
1820 	    (caddr_t)pLayerPointers) != DDI_SUCCESS) {
1821 		goto attach_failure;
1822 	}
1823 	pLayerPointers->attach_progress |= AMD8111S_ATTACH_RESCHED;
1824 
1825 	/*
	 * Initialize the mac structure
1827 	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_failure;

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = pLayerPointers;
	macp->m_dip = devinfo;
	/* Get MAC address */
	mdlGetMacAddress(pLayerPointers, (unsigned char *)pOdl->MacAddress);
	macp->m_src_addr = pOdl->MacAddress;
	macp->m_callbacks = &amd8111s_m_callbacks;
	macp->m_min_sdu = 0;
	/* 1518 - 14 (ether header) - 4 (CRC) */
	macp->m_max_sdu = ETHERMTU;
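	/* Reserve headroom for a 4-byte VLAN tag beyond the maximum SDU */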
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're ready to start.
	 */
	if (mac_register(macp, &pOdl->mh) != DDI_SUCCESS) {
		mac_free(macp);
		goto attach_failure;
	}
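	/* mac_register() copies the registration info; free the temporary */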
	mac_free(macp);

	pLayerPointers->attach_progress |= AMD8111S_ATTACH_MACREGED;

	return (DDI_SUCCESS);

attach_failure:
	(void) amd8111s_unattach(devinfo, pLayerPointers);
	return (DDI_FAILURE);
}

/*
 * detach(9E) -- Detach a device from the system
 *
 * It is called for each device instance when the system is preparing to
 * unload a dynamically unloadable driver. This routine will:
 *	a. check whether any driver buffers are still held by the OS;
 *	b. clean up all allocated memory if none is in use by the OS;
 *	c. unregister from the GLD interface;
 *	d. return DDI_SUCCESS on a successful free and unregister,
 *	   DDI_FAILURE otherwise.
 */
static int
amd8111s_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct LayerPointers *pLayerPointers;

	switch (cmd) {
	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Get the driver private (struct LayerPointers *) structure
	 */
	if ((pLayerPointers = (struct LayerPointers *)
	    ddi_get_driver_private(devinfo)) == NULL) {
		return (DDI_FAILURE);
	}

	return (amd8111s_unattach(devinfo, pLayerPointers));
}

static int
amd8111s_unattach(dev_info_t *devinfo, struct LayerPointers *pLayerPointers)
{
	struct odl *pOdl = pLayerPointers->pOdl;

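	/*
	 * Undo the attach steps in reverse order; attach_progress records
	 * which steps completed and therefore need to be torn down.
	 */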
	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_MACREGED) {
		/* Unregister driver from the GLD interface */
		if (mac_unregister(pOdl->mh) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_INTRADDED) {
		ddi_remove_intr(devinfo, 0, pOdl->iblock);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESCHED) {
		ddi_remove_softintr(pOdl->drain_id);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_REGS) {
		/* Stop HW */
		mdlStopChip(pLayerPointers);
		ddi_regs_map_free(&(pOdl->MemBasehandle));
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_RESOURCE) {
		/* Free all memory allocated */
		amd8111s_free_resource(pLayerPointers);
	}

	if (pLayerPointers->attach_progress & AMD8111S_ATTACH_PCI) {
		pci_config_teardown(&pOdl->pci_handle);
		mutex_destroy(&pOdl->mdlSendLock);
		mutex_destroy(&pOdl->mdlRcvLock);
		rw_destroy(&pOdl->chip_lock);
	}

	kmem_free(pOdl, sizeof (struct odl));
	kmem_free(pLayerPointers, sizeof (struct LayerPointers));

	return (DDI_SUCCESS);
}

/*
 * (GLD Entry Point) GLD calls this entry point periodically to
 * get driver statistics.
 */
static int
amd8111s_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct LayerPointers *pLayerPointers = arg;
	struct amd8111s_statistics *adapterStat;

	adapterStat = &pLayerPointers->pOdl->statistics;

	switch (stat) {

	/*
	 * Current Status
	 */
	case MAC_STAT_IFSPEED:
		*val = pLayerPointers->pMdl->Speed * 1000000;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		if (pLayerPointers->pMdl->FullDuplex) {
			*val = LINK_DUPLEX_FULL;
		} else {
			*val = LINK_DUPLEX_HALF;
		}
		break;

	/*
	 * Capabilities
	 */
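	/* The AMD8111S is a 10/100 device; the 1000 Mb/s capabilities are 0 */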
	case ETHER_STAT_CAP_1000FDX:
		*val = 0;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = 0;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = 1;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = 1;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = 0;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = 0;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = 1;
		break;

	/*
	 * Rx Counters
	 */
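	/*
	 * Each counter below sums the driver's software statistic with the
	 * chip's current hardware MIB counter read via mdlReadMib().
	 */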
	case MAC_STAT_IPACKETS:
		*val = adapterStat->rx_mib_unicst_packets +
		    adapterStat->rx_mib_multicst_packets +
		    adapterStat->rx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, RcvUniCastPkts) +
		    mdlReadMib(pLayerPointers, RcvMultiCastPkts) +
		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
		break;

	case MAC_STAT_RBYTES:
		*val = adapterStat->rx_mib_bytes +
		    mdlReadMib(pLayerPointers, RcvOctets);
		break;

	case MAC_STAT_MULTIRCV:
		*val = adapterStat->rx_mib_multicst_packets +
		    mdlReadMib(pLayerPointers, RcvMultiCastPkts);
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = adapterStat->rx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, RcvBroadCastPkts);
		break;

	case MAC_STAT_NORCVBUF:
		*val = adapterStat->rx_allocfail +
		    adapterStat->rx_mib_drop_packets +
		    mdlReadMib(pLayerPointers, RcvDropPktsRing0);
		break;

	case MAC_STAT_IERRORS:
		*val = adapterStat->rx_mib_align_err_packets +
		    adapterStat->rx_mib_fcs_err_packets +
		    adapterStat->rx_mib_symbol_err_packets +
		    mdlReadMib(pLayerPointers, RcvAlignmentErrors) +
		    mdlReadMib(pLayerPointers, RcvFCSErrors) +
		    mdlReadMib(pLayerPointers, RcvSymbolErrors);
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = adapterStat->rx_mib_align_err_packets +
		    mdlReadMib(pLayerPointers, RcvAlignmentErrors);
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = adapterStat->rx_mib_fcs_err_packets +
		    mdlReadMib(pLayerPointers, RcvFCSErrors);
		break;

	/*
	 * Tx Counters
	 */
	case MAC_STAT_OPACKETS:
		*val = adapterStat->tx_mib_packets +
		    mdlReadMib(pLayerPointers, XmtPackets);
		break;

	case MAC_STAT_OBYTES:
		*val = adapterStat->tx_mib_bytes +
		    mdlReadMib(pLayerPointers, XmtOctets);
		break;

	case MAC_STAT_MULTIXMT:
		*val = adapterStat->tx_mib_multicst_packets +
		    mdlReadMib(pLayerPointers, XmtMultiCastPkts);
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = adapterStat->tx_mib_broadcst_packets +
		    mdlReadMib(pLayerPointers, XmtBroadCastPkts);
		break;

	case MAC_STAT_NOXMTBUF:
		*val = adapterStat->tx_no_descriptor;
		break;

	case MAC_STAT_OERRORS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
		break;

	case MAC_STAT_COLLISIONS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtCollisions);
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = adapterStat->tx_mib_one_coll_packets +
		    mdlReadMib(pLayerPointers, XmtOneCollision);
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = adapterStat->tx_mib_multi_coll_packets +
		    mdlReadMib(pLayerPointers, XmtMultipleCollision);
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = adapterStat->tx_mib_ex_coll_packets +
		    mdlReadMib(pLayerPointers, XmtExcessiveCollision);
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = adapterStat->tx_mib_late_coll_packets +
		    mdlReadMib(pLayerPointers, XmtLateCollision);
		break;

	case ETHER_STAT_DEFER_XMTS:
		*val = adapterStat->tx_mib_defer_trans_packets +
		    mdlReadMib(pLayerPointers, XmtDeferredTransmit);
		break;

	default:
		return (ENOTSUP);
	}
	return (0);
}

/*
 * Memory access functions used by the MDL to read and write card registers.
 */
unsigned char
READ_REG8(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)x));
}

int
READ_REG16(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get16(pLayerPointers->pOdl->MemBasehandle,
	    (uint16_t *)(x)));
}

long
READ_REG32(struct LayerPointers *pLayerPointers, long x)
{
	return (ddi_get32(pLayerPointers->pOdl->MemBasehandle,
	    (uint32_t *)(x)));
}

void
WRITE_REG8(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put8(pLayerPointers->pOdl->MemBasehandle, (uint8_t *)(x), y);
}

void
WRITE_REG16(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put16(pLayerPointers->pOdl->MemBasehandle, (uint16_t *)(x), y);
}

void
WRITE_REG32(struct LayerPointers *pLayerPointers, long x, int y)
{
	ddi_put32(pLayerPointers->pOdl->MemBasehandle, (uint32_t *)(x), y);
}

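/*
 * WRITE_REG64 stores the eight bytes of y[] to consecutive register
 * addresses in ascending order.  A hypothetical caller (LADRF_OFFSET and
 * hash are illustrative names, not defined in this driver) might program
 * a 64-bit logical address filter like this:
 *
 *	char hash[8];
 *	(void) memset(hash, 0, sizeof (hash));
 *	WRITE_REG64(pLayerPointers,
 *	    (long)pLayerPointers->pMdl->Mem_Address + LADRF_OFFSET, hash);
 */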
void
WRITE_REG64(struct LayerPointers *pLayerPointers, long x, char *y)
{
	int i;

	for (i = 0; i < 8; i++) {
		WRITE_REG8(pLayerPointers, (x + i), y[i]);
	}
}