xref: /illumos-gate/usr/src/uts/common/io/dmfe/dmfe_main.c (revision 0dc2366f)
15c1d0199Sgd78059 /*
25c1d0199Sgd78059  * CDDL HEADER START
35c1d0199Sgd78059  *
45c1d0199Sgd78059  * The contents of this file are subject to the terms of the
55c1d0199Sgd78059  * Common Development and Distribution License (the "License").
65c1d0199Sgd78059  * You may not use this file except in compliance with the License.
75c1d0199Sgd78059  *
85c1d0199Sgd78059  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
95c1d0199Sgd78059  * or http://www.opensolaris.org/os/licensing.
105c1d0199Sgd78059  * See the License for the specific language governing permissions
115c1d0199Sgd78059  * and limitations under the License.
125c1d0199Sgd78059  *
135c1d0199Sgd78059  * When distributing Covered Code, include this CDDL HEADER in each
145c1d0199Sgd78059  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
155c1d0199Sgd78059  * If applicable, add the following below this CDDL HEADER, with the
165c1d0199Sgd78059  * fields enclosed by brackets "[]" replaced with your own identifying
175c1d0199Sgd78059  * information: Portions Copyright [yyyy] [name of copyright owner]
185c1d0199Sgd78059  *
195c1d0199Sgd78059  * CDDL HEADER END
205c1d0199Sgd78059  */
215c1d0199Sgd78059 /*
22*0dc2366fSVenugopal Iyer  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
235c1d0199Sgd78059  * Use is subject to license terms.
245c1d0199Sgd78059  */
255c1d0199Sgd78059 
265c1d0199Sgd78059 
275c1d0199Sgd78059 #include <sys/types.h>
285c1d0199Sgd78059 #include <sys/sunddi.h>
29bdb9230aSGarrett D'Amore #include <sys/policy.h>
30bdb9230aSGarrett D'Amore #include <sys/sdt.h>
315c1d0199Sgd78059 #include "dmfe_impl.h"
325c1d0199Sgd78059 
335c1d0199Sgd78059 /*
345c1d0199Sgd78059  * This is the string displayed by modinfo, etc.
355c1d0199Sgd78059  */
365c1d0199Sgd78059 static char dmfe_ident[] = "Davicom DM9102 Ethernet";
375c1d0199Sgd78059 
385c1d0199Sgd78059 
395c1d0199Sgd78059 /*
405c1d0199Sgd78059  * NOTES:
415c1d0199Sgd78059  *
425c1d0199Sgd78059  * #defines:
435c1d0199Sgd78059  *
445c1d0199Sgd78059  *	DMFE_PCI_RNUMBER is the register-set number to use for the operating
455c1d0199Sgd78059  *	registers.  On an OBP-based machine, regset 0 refers to CONFIG space,
465c1d0199Sgd78059  *	regset 1 will be the operating registers in I/O space, and regset 2
475c1d0199Sgd78059  *	will be the operating registers in MEMORY space (preferred).  If an
485c1d0199Sgd78059  *	expansion ROM is fitted, it may appear as a further register set.
495c1d0199Sgd78059  *
505c1d0199Sgd78059  *	DMFE_SLOP defines the amount by which the chip may read beyond
515c1d0199Sgd78059  *	the end of a buffer or descriptor, apparently 6-8 dwords :(
525c1d0199Sgd78059  *	We have to make sure this doesn't cause it to access unallocated
535c1d0199Sgd78059  *	or unmapped memory.
545c1d0199Sgd78059  *
555c1d0199Sgd78059  *	DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP)
565c1d0199Sgd78059  *	rounded up to a multiple of 4.  Here we choose a power of two for
575c1d0199Sgd78059  *	speed & simplicity at the cost of a bit more memory.
585c1d0199Sgd78059  *
595c1d0199Sgd78059  *	However, the buffer length field in the TX/RX descriptors is only
605c1d0199Sgd78059  *	eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes
615c1d0199Sgd78059  *	per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1
625c1d0199Sgd78059  *	(2000) bytes each.
635c1d0199Sgd78059  *
645c1d0199Sgd78059  *	DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for
655c1d0199Sgd78059  *	the data buffers.  The descriptors are always set up in CONSISTENT
665c1d0199Sgd78059  *	mode.
675c1d0199Sgd78059  *
685c1d0199Sgd78059  *	DMFE_HEADROOM defines how much space we'll leave in allocated
695c1d0199Sgd78059  *	mblks before the first valid data byte.  This should be chosen
705c1d0199Sgd78059  *	to be 2 modulo 4, so that once the ethernet header (14 bytes)
715c1d0199Sgd78059  *	has been stripped off, the packet data will be 4-byte aligned.
725c1d0199Sgd78059  *	The remaining space can be used by upstream modules to prepend
735c1d0199Sgd78059  *	any headers required.
745c1d0199Sgd78059  *
755c1d0199Sgd78059  * Patchable globals:
765c1d0199Sgd78059  *
775c1d0199Sgd78059  *	dmfe_bus_modes: the bus mode bits to be put into CSR0.
785c1d0199Sgd78059  *		Setting READ_MULTIPLE in this register seems to cause
795c1d0199Sgd78059  *		the chip to generate a READ LINE command with a parity
805c1d0199Sgd78059  *		error!  Don't do it!
815c1d0199Sgd78059  *
825c1d0199Sgd78059  *	dmfe_setup_desc1: the value to be put into descriptor word 1
835c1d0199Sgd78059  *		when sending a SETUP packet.
845c1d0199Sgd78059  *
855c1d0199Sgd78059  *		Setting TX_LAST_DESC in desc1 in a setup packet seems
865c1d0199Sgd78059  *		to make the chip spontaneously reset internally - it
875c1d0199Sgd78059  *		attempts to give back the setup packet descriptor by
885c1d0199Sgd78059  *		writing to PCI address 00000000 - which may or may not
895c1d0199Sgd78059  *		get a MASTER ABORT - after which most of its registers
905c1d0199Sgd78059  *		seem to have either default values or garbage!
915c1d0199Sgd78059  *
925c1d0199Sgd78059  *		TX_FIRST_DESC doesn't seem to have the same effect but
935c1d0199Sgd78059  *		it isn't needed on a setup packet so we'll leave it out
945c1d0199Sgd78059  *		too, just in case it has some other weird side-effect.
955c1d0199Sgd78059  *
965c1d0199Sgd78059  *		The default hardware packet filtering mode is now
975c1d0199Sgd78059  *		HASH_AND_PERFECT (imperfect filtering of multicast
985c1d0199Sgd78059  *		packets and perfect filtering of unicast packets).
995c1d0199Sgd78059  *		If this is found not to work reliably, setting the
1005c1d0199Sgd78059  *		TX_FILTER_TYPE1 bit will cause a switchover to using
1015c1d0199Sgd78059  *		HASH_ONLY mode (imperfect filtering of *all* packets).
1025c1d0199Sgd78059  *		Software will then perform the additional filtering
1035c1d0199Sgd78059  *		as required.
1045c1d0199Sgd78059  */
1055c1d0199Sgd78059 
1065c1d0199Sgd78059 #define	DMFE_PCI_RNUMBER	2
1075c1d0199Sgd78059 #define	DMFE_SLOP		(8*sizeof (uint32_t))
1085c1d0199Sgd78059 #define	DMFE_BUF_SIZE		2048
1095c1d0199Sgd78059 #define	DMFE_BUF_SIZE_1		2000
1105c1d0199Sgd78059 #define	DMFE_DMA_MODE		DDI_DMA_STREAMING
1115c1d0199Sgd78059 #define	DMFE_HEADROOM		34
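/*
 * Editorial worked example (not part of the original driver): with the
 * usual illumos values ETHERMAX = 1514 and ETHERFCSL = 4, the sizing rule
 * above comes to 1514 + 4 + 32 (DMFE_SLOP) = 1550 bytes, comfortably below
 * both DMFE_BUF_SIZE (2048) and the DMFE_BUF_SIZE_1 (2000) length actually
 * programmed into the 11-bit descriptor field.  Likewise DMFE_HEADROOM is
 * 34, and 34 % 4 == 2, so once the 14-byte ethernet header is stripped the
 * payload starts 4-byte aligned ((34 + 14) % 4 == 0).
 */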
1125c1d0199Sgd78059 
1135c1d0199Sgd78059 static uint32_t dmfe_bus_modes = TX_POLL_INTVL | CACHE_ALIGN;
1145c1d0199Sgd78059 static uint32_t dmfe_setup_desc1 = TX_SETUP_PACKET | SETUPBUF_SIZE |
1155c1d0199Sgd78059 					TX_FILTER_TYPE0;
1165c1d0199Sgd78059 
1175c1d0199Sgd78059 /*
1185c1d0199Sgd78059  * Some tunable parameters ...
1195c1d0199Sgd78059  *	Number of RX/TX ring entries (128/128)
1205c1d0199Sgd78059  *	Minimum number of TX ring slots to keep free (1)
1215c1d0199Sgd78059  *	Low-water mark at which to try to reclaim TX ring slots (1)
1225c1d0199Sgd78059  *	How often to take a TX-done interrupt (twice per ring cycle)
1235c1d0199Sgd78059  *	Whether to reclaim TX ring entries on a TX-done interrupt (no)
1245c1d0199Sgd78059  */
1255c1d0199Sgd78059 
1265c1d0199Sgd78059 #define	DMFE_TX_DESC		128	/* Should be a multiple of 4 <= 256 */
1275c1d0199Sgd78059 #define	DMFE_RX_DESC		128	/* Should be a multiple of 4 <= 256 */
1285c1d0199Sgd78059 
1295c1d0199Sgd78059 static uint32_t dmfe_rx_desc = DMFE_RX_DESC;
1305c1d0199Sgd78059 static uint32_t dmfe_tx_desc = DMFE_TX_DESC;
1315c1d0199Sgd78059 static uint32_t dmfe_tx_min_free = 1;
1325c1d0199Sgd78059 static uint32_t dmfe_tx_reclaim_level = 1;
1335c1d0199Sgd78059 static uint32_t dmfe_tx_int_factor = (DMFE_TX_DESC / 2) - 1;
1345c1d0199Sgd78059 static boolean_t dmfe_reclaim_on_done = B_FALSE;
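/*
 * Editorial note: dmfe_tx_int_factor is applied below as a bit mask
 * ("(index & dmfe_tx_int_factor) == 0" in dmfe_send_msg()), so useful
 * values are one less than a power of two.  The default, (128/2)-1 = 63,
 * requests TX_INT_ON_COMP on every 64th descriptor, i.e. twice per cycle
 * of the 128-entry ring, matching the description above.
 */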
1355c1d0199Sgd78059 
1365c1d0199Sgd78059 /*
1375c1d0199Sgd78059  * Time-related parameters:
1385c1d0199Sgd78059  *
1395c1d0199Sgd78059  *	We use a cyclic to provide a periodic callback; this is then used
1405c1d0199Sgd78059  * 	to check for TX-stall and poll the link status register.
1415c1d0199Sgd78059  *
1425c1d0199Sgd78059  *	DMFE_TICK is the interval between cyclic callbacks, in microseconds.
1435c1d0199Sgd78059  *
1445c1d0199Sgd78059  *	TX_STALL_TIME_100 is the timeout in microseconds between passing
1455c1d0199Sgd78059  *	a packet to the chip for transmission and seeing that it's gone,
1465c1d0199Sgd78059  *	when running at 100Mb/s.  If we haven't reclaimed at least one
1475c1d0199Sgd78059  *	descriptor in this time we assume the transmitter has stalled
1485c1d0199Sgd78059  *	and reset the chip.
1495c1d0199Sgd78059  *
1505c1d0199Sgd78059  *	TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s.
1515c1d0199Sgd78059  *
1525c1d0199Sgd78059  * Patchable globals:
1535c1d0199Sgd78059  *
1545c1d0199Sgd78059  *	dmfe_tick_us:		DMFE_TICK
1555c1d0199Sgd78059  *	dmfe_tx100_stall_us:	TX_STALL_TIME_100
1565c1d0199Sgd78059  *	dmfe_tx10_stall_us:	TX_STALL_TIME_10
1575c1d0199Sgd78059  *
1585c1d0199Sgd78059  * These are then used in _init() to calculate:
1595c1d0199Sgd78059  *
1605c1d0199Sgd78059  *	stall_100_tix[]: number of consecutive cyclic callbacks without a
1615c1d0199Sgd78059  *			 reclaim before the TX process is considered stalled,
1625c1d0199Sgd78059  *			 when running at 100Mb/s.  The elements are indexed
1635c1d0199Sgd78059  *			 by transmit-engine-state.
1645c1d0199Sgd78059  *	stall_10_tix[]:	 number of consecutive cyclic callbacks without a
1655c1d0199Sgd78059  *			 reclaim before the TX process is considered stalled,
1665c1d0199Sgd78059  *			 when running at 10Mb/s.  The elements are indexed
1675c1d0199Sgd78059  *			 by transmit-engine-state.
1685c1d0199Sgd78059  */
1695c1d0199Sgd78059 
1705c1d0199Sgd78059 #define	DMFE_TICK		25000		/* microseconds		*/
1715c1d0199Sgd78059 #define	TX_STALL_TIME_100	50000		/* microseconds		*/
1725c1d0199Sgd78059 #define	TX_STALL_TIME_10	200000		/* microseconds		*/
1735c1d0199Sgd78059 
1745c1d0199Sgd78059 static uint32_t dmfe_tick_us = DMFE_TICK;
1755c1d0199Sgd78059 static uint32_t dmfe_tx100_stall_us = TX_STALL_TIME_100;
1765c1d0199Sgd78059 static uint32_t dmfe_tx10_stall_us = TX_STALL_TIME_10;
1775c1d0199Sgd78059 
1785c1d0199Sgd78059 /*
1795c1d0199Sgd78059  * Calculated from above in _init()
1805c1d0199Sgd78059  */
1815c1d0199Sgd78059 
1825c1d0199Sgd78059 static uint32_t stall_100_tix[TX_PROCESS_MAX_STATE+1];
1835c1d0199Sgd78059 static uint32_t stall_10_tix[TX_PROCESS_MAX_STATE+1];
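/*
 * Editorial sketch (the _init() code is outside this section): the tick
 * thresholds are presumably derived by dividing the stall timeouts by the
 * cyclic interval, so with the defaults above a stall at 100Mb/s would be
 * declared after roughly 50000/25000 = 2 callbacks without a reclaim, and
 * at 10Mb/s after roughly 200000/25000 = 8, scaled per transmit-engine
 * state as the array indexing suggests.
 */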
1845c1d0199Sgd78059 
1855c1d0199Sgd78059 /*
1865c1d0199Sgd78059  * Property names
1875c1d0199Sgd78059  */
1885c1d0199Sgd78059 static char localmac_propname[] = "local-mac-address";
1895c1d0199Sgd78059 static char opmode_propname[] = "opmode-reg-value";
1905c1d0199Sgd78059 
1915c1d0199Sgd78059 static int		dmfe_m_start(void *);
1925c1d0199Sgd78059 static void		dmfe_m_stop(void *);
1935c1d0199Sgd78059 static int		dmfe_m_promisc(void *, boolean_t);
1945c1d0199Sgd78059 static int		dmfe_m_multicst(void *, boolean_t, const uint8_t *);
1955c1d0199Sgd78059 static int		dmfe_m_unicst(void *, const uint8_t *);
1965c1d0199Sgd78059 static void		dmfe_m_ioctl(void *, queue_t *, mblk_t *);
1975c1d0199Sgd78059 static mblk_t		*dmfe_m_tx(void *, mblk_t *);
1985c1d0199Sgd78059 static int 		dmfe_m_stat(void *, uint_t, uint64_t *);
199bdb9230aSGarrett D'Amore static int		dmfe_m_getprop(void *, const char *, mac_prop_id_t,
200*0dc2366fSVenugopal Iyer     uint_t, void *);
201bdb9230aSGarrett D'Amore static int		dmfe_m_setprop(void *, const char *, mac_prop_id_t,
202bdb9230aSGarrett D'Amore     uint_t,  const void *);
203*0dc2366fSVenugopal Iyer static void		dmfe_m_propinfo(void *, const char *, mac_prop_id_t,
204*0dc2366fSVenugopal Iyer     mac_prop_info_handle_t);
2055c1d0199Sgd78059 
2065c1d0199Sgd78059 static mac_callbacks_t dmfe_m_callbacks = {
207*0dc2366fSVenugopal Iyer 	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
2085c1d0199Sgd78059 	dmfe_m_stat,
2095c1d0199Sgd78059 	dmfe_m_start,
2105c1d0199Sgd78059 	dmfe_m_stop,
2115c1d0199Sgd78059 	dmfe_m_promisc,
2125c1d0199Sgd78059 	dmfe_m_multicst,
2135c1d0199Sgd78059 	dmfe_m_unicst,
2145c1d0199Sgd78059 	dmfe_m_tx,
215*0dc2366fSVenugopal Iyer 	NULL,
2165c1d0199Sgd78059 	dmfe_m_ioctl,
217bdb9230aSGarrett D'Amore 	NULL,	/* getcapab */
218bdb9230aSGarrett D'Amore 	NULL,	/* open */
219bdb9230aSGarrett D'Amore 	NULL,	/* close */
220bdb9230aSGarrett D'Amore 	dmfe_m_setprop,
221*0dc2366fSVenugopal Iyer 	dmfe_m_getprop,
222*0dc2366fSVenugopal Iyer 	dmfe_m_propinfo
2235c1d0199Sgd78059 };
2245c1d0199Sgd78059 
2255c1d0199Sgd78059 
2265c1d0199Sgd78059 /*
2275c1d0199Sgd78059  * Describes the chip's DMA engine
2285c1d0199Sgd78059  */
2295c1d0199Sgd78059 static ddi_dma_attr_t dma_attr = {
2305c1d0199Sgd78059 	DMA_ATTR_V0,		/* dma_attr version */
2315c1d0199Sgd78059 	0,			/* dma_attr_addr_lo */
2325c1d0199Sgd78059 	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
2335c1d0199Sgd78059 	0x0FFFFFF,		/* dma_attr_count_max */
2345c1d0199Sgd78059 	0x20,			/* dma_attr_align */
2355c1d0199Sgd78059 	0x7F,			/* dma_attr_burstsizes */
2365c1d0199Sgd78059 	1,			/* dma_attr_minxfer */
2375c1d0199Sgd78059 	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
2385c1d0199Sgd78059 	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
2395c1d0199Sgd78059 	1,			/* dma_attr_sgllen */
2405c1d0199Sgd78059 	1,			/* dma_attr_granular */
2415c1d0199Sgd78059 	0			/* dma_attr_flags */
2425c1d0199Sgd78059 };
2435c1d0199Sgd78059 
2445c1d0199Sgd78059 /*
2455c1d0199Sgd78059  * DMA access attributes for registers and descriptors
2465c1d0199Sgd78059  */
2475c1d0199Sgd78059 static ddi_device_acc_attr_t dmfe_reg_accattr = {
2485c1d0199Sgd78059 	DDI_DEVICE_ATTR_V0,
2495c1d0199Sgd78059 	DDI_STRUCTURE_LE_ACC,
2505c1d0199Sgd78059 	DDI_STRICTORDER_ACC
2515c1d0199Sgd78059 };
2525c1d0199Sgd78059 
2535c1d0199Sgd78059 /*
2545c1d0199Sgd78059  * DMA access attributes for data: NOT to be byte swapped.
2555c1d0199Sgd78059  */
2565c1d0199Sgd78059 static ddi_device_acc_attr_t dmfe_data_accattr = {
2575c1d0199Sgd78059 	DDI_DEVICE_ATTR_V0,
2585c1d0199Sgd78059 	DDI_NEVERSWAP_ACC,
2595c1d0199Sgd78059 	DDI_STRICTORDER_ACC
2605c1d0199Sgd78059 };
2615c1d0199Sgd78059 
2625c1d0199Sgd78059 static uchar_t dmfe_broadcast_addr[ETHERADDRL] = {
2635c1d0199Sgd78059 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2645c1d0199Sgd78059 };
2655c1d0199Sgd78059 
2665c1d0199Sgd78059 
2675c1d0199Sgd78059 /*
2685c1d0199Sgd78059  * ========== Lowest-level chip register & ring access routines ==========
2695c1d0199Sgd78059  */
2705c1d0199Sgd78059 
2715c1d0199Sgd78059 /*
2725c1d0199Sgd78059  * I/O register get/put routines
2735c1d0199Sgd78059  */
2745c1d0199Sgd78059 uint32_t
2755c1d0199Sgd78059 dmfe_chip_get32(dmfe_t *dmfep, off_t offset)
2765c1d0199Sgd78059 {
27722eb7cb5Sgd78059 	uint32_t *addr;
2785c1d0199Sgd78059 
27922eb7cb5Sgd78059 	addr = (void *)(dmfep->io_reg + offset);
28022eb7cb5Sgd78059 	return (ddi_get32(dmfep->io_handle, addr));
2815c1d0199Sgd78059 }
2825c1d0199Sgd78059 
2835c1d0199Sgd78059 void
2845c1d0199Sgd78059 dmfe_chip_put32(dmfe_t *dmfep, off_t offset, uint32_t value)
2855c1d0199Sgd78059 {
28622eb7cb5Sgd78059 	uint32_t *addr;
2875c1d0199Sgd78059 
28822eb7cb5Sgd78059 	addr = (void *)(dmfep->io_reg + offset);
28922eb7cb5Sgd78059 	ddi_put32(dmfep->io_handle, addr, value);
2905c1d0199Sgd78059 }
2915c1d0199Sgd78059 
2925c1d0199Sgd78059 /*
2935c1d0199Sgd78059  * TX/RX ring get/put routines
2945c1d0199Sgd78059  */
2955c1d0199Sgd78059 static uint32_t
2965c1d0199Sgd78059 dmfe_ring_get32(dma_area_t *dma_p, uint_t index, uint_t offset)
2975c1d0199Sgd78059 {
2985c1d0199Sgd78059 	uint32_t *addr;
2995c1d0199Sgd78059 
30022eb7cb5Sgd78059 	addr = (void *)dma_p->mem_va;
3015c1d0199Sgd78059 	return (ddi_get32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset));
3025c1d0199Sgd78059 }
3035c1d0199Sgd78059 
3045c1d0199Sgd78059 static void
3055c1d0199Sgd78059 dmfe_ring_put32(dma_area_t *dma_p, uint_t index, uint_t offset, uint32_t value)
3065c1d0199Sgd78059 {
3075c1d0199Sgd78059 	uint32_t *addr;
3085c1d0199Sgd78059 
30922eb7cb5Sgd78059 	addr = (void *)dma_p->mem_va;
3105c1d0199Sgd78059 	ddi_put32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset, value);
3115c1d0199Sgd78059 }
3125c1d0199Sgd78059 
3135c1d0199Sgd78059 /*
3145c1d0199Sgd78059  * Setup buffer get/put routines
3155c1d0199Sgd78059  */
3165c1d0199Sgd78059 static uint32_t
3175c1d0199Sgd78059 dmfe_setup_get32(dma_area_t *dma_p, uint_t index)
3185c1d0199Sgd78059 {
3195c1d0199Sgd78059 	uint32_t *addr;
3205c1d0199Sgd78059 
32122eb7cb5Sgd78059 	addr = (void *)dma_p->setup_va;
3225c1d0199Sgd78059 	return (ddi_get32(dma_p->acc_hdl, addr + index));
3235c1d0199Sgd78059 }
3245c1d0199Sgd78059 
3255c1d0199Sgd78059 static void
3265c1d0199Sgd78059 dmfe_setup_put32(dma_area_t *dma_p, uint_t index, uint32_t value)
3275c1d0199Sgd78059 {
3285c1d0199Sgd78059 	uint32_t *addr;
3295c1d0199Sgd78059 
33022eb7cb5Sgd78059 	addr = (void *)dma_p->setup_va;
3315c1d0199Sgd78059 	ddi_put32(dma_p->acc_hdl, addr + index, value);
3325c1d0199Sgd78059 }
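/*
 * Editorial note on the accessors above: <addr> is a uint32_t pointer, so
 * the index arithmetic (index*DESC_SIZE + offset, or plain index for the
 * setup buffer) is in 32-bit words rather than bytes.  DESC_SIZE is
 * presumably the descriptor size in words, and the DESC0/DESC1/BUFFER1/
 * RD_NEXT/TD_NEXT values used elsewhere are word offsets within a single
 * descriptor.
 */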
3335c1d0199Sgd78059 
3345c1d0199Sgd78059 
3355c1d0199Sgd78059 /*
3365c1d0199Sgd78059  * ========== Low-level chip & ring buffer manipulation ==========
3375c1d0199Sgd78059  */
3385c1d0199Sgd78059 
3395c1d0199Sgd78059 /*
3405c1d0199Sgd78059  * dmfe_set_opmode() -- function to set operating mode
3415c1d0199Sgd78059  */
3425c1d0199Sgd78059 static void
3435c1d0199Sgd78059 dmfe_set_opmode(dmfe_t *dmfep)
3445c1d0199Sgd78059 {
3455c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
3465c1d0199Sgd78059 
3475c1d0199Sgd78059 	dmfe_chip_put32(dmfep, OPN_MODE_REG, dmfep->opmode);
3485c1d0199Sgd78059 	drv_usecwait(10);
3495c1d0199Sgd78059 }
3505c1d0199Sgd78059 
3515c1d0199Sgd78059 /*
3525c1d0199Sgd78059  * dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w
3535c1d0199Sgd78059  */
3545c1d0199Sgd78059 static void
3555c1d0199Sgd78059 dmfe_stop_chip(dmfe_t *dmfep, enum chip_state newstate)
3565c1d0199Sgd78059 {
3575c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
3585c1d0199Sgd78059 
3595c1d0199Sgd78059 	/*
3605c1d0199Sgd78059 	 * Stop the chip:
3615c1d0199Sgd78059 	 *	disable all interrupts
3625c1d0199Sgd78059 	 *	stop TX/RX processes
3635c1d0199Sgd78059 	 *	clear the status bits for TX/RX stopped
3645c1d0199Sgd78059 	 * If required, reset the chip
3655c1d0199Sgd78059 	 * Record the new state
3665c1d0199Sgd78059 	 */
3675c1d0199Sgd78059 	dmfe_chip_put32(dmfep, INT_MASK_REG, 0);
3685c1d0199Sgd78059 	dmfep->opmode &= ~(START_TRANSMIT | START_RECEIVE);
3695c1d0199Sgd78059 	dmfe_set_opmode(dmfep);
3705c1d0199Sgd78059 	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
3715c1d0199Sgd78059 
3725c1d0199Sgd78059 	switch (newstate) {
3735c1d0199Sgd78059 	default:
3745c1d0199Sgd78059 		ASSERT(!"can't get here");
3755c1d0199Sgd78059 		return;
3765c1d0199Sgd78059 
3775c1d0199Sgd78059 	case CHIP_STOPPED:
3785c1d0199Sgd78059 	case CHIP_ERROR:
3795c1d0199Sgd78059 		break;
3805c1d0199Sgd78059 
3815c1d0199Sgd78059 	case CHIP_RESET:
3825c1d0199Sgd78059 		dmfe_chip_put32(dmfep, BUS_MODE_REG, SW_RESET);
3835c1d0199Sgd78059 		drv_usecwait(10);
3845c1d0199Sgd78059 		dmfe_chip_put32(dmfep, BUS_MODE_REG, 0);
3855c1d0199Sgd78059 		drv_usecwait(10);
3865c1d0199Sgd78059 		dmfe_chip_put32(dmfep, BUS_MODE_REG, dmfe_bus_modes);
3875c1d0199Sgd78059 		break;
3885c1d0199Sgd78059 	}
3895c1d0199Sgd78059 
3905c1d0199Sgd78059 	dmfep->chip_state = newstate;
3915c1d0199Sgd78059 }
3925c1d0199Sgd78059 
3935c1d0199Sgd78059 /*
3945c1d0199Sgd78059  * Initialize transmit and receive descriptor rings, and
3955c1d0199Sgd78059  * set the chip to point to the first entry in each ring
3965c1d0199Sgd78059  */
3975c1d0199Sgd78059 static void
3985c1d0199Sgd78059 dmfe_init_rings(dmfe_t *dmfep)
3995c1d0199Sgd78059 {
4005c1d0199Sgd78059 	dma_area_t *descp;
4015c1d0199Sgd78059 	uint32_t pstart;
4025c1d0199Sgd78059 	uint32_t pnext;
4035c1d0199Sgd78059 	uint32_t pbuff;
4045c1d0199Sgd78059 	uint32_t desc1;
4055c1d0199Sgd78059 	int i;
4065c1d0199Sgd78059 
4075c1d0199Sgd78059 	/*
4085c1d0199Sgd78059 	 * You need all the locks in order to rewrite the descriptor rings
4095c1d0199Sgd78059 	 */
4105c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
4115c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->rxlock));
4125c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->txlock));
4135c1d0199Sgd78059 
4145c1d0199Sgd78059 	/*
4155c1d0199Sgd78059 	 * Program the RX ring entries
4165c1d0199Sgd78059 	 */
4175c1d0199Sgd78059 	descp = &dmfep->rx_desc;
4185c1d0199Sgd78059 	pstart = descp->mem_dvma;
4195c1d0199Sgd78059 	pnext = pstart + sizeof (struct rx_desc_type);
4205c1d0199Sgd78059 	pbuff = dmfep->rx_buff.mem_dvma;
4215c1d0199Sgd78059 	desc1 = RX_CHAINING | DMFE_BUF_SIZE_1;
4225c1d0199Sgd78059 
4235c1d0199Sgd78059 	for (i = 0; i < dmfep->rx.n_desc; ++i) {
4245c1d0199Sgd78059 		dmfe_ring_put32(descp, i, RD_NEXT, pnext);
4255c1d0199Sgd78059 		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
4265c1d0199Sgd78059 		dmfe_ring_put32(descp, i, DESC1, desc1);
4275c1d0199Sgd78059 		dmfe_ring_put32(descp, i, DESC0, RX_OWN);
4285c1d0199Sgd78059 
4295c1d0199Sgd78059 		pnext += sizeof (struct rx_desc_type);
4305c1d0199Sgd78059 		pbuff += DMFE_BUF_SIZE;
4315c1d0199Sgd78059 	}
4325c1d0199Sgd78059 
4335c1d0199Sgd78059 	/*
4345c1d0199Sgd78059 	 * Fix up last entry & sync
4355c1d0199Sgd78059 	 */
4365c1d0199Sgd78059 	dmfe_ring_put32(descp, --i, RD_NEXT, pstart);
4375c1d0199Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
4385c1d0199Sgd78059 	dmfep->rx.next_free = 0;
4395c1d0199Sgd78059 
4405c1d0199Sgd78059 	/*
4415c1d0199Sgd78059 	 * Set the base address of the RX descriptor list in CSR3
4425c1d0199Sgd78059 	 */
4435c1d0199Sgd78059 	dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma);
4445c1d0199Sgd78059 
4455c1d0199Sgd78059 	/*
4465c1d0199Sgd78059 	 * Program the TX ring entries
4475c1d0199Sgd78059 	 */
4485c1d0199Sgd78059 	descp = &dmfep->tx_desc;
4495c1d0199Sgd78059 	pstart = descp->mem_dvma;
4505c1d0199Sgd78059 	pnext = pstart + sizeof (struct tx_desc_type);
4515c1d0199Sgd78059 	pbuff = dmfep->tx_buff.mem_dvma;
4525c1d0199Sgd78059 	desc1 = TX_CHAINING;
4535c1d0199Sgd78059 
4545c1d0199Sgd78059 	for (i = 0; i < dmfep->tx.n_desc; ++i) {
4555c1d0199Sgd78059 		dmfe_ring_put32(descp, i, TD_NEXT, pnext);
4565c1d0199Sgd78059 		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
4575c1d0199Sgd78059 		dmfe_ring_put32(descp, i, DESC1, desc1);
4585c1d0199Sgd78059 		dmfe_ring_put32(descp, i, DESC0, 0);
4595c1d0199Sgd78059 
4605c1d0199Sgd78059 		pnext += sizeof (struct tx_desc_type);
4615c1d0199Sgd78059 		pbuff += DMFE_BUF_SIZE;
4625c1d0199Sgd78059 	}
4635c1d0199Sgd78059 
4645c1d0199Sgd78059 	/*
4655c1d0199Sgd78059 	 * Fix up last entry & sync
4665c1d0199Sgd78059 	 */
4675c1d0199Sgd78059 	dmfe_ring_put32(descp, --i, TD_NEXT, pstart);
4685c1d0199Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
4695c1d0199Sgd78059 	dmfep->tx.n_free = dmfep->tx.n_desc;
4705c1d0199Sgd78059 	dmfep->tx.next_free = dmfep->tx.next_busy = 0;
4715c1d0199Sgd78059 
4725c1d0199Sgd78059 	/*
4735c1d0199Sgd78059 	 * Set the base address of the TX descriptor list in CSR4
4745c1d0199Sgd78059 	 */
4755c1d0199Sgd78059 	dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma);
4765c1d0199Sgd78059 }
4775c1d0199Sgd78059 
4785c1d0199Sgd78059 /*
4795c1d0199Sgd78059  * dmfe_start_chip() -- start the chip transmitting and/or receiving
4805c1d0199Sgd78059  */
4815c1d0199Sgd78059 static void
4825c1d0199Sgd78059 dmfe_start_chip(dmfe_t *dmfep, int mode)
4835c1d0199Sgd78059 {
4845c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
4855c1d0199Sgd78059 
4865c1d0199Sgd78059 	dmfep->opmode |= mode;
4875c1d0199Sgd78059 	dmfe_set_opmode(dmfep);
4885c1d0199Sgd78059 
4895c1d0199Sgd78059 	dmfe_chip_put32(dmfep, W_J_TIMER_REG, 0);
4905c1d0199Sgd78059 	/*
4915c1d0199Sgd78059 	 * Enable VLAN length mode (allows packets to be 4 bytes longer).
4925c1d0199Sgd78059 	 */
4935c1d0199Sgd78059 	dmfe_chip_put32(dmfep, W_J_TIMER_REG, VLAN_ENABLE);
4945c1d0199Sgd78059 
4955c1d0199Sgd78059 	/*
4965c1d0199Sgd78059 	 * Clear any pending process-stopped interrupts
4975c1d0199Sgd78059 	 */
4985c1d0199Sgd78059 	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
4995c1d0199Sgd78059 	dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX :
5005c1d0199Sgd78059 	    mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
5015c1d0199Sgd78059 }
5025c1d0199Sgd78059 
5035c1d0199Sgd78059 /*
5045c1d0199Sgd78059  * dmfe_enable_interrupts() -- enable our favourite set of interrupts.
5055c1d0199Sgd78059  *
5065c1d0199Sgd78059  * Normal interrupts:
5075c1d0199Sgd78059  *	We always enable:
5085c1d0199Sgd78059  *		RX_PKTDONE_INT		(packet received)
5095c1d0199Sgd78059  *		TX_PKTDONE_INT		(TX complete)
5105c1d0199Sgd78059  *	We never enable:
5115c1d0199Sgd78059  *		TX_ALLDONE_INT		(next TX buffer not ready)
5125c1d0199Sgd78059  *
5135c1d0199Sgd78059  * Abnormal interrupts:
5145c1d0199Sgd78059  *	We always enable:
5155c1d0199Sgd78059  *		RX_STOPPED_INT
5165c1d0199Sgd78059  *		TX_STOPPED_INT
5175c1d0199Sgd78059  *		SYSTEM_ERR_INT
5185c1d0199Sgd78059  *		RX_UNAVAIL_INT
5195c1d0199Sgd78059  *	We never enable:
5205c1d0199Sgd78059  *		RX_EARLY_INT
5215c1d0199Sgd78059  *		RX_WATCHDOG_INT
5225c1d0199Sgd78059  *		TX_JABBER_INT
5235c1d0199Sgd78059  *		TX_EARLY_INT
5245c1d0199Sgd78059  *		TX_UNDERFLOW_INT
5255c1d0199Sgd78059  *		GP_TIMER_INT		(not valid in -9 chips)
5265c1d0199Sgd78059  *		LINK_STATUS_INT		(not valid in -9 chips)
5275c1d0199Sgd78059  */
5285c1d0199Sgd78059 static void
5295c1d0199Sgd78059 dmfe_enable_interrupts(dmfe_t *dmfep)
5305c1d0199Sgd78059 {
5315c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
5325c1d0199Sgd78059 
5335c1d0199Sgd78059 	/*
5345c1d0199Sgd78059 	 * Put 'the standard set of interrupts' in the interrupt mask register
5355c1d0199Sgd78059 	 */
5365c1d0199Sgd78059 	dmfep->imask =	RX_PKTDONE_INT | TX_PKTDONE_INT |
5375c1d0199Sgd78059 	    RX_STOPPED_INT | TX_STOPPED_INT | RX_UNAVAIL_INT | SYSTEM_ERR_INT;
5385c1d0199Sgd78059 
5395c1d0199Sgd78059 	dmfe_chip_put32(dmfep, INT_MASK_REG,
5405c1d0199Sgd78059 	    NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
5415c1d0199Sgd78059 	dmfep->chip_state = CHIP_RUNNING;
5425c1d0199Sgd78059 }
5435c1d0199Sgd78059 
5445c1d0199Sgd78059 /*
5455c1d0199Sgd78059  * ========== RX side routines ==========
5465c1d0199Sgd78059  */
5475c1d0199Sgd78059 
5485c1d0199Sgd78059 /*
5495c1d0199Sgd78059  * Function to update receive statistics on various errors
5505c1d0199Sgd78059  */
5515c1d0199Sgd78059 static void
5525c1d0199Sgd78059 dmfe_update_rx_stats(dmfe_t *dmfep, uint32_t desc0)
5535c1d0199Sgd78059 {
5545c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->rxlock));
5555c1d0199Sgd78059 
5565c1d0199Sgd78059 	/*
5575c1d0199Sgd78059 	 * The error summary bit and the error bits that it summarises
5585c1d0199Sgd78059 	 * are only valid if this is the last fragment.  Therefore, a
5595c1d0199Sgd78059 	 * fragment only contributes to the error statistics if both
5605c1d0199Sgd78059 	 * the last-fragment and error summary bits are set.
5615c1d0199Sgd78059 	 */
5625c1d0199Sgd78059 	if (((RX_LAST_DESC | RX_ERR_SUMMARY) & ~desc0) == 0) {
5635c1d0199Sgd78059 		dmfep->rx_stats_ierrors += 1;
5645c1d0199Sgd78059 
5655c1d0199Sgd78059 		/*
5665c1d0199Sgd78059 		 * There are some other error bits in the descriptor for
5675c1d0199Sgd78059 		 * which there don't seem to be appropriate MAC statistics,
5685c1d0199Sgd78059 		 * notably RX_COLLISION and perhaps RX_DESC_ERR.  The
5695c1d0199Sgd78059 		 * latter may not be possible if it is supposed to indicate
5705c1d0199Sgd78059 		 * that one buffer has been filled with a partial packet
5715c1d0199Sgd78059 		 * and the next buffer required for the rest of the packet
5725c1d0199Sgd78059 		 * was not available, as all our buffers are more than large
5735c1d0199Sgd78059 		 * enough for a whole packet without fragmenting.
5745c1d0199Sgd78059 		 */
5755c1d0199Sgd78059 
5765c1d0199Sgd78059 		if (desc0 & RX_OVERFLOW) {
5775c1d0199Sgd78059 			dmfep->rx_stats_overflow += 1;
5785c1d0199Sgd78059 
5795c1d0199Sgd78059 		} else if (desc0 & RX_RUNT_FRAME)
5805c1d0199Sgd78059 			dmfep->rx_stats_short += 1;
5815c1d0199Sgd78059 
5825c1d0199Sgd78059 		if (desc0 & RX_CRC)
5835c1d0199Sgd78059 			dmfep->rx_stats_fcs += 1;
5845c1d0199Sgd78059 
5855c1d0199Sgd78059 		if (desc0 & RX_FRAME2LONG)
5865c1d0199Sgd78059 			dmfep->rx_stats_toolong += 1;
5875c1d0199Sgd78059 	}
5885c1d0199Sgd78059 
5895c1d0199Sgd78059 	/*
5905c1d0199Sgd78059 	 * A receive watchdog timeout is counted as a MAC-level receive
5915c1d0199Sgd78059 	 * error.  Strangely, it doesn't set the packet error summary bit,
5925c1d0199Sgd78059 	 * according to the chip data sheet :-?
5935c1d0199Sgd78059 	 */
5945c1d0199Sgd78059 	if (desc0 & RX_RCV_WD_TO)
5955c1d0199Sgd78059 		dmfep->rx_stats_macrcv_errors += 1;
5965c1d0199Sgd78059 
5975c1d0199Sgd78059 	if (desc0 & RX_DRIBBLING)
5985c1d0199Sgd78059 		dmfep->rx_stats_align += 1;
5995c1d0199Sgd78059 
6005c1d0199Sgd78059 	if (desc0 & RX_MII_ERR)
6015c1d0199Sgd78059 		dmfep->rx_stats_macrcv_errors += 1;
6025c1d0199Sgd78059 }
6035c1d0199Sgd78059 
6045c1d0199Sgd78059 /*
6055c1d0199Sgd78059  * Receive incoming packet(s) and pass them up ...
6065c1d0199Sgd78059  */
6075c1d0199Sgd78059 static mblk_t *
6085c1d0199Sgd78059 dmfe_getp(dmfe_t *dmfep)
6095c1d0199Sgd78059 {
6105c1d0199Sgd78059 	dma_area_t *descp;
6115c1d0199Sgd78059 	mblk_t **tail;
6125c1d0199Sgd78059 	mblk_t *head;
6135c1d0199Sgd78059 	mblk_t *mp;
6145c1d0199Sgd78059 	char *rxb;
6155c1d0199Sgd78059 	uchar_t *dp;
6165c1d0199Sgd78059 	uint32_t desc0;
6175c1d0199Sgd78059 	uint32_t misses;
6185c1d0199Sgd78059 	int packet_length;
6195c1d0199Sgd78059 	int index;
6205c1d0199Sgd78059 
6215c1d0199Sgd78059 	mutex_enter(dmfep->rxlock);
6225c1d0199Sgd78059 
6235c1d0199Sgd78059 	/*
6245c1d0199Sgd78059 	 * Update the missed frame statistic from the on-chip counter.
6255c1d0199Sgd78059 	 */
6265c1d0199Sgd78059 	misses = dmfe_chip_get32(dmfep, MISSED_FRAME_REG);
6275c1d0199Sgd78059 	dmfep->rx_stats_norcvbuf += (misses & MISSED_FRAME_MASK);
6285c1d0199Sgd78059 
6295c1d0199Sgd78059 	/*
6305c1d0199Sgd78059 	 * sync (all) receive descriptors before inspecting them
6315c1d0199Sgd78059 	 */
6325c1d0199Sgd78059 	descp = &dmfep->rx_desc;
6335c1d0199Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);
6345c1d0199Sgd78059 
6355c1d0199Sgd78059 	/*
6365c1d0199Sgd78059 	 * We should own at least one RX entry, since we've had a
6375c1d0199Sgd78059 	 * receive interrupt, but let's not be dogmatic about it.
6385c1d0199Sgd78059 	 */
6395c1d0199Sgd78059 	index = dmfep->rx.next_free;
6405c1d0199Sgd78059 	desc0 = dmfe_ring_get32(descp, index, DESC0);
6415c1d0199Sgd78059 
642bdb9230aSGarrett D'Amore 	DTRACE_PROBE1(rx__start, uint32_t, desc0);
6435c1d0199Sgd78059 	for (head = NULL, tail = &head; (desc0 & RX_OWN) == 0; ) {
6445c1d0199Sgd78059 		/*
6455c1d0199Sgd78059 		 * Maintain statistics for every descriptor returned
6465c1d0199Sgd78059 		 * to us by the chip ...
6475c1d0199Sgd78059 		 */
6485c1d0199Sgd78059 		dmfe_update_rx_stats(dmfep, desc0);
6495c1d0199Sgd78059 
6505c1d0199Sgd78059 		/*
6515c1d0199Sgd78059 		 * Check that the entry has both "packet start" and
6525c1d0199Sgd78059 		 * "packet end" flags.  We really shouldn't get packet
6535c1d0199Sgd78059 		 * fragments, 'cos all the RX buffers are bigger than
6545c1d0199Sgd78059 		 * the largest valid packet.  So we'll just drop any
6555c1d0199Sgd78059 		 * fragments we find & skip on to the next entry.
6565c1d0199Sgd78059 		 */
6575c1d0199Sgd78059 		if (((RX_FIRST_DESC | RX_LAST_DESC) & ~desc0) != 0) {
658bdb9230aSGarrett D'Amore 			DTRACE_PROBE1(rx__frag, uint32_t, desc0);
6595c1d0199Sgd78059 			goto skip;
6605c1d0199Sgd78059 		}
6615c1d0199Sgd78059 
6625c1d0199Sgd78059 		/*
6635c1d0199Sgd78059 		 * A whole packet in one buffer.  We have to check error
6645c1d0199Sgd78059 		 * status and packet length before forwarding it upstream.
6655c1d0199Sgd78059 		 */
6665c1d0199Sgd78059 		if (desc0 & RX_ERR_SUMMARY) {
667bdb9230aSGarrett D'Amore 			DTRACE_PROBE1(rx__err, uint32_t, desc0);
6685c1d0199Sgd78059 			goto skip;
6695c1d0199Sgd78059 		}
6705c1d0199Sgd78059 
6715c1d0199Sgd78059 		packet_length = (desc0 >> 16) & 0x3fff;
6725c1d0199Sgd78059 		if (packet_length > DMFE_MAX_PKT_SIZE) {
673bdb9230aSGarrett D'Amore 			DTRACE_PROBE1(rx__toobig, int, packet_length);
6745c1d0199Sgd78059 			goto skip;
6755c1d0199Sgd78059 		} else if (packet_length < ETHERMIN) {
6765c1d0199Sgd78059 			/*
6775c1d0199Sgd78059 			 * Note that VLAN packet would be even larger,
6785c1d0199Sgd78059 			 * but we don't worry about dropping runt VLAN
6795c1d0199Sgd78059 			 * frames.
6805c1d0199Sgd78059 			 *
6815c1d0199Sgd78059 			 * This check is probably redundant, as well,
6825c1d0199Sgd78059 			 * since the hardware should drop RUNT frames.
6835c1d0199Sgd78059 			 */
684bdb9230aSGarrett D'Amore 			DTRACE_PROBE1(rx__runt, int, packet_length);
6855c1d0199Sgd78059 			goto skip;
6865c1d0199Sgd78059 		}
6875c1d0199Sgd78059 
6885c1d0199Sgd78059 		/*
6895c1d0199Sgd78059 		 * Sync the data so that we can examine it.  Note that if
6905c1d0199Sgd78059 		 * we're using Imperfect Filtering, the chip may also have
6915c1d0199Sgd78059 		 * accepted unicast packets sent to stations whose addresses
6925c1d0199Sgd78059 		 * just happen to hash to the same value as our own; as the
6935c1d0199Sgd78059 		 * comment below explains, we no longer filter those out
6945c1d0199Sgd78059 		 * here, but leave that check to the MAC framework.
6955c1d0199Sgd78059 		 */
6965c1d0199Sgd78059 		(void) ddi_dma_sync(dmfep->rx_buff.dma_hdl,
6975c1d0199Sgd78059 		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE,
6985c1d0199Sgd78059 		    DDI_DMA_SYNC_FORKERNEL);
6995c1d0199Sgd78059 		rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE];
7005c1d0199Sgd78059 
7015c1d0199Sgd78059 
7025c1d0199Sgd78059 		/*
7035c1d0199Sgd78059 		 * We do not bother to check that the packet is really for
7045c1d0199Sgd78059 		 * us, we let the MAC framework make that check instead.
7055c1d0199Sgd78059 		 * This is especially important if we ever want to support
7065c1d0199Sgd78059 		 * multiple MAC addresses.
7075c1d0199Sgd78059 		 */
7085c1d0199Sgd78059 
7095c1d0199Sgd78059 		/*
7105c1d0199Sgd78059 		 * Packet looks good; get a buffer to copy it into.  We
7115c1d0199Sgd78059 		 * allow some space at the front of the allocated buffer
7125c1d0199Sgd78059 		 * (HEADROOM) in case any upstream modules want to prepend
7135c1d0199Sgd78059 		 * some sort of header.  The value has been carefully chosen
7145c1d0199Sgd78059 		 * so that it also has the side-effect of making the packet
7155c1d0199Sgd78059 		 * *contents* 4-byte aligned, as required by NCA!
7165c1d0199Sgd78059 		 */
7175c1d0199Sgd78059 		mp = allocb(DMFE_HEADROOM + packet_length, 0);
7185c1d0199Sgd78059 		if (mp == NULL) {
719bdb9230aSGarrett D'Amore 			DTRACE_PROBE(rx__no__buf);
7205c1d0199Sgd78059 			dmfep->rx_stats_norcvbuf += 1;
7215c1d0199Sgd78059 			goto skip;
7225c1d0199Sgd78059 		}
7235c1d0199Sgd78059 
7245c1d0199Sgd78059 		/*
7255c1d0199Sgd78059 		 * Account for statistics of good packets.
7265c1d0199Sgd78059 		 */
7275c1d0199Sgd78059 		dmfep->rx_stats_ipackets += 1;
7285c1d0199Sgd78059 		dmfep->rx_stats_rbytes += packet_length;
7295c1d0199Sgd78059 		if (desc0 & RX_MULTI_FRAME) {
7305c1d0199Sgd78059 			if (bcmp(rxb, dmfe_broadcast_addr, ETHERADDRL)) {
7315c1d0199Sgd78059 				dmfep->rx_stats_multi += 1;
7325c1d0199Sgd78059 			} else {
7335c1d0199Sgd78059 				dmfep->rx_stats_bcast += 1;
7345c1d0199Sgd78059 			}
7355c1d0199Sgd78059 		}
7365c1d0199Sgd78059 
7375c1d0199Sgd78059 		/*
7385c1d0199Sgd78059 		 * Copy the packet into the STREAMS buffer
7395c1d0199Sgd78059 		 */
7405c1d0199Sgd78059 		dp = mp->b_rptr += DMFE_HEADROOM;
7415c1d0199Sgd78059 		mp->b_cont = mp->b_next = NULL;
7425c1d0199Sgd78059 
7435c1d0199Sgd78059 		/*
7445c1d0199Sgd78059 		 * Don't worry about stripping the vlan tag, the MAC
7455c1d0199Sgd78059 		 * layer will take care of that for us.
7465c1d0199Sgd78059 		 */
7475c1d0199Sgd78059 		bcopy(rxb, dp, packet_length);
7485c1d0199Sgd78059 
7495c1d0199Sgd78059 		/*
7505c1d0199Sgd78059 		 * Fix up the packet length, and link it to the chain
7515c1d0199Sgd78059 		 */
7525c1d0199Sgd78059 		mp->b_wptr = mp->b_rptr + packet_length - ETHERFCSL;
7535c1d0199Sgd78059 		*tail = mp;
7545c1d0199Sgd78059 		tail = &mp->b_next;
7555c1d0199Sgd78059 
7565c1d0199Sgd78059 	skip:
7575c1d0199Sgd78059 		/*
7585c1d0199Sgd78059 		 * Return ownership of ring entry & advance to next
7595c1d0199Sgd78059 		 */
7605c1d0199Sgd78059 		dmfe_ring_put32(descp, index, DESC0, RX_OWN);
7615c1d0199Sgd78059 		index = NEXT(index, dmfep->rx.n_desc);
7625c1d0199Sgd78059 		desc0 = dmfe_ring_get32(descp, index, DESC0);
7635c1d0199Sgd78059 	}
7645c1d0199Sgd78059 
7655c1d0199Sgd78059 	/*
7665c1d0199Sgd78059 	 * Remember where to start looking next time ...
7675c1d0199Sgd78059 	 */
7685c1d0199Sgd78059 	dmfep->rx.next_free = index;
7695c1d0199Sgd78059 
7705c1d0199Sgd78059 	/*
7715c1d0199Sgd78059 	 * sync the receive descriptors that we've given back
7725c1d0199Sgd78059 	 * (actually, we sync all of them for simplicity), and
7735c1d0199Sgd78059 	 * wake the chip in case it had suspended receive
7745c1d0199Sgd78059 	 */
7755c1d0199Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
7765c1d0199Sgd78059 	dmfe_chip_put32(dmfep, RX_POLL_REG, 0);
7775c1d0199Sgd78059 
7785c1d0199Sgd78059 	mutex_exit(dmfep->rxlock);
7795c1d0199Sgd78059 	return (head);
7805c1d0199Sgd78059 }
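/*
 * Editorial note: the loop above is the usual descriptor-ownership
 * handoff - the chip clears RX_OWN in DESC0 once it has filled a buffer,
 * and dmfe_getp() gives each entry back by rewriting DESC0 with RX_OWN
 * set, then pokes RX_POLL_REG in case the receiver had suspended for
 * lack of descriptors.
 */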
7815c1d0199Sgd78059 
7825c1d0199Sgd78059 /*
7835c1d0199Sgd78059  * ========== Primary TX side routines ==========
7845c1d0199Sgd78059  */
7855c1d0199Sgd78059 
7865c1d0199Sgd78059 /*
7875c1d0199Sgd78059  *	TX ring management:
7885c1d0199Sgd78059  *
7895c1d0199Sgd78059  *	There are <tx.n_desc> entries in the ring, of which those from
7905c1d0199Sgd78059  *	<tx.next_free> round to but not including <tx.next_busy> must
7915c1d0199Sgd78059  *	be owned by the CPU.  The number of such entries should equal
7925c1d0199Sgd78059  *	<tx.n_free>; but there may also be some more entries which the
7935c1d0199Sgd78059  *	chip has given back but which we haven't yet accounted for.
7945c1d0199Sgd78059  *	The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts
7955c1d0199Sgd78059  *	as it discovers such entries.
7965c1d0199Sgd78059  *
7975c1d0199Sgd78059  *	Initially, or when the ring is entirely free:
7985c1d0199Sgd78059  *		C = Owned by CPU
7995c1d0199Sgd78059  *		D = Owned by Davicom (DMFE) chip
8005c1d0199Sgd78059  *
8015c1d0199Sgd78059  *	tx.next_free					tx.n_desc = 16
8025c1d0199Sgd78059  *	  |
8035c1d0199Sgd78059  *	  v
8045c1d0199Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8055c1d0199Sgd78059  *	| C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C |
8065c1d0199Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8075c1d0199Sgd78059  *	  ^
8085c1d0199Sgd78059  *	  |
8095c1d0199Sgd78059  *	tx.next_busy					tx.n_free = 16
8105c1d0199Sgd78059  *
8115c1d0199Sgd78059  *	On entry to reclaim() during normal use:
8125c1d0199Sgd78059  *
8135c1d0199Sgd78059  *					tx.next_free	tx.n_desc = 16
8145c1d0199Sgd78059  *					      |
8155c1d0199Sgd78059  *					      v
8165c1d0199Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8175c1d0199Sgd78059  *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
8185c1d0199Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8195c1d0199Sgd78059  *		  ^
8205c1d0199Sgd78059  *		  |
8215c1d0199Sgd78059  *		tx.next_busy				tx.n_free = 9
8225c1d0199Sgd78059  *
8235c1d0199Sgd78059  *	On exit from reclaim():
8245c1d0199Sgd78059  *
8255c1d0199Sgd78059  *					tx.next_free	tx.n_desc = 16
8265c1d0199Sgd78059  *					      |
8275c1d0199Sgd78059  *					      v
8285c1d0199Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8295c1d0199Sgd78059  *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
8305c1d0199Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8315c1d0199Sgd78059  *				  ^
8325c1d0199Sgd78059  *				  |
8335c1d0199Sgd78059  *			     tx.next_busy		tx.n_free = 13
8345c1d0199Sgd78059  *
8355c1d0199Sgd78059  *	The ring is considered "full" when only one entry is owned by
8365c1d0199Sgd78059  *	the CPU; thus <tx.n_free> should always be >= 1.
8375c1d0199Sgd78059  *
8385c1d0199Sgd78059  *			tx.next_free			tx.n_desc = 16
8395c1d0199Sgd78059  *			      |
8405c1d0199Sgd78059  *			      v
8415c1d0199Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8425c1d0199Sgd78059  *	| D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D |
8435c1d0199Sgd78059  *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
8445c1d0199Sgd78059  *				  ^
8455c1d0199Sgd78059  *				  |
8465c1d0199Sgd78059  *			     tx.next_busy		tx.n_free = 1
8475c1d0199Sgd78059  */
8485c1d0199Sgd78059 
8495c1d0199Sgd78059 /*
8505c1d0199Sgd78059  * Function to update transmit statistics on various errors
8515c1d0199Sgd78059  */
8525c1d0199Sgd78059 static void
8535c1d0199Sgd78059 dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1)
8545c1d0199Sgd78059 {
8555c1d0199Sgd78059 	uint32_t collisions;
8565c1d0199Sgd78059 	uint32_t errbits;
8575c1d0199Sgd78059 	uint32_t errsum;
8585c1d0199Sgd78059 
8595c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->txlock));
8605c1d0199Sgd78059 
8615c1d0199Sgd78059 	collisions = ((desc0 >> 3) & 0x0f);
8625c1d0199Sgd78059 	errsum = desc0 & TX_ERR_SUMMARY;
8635c1d0199Sgd78059 	errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS |
8645c1d0199Sgd78059 	    TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO);
8655c1d0199Sgd78059 	if ((errsum == 0) != (errbits == 0)) {
8665c1d0199Sgd78059 		dmfe_log(dmfep, "dubious TX error status 0x%x", desc0);
8675c1d0199Sgd78059 		desc0 |= TX_ERR_SUMMARY;
8685c1d0199Sgd78059 	}
8695c1d0199Sgd78059 
8705c1d0199Sgd78059 	if (desc0 & TX_ERR_SUMMARY) {
8715c1d0199Sgd78059 		dmfep->tx_stats_oerrors += 1;
8725c1d0199Sgd78059 
8735c1d0199Sgd78059 		/*
8745c1d0199Sgd78059 		 * If we ever see a transmit jabber timeout, we count it
8755c1d0199Sgd78059 		 * as a MAC-level transmit error; but we probably won't
8765c1d0199Sgd78059 		 * see it as it causes an Abnormal interrupt and we reset
8775c1d0199Sgd78059 		 * the chip in order to recover
8785c1d0199Sgd78059 		 */
8795c1d0199Sgd78059 		if (desc0 & TX_JABBER_TO) {
8805c1d0199Sgd78059 			dmfep->tx_stats_macxmt_errors += 1;
8815c1d0199Sgd78059 			dmfep->tx_stats_jabber += 1;
8825c1d0199Sgd78059 		}
8835c1d0199Sgd78059 
8845c1d0199Sgd78059 		if (desc0 & TX_UNDERFLOW)
8855c1d0199Sgd78059 			dmfep->tx_stats_underflow += 1;
8865c1d0199Sgd78059 		else if (desc0 & TX_LATE_COLL)
8875c1d0199Sgd78059 			dmfep->tx_stats_xmtlatecoll += 1;
8885c1d0199Sgd78059 
8895c1d0199Sgd78059 		if (desc0 & (TX_CARRIER_LOSS | TX_NO_CARRIER))
8905c1d0199Sgd78059 			dmfep->tx_stats_nocarrier += 1;
8915c1d0199Sgd78059 
8925c1d0199Sgd78059 		if (desc0 & TX_EXCESS_COLL) {
8935c1d0199Sgd78059 			dmfep->tx_stats_excoll += 1;
8945c1d0199Sgd78059 			collisions = 16;
8955c1d0199Sgd78059 		}
8965c1d0199Sgd78059 	} else {
8975c1d0199Sgd78059 		int	bit = index % NBBY;
8985c1d0199Sgd78059 		int	byt = index / NBBY;
8995c1d0199Sgd78059 
9005c1d0199Sgd78059 		if (dmfep->tx_mcast[byt] & (1 << bit)) {
9015c1d0199Sgd78059 			dmfep->tx_mcast[byt] &= ~(1 << bit);
9025c1d0199Sgd78059 			dmfep->tx_stats_multi += 1;
9035c1d0199Sgd78059 
9045c1d0199Sgd78059 		} else if (dmfep->tx_bcast[byt] & (1 << bit)) {
9055c1d0199Sgd78059 			dmfep->tx_bcast[byt] &= ~(1 << bit);
9065c1d0199Sgd78059 			dmfep->tx_stats_bcast += 1;
9075c1d0199Sgd78059 		}
9085c1d0199Sgd78059 
9095c1d0199Sgd78059 		dmfep->tx_stats_opackets += 1;
9105c1d0199Sgd78059 		dmfep->tx_stats_obytes += desc1 & TX_BUFFER_SIZE1;
9115c1d0199Sgd78059 	}
9125c1d0199Sgd78059 
9135c1d0199Sgd78059 	if (collisions == 1)
9145c1d0199Sgd78059 		dmfep->tx_stats_first_coll += 1;
9155c1d0199Sgd78059 	else if (collisions != 0)
9165c1d0199Sgd78059 		dmfep->tx_stats_multi_coll += 1;
9175c1d0199Sgd78059 	dmfep->tx_stats_collisions += collisions;
9185c1d0199Sgd78059 
9195c1d0199Sgd78059 	if (desc0 & TX_DEFERRED)
9205c1d0199Sgd78059 		dmfep->tx_stats_defer += 1;
9215c1d0199Sgd78059 }
9225c1d0199Sgd78059 
9235c1d0199Sgd78059 /*
9245c1d0199Sgd78059  * Reclaim all the ring entries that the chip has returned to us ...
9255c1d0199Sgd78059  *
9265c1d0199Sgd78059  * Returns B_FALSE if no entries could be reclaimed.  Otherwise, reclaims
9275c1d0199Sgd78059  * as many as possible, restarts the TX stall timeout, and returns B_TRUE.
9285c1d0199Sgd78059  */
9295c1d0199Sgd78059 static boolean_t
9305c1d0199Sgd78059 dmfe_reclaim_tx_desc(dmfe_t *dmfep)
9315c1d0199Sgd78059 {
9325c1d0199Sgd78059 	dma_area_t *descp;
9335c1d0199Sgd78059 	uint32_t desc0;
9345c1d0199Sgd78059 	uint32_t desc1;
9355c1d0199Sgd78059 	int i;
9365c1d0199Sgd78059 
9375c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->txlock));
9385c1d0199Sgd78059 
9395c1d0199Sgd78059 	/*
9405c1d0199Sgd78059 	 * sync transmit descriptor ring before looking at it
9415c1d0199Sgd78059 	 */
9425c1d0199Sgd78059 	descp = &dmfep->tx_desc;
9435c1d0199Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);
9445c1d0199Sgd78059 
9455c1d0199Sgd78059 	/*
9465c1d0199Sgd78059 	 * Early exit if there are no descriptors to reclaim, either
9475c1d0199Sgd78059 	 * because they're all reclaimed already, or because the next
9485c1d0199Sgd78059 	 * one is still owned by the chip ...
9495c1d0199Sgd78059 	 */
9505c1d0199Sgd78059 	i = dmfep->tx.next_busy;
9515c1d0199Sgd78059 	if (i == dmfep->tx.next_free)
9525c1d0199Sgd78059 		return (B_FALSE);
9535c1d0199Sgd78059 	desc0 = dmfe_ring_get32(descp, i, DESC0);
9545c1d0199Sgd78059 	if (desc0 & TX_OWN)
9555c1d0199Sgd78059 		return (B_FALSE);
9565c1d0199Sgd78059 
9575c1d0199Sgd78059 	/*
9585c1d0199Sgd78059 	 * Reclaim as many descriptors as possible ...
9595c1d0199Sgd78059 	 */
9605c1d0199Sgd78059 	for (;;) {
9615c1d0199Sgd78059 		desc1 = dmfe_ring_get32(descp, i, DESC1);
9625c1d0199Sgd78059 		ASSERT((desc1 & (TX_SETUP_PACKET | TX_LAST_DESC)) != 0);
9635c1d0199Sgd78059 
964bdb9230aSGarrett D'Amore 		if ((desc1 & TX_SETUP_PACKET) == 0) {
9655c1d0199Sgd78059 			/*
9665c1d0199Sgd78059 			 * Regular packet - just update stats
9675c1d0199Sgd78059 			 */
9685c1d0199Sgd78059 			dmfe_update_tx_stats(dmfep, i, desc0, desc1);
9695c1d0199Sgd78059 		}
9705c1d0199Sgd78059 
9715c1d0199Sgd78059 		/*
9725c1d0199Sgd78059 		 * Update count & index; we're all done if the ring is
9735c1d0199Sgd78059 		 * now fully reclaimed, or the next entry if still owned
9745c1d0199Sgd78059 		 * by the chip ...
9755c1d0199Sgd78059 		 */
9765c1d0199Sgd78059 		dmfep->tx.n_free += 1;
9775c1d0199Sgd78059 		i = NEXT(i, dmfep->tx.n_desc);
9785c1d0199Sgd78059 		if (i == dmfep->tx.next_free)
9795c1d0199Sgd78059 			break;
9805c1d0199Sgd78059 		desc0 = dmfe_ring_get32(descp, i, DESC0);
9815c1d0199Sgd78059 		if (desc0 & TX_OWN)
9825c1d0199Sgd78059 			break;
9835c1d0199Sgd78059 	}
9845c1d0199Sgd78059 
9855c1d0199Sgd78059 	dmfep->tx.next_busy = i;
9865c1d0199Sgd78059 	dmfep->tx_pending_tix = 0;
9875c1d0199Sgd78059 	return (B_TRUE);
9885c1d0199Sgd78059 }
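/*
 * Editorial note: clearing tx_pending_tix here restarts the TX-stall
 * detection described near the top of this file; the periodic cyclic
 * callback (outside this section) presumably advances that counter and
 * compares it against stall_100_tix[]/stall_10_tix[], so any successful
 * reclaim counts as evidence that the transmitter is still making
 * progress.
 */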
9895c1d0199Sgd78059 
9905c1d0199Sgd78059 /*
9915c1d0199Sgd78059  * Send the message in the message block chain <mp>.
9925c1d0199Sgd78059  *
9935c1d0199Sgd78059  * The message is freed if and only if its contents are successfully copied
9945c1d0199Sgd78059  * and queued for transmission (so that the return value is B_TRUE).
9955c1d0199Sgd78059  * If we can't queue the message, the return value is B_FALSE and
9965c1d0199Sgd78059  * the message is *not* freed.
9975c1d0199Sgd78059  *
9985c1d0199Sgd78059  * This routine handles the special case of <mp> == NULL, which indicates
9995c1d0199Sgd78059  * that we want to "send" the special "setup packet" allocated during
10005c1d0199Sgd78059  * startup.  We have to use some different flags in the packet descriptor
10015c1d0199Sgd78059  * to say it's a setup packet (from the global <dmfe_setup_desc1>), and the
10025c1d0199Sgd78059  * setup packet *isn't* freed after use.
10035c1d0199Sgd78059  */
10045c1d0199Sgd78059 static boolean_t
10055c1d0199Sgd78059 dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
10065c1d0199Sgd78059 {
10075c1d0199Sgd78059 	dma_area_t *descp;
10085c1d0199Sgd78059 	mblk_t *bp;
10095c1d0199Sgd78059 	char *txb;
10105c1d0199Sgd78059 	uint32_t desc1;
10115c1d0199Sgd78059 	uint32_t index;
10125c1d0199Sgd78059 	size_t totlen;
10135c1d0199Sgd78059 	size_t mblen;
1014bdb9230aSGarrett D'Amore 	uint32_t paddr;
10155c1d0199Sgd78059 
10165c1d0199Sgd78059 	/*
10175c1d0199Sgd78059 	 * If the number of free slots is below the reclaim threshold
10185c1d0199Sgd78059 	 * (soft limit), we'll try to reclaim some.  If we fail, and
10195c1d0199Sgd78059 	 * the number of free slots is also below the minimum required
10205c1d0199Sgd78059 	 * (the hard limit, usually 1), then we can't send the packet.
10215c1d0199Sgd78059 	 */
10225c1d0199Sgd78059 	mutex_enter(dmfep->txlock);
1023bdb9230aSGarrett D'Amore 	if (dmfep->suspended) {
		mutex_exit(dmfep->txlock);
1024bdb9230aSGarrett D'Amore 		return (B_FALSE);
	}
1025bdb9230aSGarrett D'Amore 
10265c1d0199Sgd78059 	if (dmfep->tx.n_free <= dmfe_tx_reclaim_level &&
10275c1d0199Sgd78059 	    dmfe_reclaim_tx_desc(dmfep) == B_FALSE &&
10285c1d0199Sgd78059 	    dmfep->tx.n_free <= dmfe_tx_min_free) {
10295c1d0199Sgd78059 		/*
10305c1d0199Sgd78059 		 * Resource shortage - return B_FALSE so the packet
10315c1d0199Sgd78059 		 * will be queued for retry after the next TX-done
10325c1d0199Sgd78059 		 * interrupt.
10335c1d0199Sgd78059 		 */
10345c1d0199Sgd78059 		mutex_exit(dmfep->txlock);
1035bdb9230aSGarrett D'Amore 		DTRACE_PROBE(tx__no__desc);
10365c1d0199Sgd78059 		return (B_FALSE);
10375c1d0199Sgd78059 	}
10385c1d0199Sgd78059 
10395c1d0199Sgd78059 	/*
10405c1d0199Sgd78059 	 * There's a slot available, so claim it by incrementing
10415c1d0199Sgd78059 	 * the next-free index and decrementing the free count.
10425c1d0199Sgd78059 	 * If the ring is currently empty, we also restart the
10435c1d0199Sgd78059 	 * stall-detect timer.  The ASSERTions check that our
10445c1d0199Sgd78059 	 * invariants still hold:
10455c1d0199Sgd78059 	 *	the next-free index must not match the next-busy index
10465c1d0199Sgd78059 	 *	there must still be at least one free entry
10475c1d0199Sgd78059 	 * After this, we now have exclusive ownership of the ring
10485c1d0199Sgd78059 	 * entry (and matching buffer) indicated by <index>, so we
10495c1d0199Sgd78059 	 * don't need to hold the TX lock any longer
10505c1d0199Sgd78059 	 */
10515c1d0199Sgd78059 	index = dmfep->tx.next_free;
10525c1d0199Sgd78059 	dmfep->tx.next_free = NEXT(index, dmfep->tx.n_desc);
10535c1d0199Sgd78059 	ASSERT(dmfep->tx.next_free != dmfep->tx.next_busy);
10545c1d0199Sgd78059 	if (dmfep->tx.n_free-- == dmfep->tx.n_desc)
10555c1d0199Sgd78059 		dmfep->tx_pending_tix = 0;
10565c1d0199Sgd78059 	ASSERT(dmfep->tx.n_free >= 1);
10575c1d0199Sgd78059 	mutex_exit(dmfep->txlock);
10585c1d0199Sgd78059 
10595c1d0199Sgd78059 	/*
10605c1d0199Sgd78059 	 * Check the ownership of the ring entry ...
10615c1d0199Sgd78059 	 */
10625c1d0199Sgd78059 	descp = &dmfep->tx_desc;
10635c1d0199Sgd78059 	ASSERT((dmfe_ring_get32(descp, index, DESC0) & TX_OWN) == 0);
10645c1d0199Sgd78059 
10655c1d0199Sgd78059 	if (mp == NULL) {
10665c1d0199Sgd78059 		/*
10675c1d0199Sgd78059 		 * Indicates we should send a SETUP packet, which we do by
10685c1d0199Sgd78059 		 * temporarily switching the BUFFER1 pointer in the ring
10695c1d0199Sgd78059 		 * entry.  The reclaim routine will restore BUFFER1 to its
10705c1d0199Sgd78059 		 * usual value.
10715c1d0199Sgd78059 		 *
10725c1d0199Sgd78059 		 * Note that as the setup packet is tagged on the end of
10735c1d0199Sgd78059 		 * the TX ring, when we sync the descriptor we're also
10745c1d0199Sgd78059 		 * implicitly syncing the setup packet - hence, we don't
10755c1d0199Sgd78059 		 * need a separate ddi_dma_sync() call here.
10765c1d0199Sgd78059 		 */
10775c1d0199Sgd78059 		desc1 = dmfe_setup_desc1;
1078bdb9230aSGarrett D'Amore 		paddr = descp->setup_dvma;
10795c1d0199Sgd78059 	} else {
10805c1d0199Sgd78059 		/*
10815c1d0199Sgd78059 		 * A regular packet; we copy the data into a pre-mapped
10825c1d0199Sgd78059 		 * buffer, which avoids the overhead (and complication)
10835c1d0199Sgd78059 		 * of mapping/unmapping STREAMS buffers and keeping hold
10845c1d0199Sgd78059 		 * of them until the DMA has completed.
10855c1d0199Sgd78059 		 *
10865c1d0199Sgd78059 		 * Because all buffers are the same size, and larger
10875c1d0199Sgd78059 		 * than the longest single valid message, we don't have
10885c1d0199Sgd78059 		 * to bother about splitting the message across multiple
10895c1d0199Sgd78059 		 * buffers.
10905c1d0199Sgd78059 		 */
10915c1d0199Sgd78059 		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
10925c1d0199Sgd78059 		totlen = 0;
10935c1d0199Sgd78059 		bp = mp;
10945c1d0199Sgd78059 
10955c1d0199Sgd78059 		/*
10965c1d0199Sgd78059 		 * Copy all (remaining) mblks in the message ...
10975c1d0199Sgd78059 		 */
10985c1d0199Sgd78059 		for (; bp != NULL; bp = bp->b_cont) {
109922eb7cb5Sgd78059 			mblen = MBLKL(bp);
11005c1d0199Sgd78059 			if ((totlen += mblen) <= DMFE_MAX_PKT_SIZE) {
11015c1d0199Sgd78059 				bcopy(bp->b_rptr, txb, mblen);
11025c1d0199Sgd78059 				txb += mblen;
11035c1d0199Sgd78059 			}
11045c1d0199Sgd78059 		}
11055c1d0199Sgd78059 
11065c1d0199Sgd78059 		/*
11075c1d0199Sgd78059 		 * Is this a multicast or broadcast packet?  We do
11085c1d0199Sgd78059 		 * this so that we can track statistics accurately
11095c1d0199Sgd78059 		 * when we reclaim it.
11105c1d0199Sgd78059 		 */
11115c1d0199Sgd78059 		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
11125c1d0199Sgd78059 		if (txb[0] & 0x1) {
11135c1d0199Sgd78059 			if (bcmp(txb, dmfe_broadcast_addr, ETHERADDRL) == 0) {
11145c1d0199Sgd78059 				dmfep->tx_bcast[index / NBBY] |=
11155c1d0199Sgd78059 				    (1 << (index % NBBY));
11165c1d0199Sgd78059 			} else {
11175c1d0199Sgd78059 				dmfep->tx_mcast[index / NBBY] |=
11185c1d0199Sgd78059 				    (1 << (index % NBBY));
11195c1d0199Sgd78059 			}
11205c1d0199Sgd78059 		}
11215c1d0199Sgd78059 
11225c1d0199Sgd78059 		/*
11235c1d0199Sgd78059 		 * We've reached the end of the chain, and we should have
11245c1d0199Sgd78059 		 * collected no more than DMFE_MAX_PKT_SIZE bytes into our
11255c1d0199Sgd78059 		 * buffer.  Note that the <size> field in the descriptor is
11265c1d0199Sgd78059 		 * only 11 bits, so bigger packets would be a problem!
11275c1d0199Sgd78059 		 */
11285c1d0199Sgd78059 		ASSERT(bp == NULL);
11295c1d0199Sgd78059 		ASSERT(totlen <= DMFE_MAX_PKT_SIZE);
11305c1d0199Sgd78059 		totlen &= TX_BUFFER_SIZE1;
11315c1d0199Sgd78059 		desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;
1132bdb9230aSGarrett D'Amore 		paddr = dmfep->tx_buff.mem_dvma + index*DMFE_BUF_SIZE;
11335c1d0199Sgd78059 
11345c1d0199Sgd78059 		(void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
11355c1d0199Sgd78059 		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, DDI_DMA_SYNC_FORDEV);
11365c1d0199Sgd78059 	}
11375c1d0199Sgd78059 
11385c1d0199Sgd78059 	/*
11395c1d0199Sgd78059 	 * Update ring descriptor entries, sync them, and wake up the
11405c1d0199Sgd78059 	 * transmit process
11415c1d0199Sgd78059 	 */
11425c1d0199Sgd78059 	if ((index & dmfe_tx_int_factor) == 0)
11435c1d0199Sgd78059 		desc1 |= TX_INT_ON_COMP;
11445c1d0199Sgd78059 	desc1 |= TX_CHAINING;
1145bdb9230aSGarrett D'Amore 	dmfe_ring_put32(descp, index, BUFFER1, paddr);
11465c1d0199Sgd78059 	dmfe_ring_put32(descp, index, DESC1, desc1);
11475c1d0199Sgd78059 	dmfe_ring_put32(descp, index, DESC0, TX_OWN);
11485c1d0199Sgd78059 	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
11495c1d0199Sgd78059 	dmfe_chip_put32(dmfep, TX_POLL_REG, 0);
11505c1d0199Sgd78059 
11515c1d0199Sgd78059 	/*
11525c1d0199Sgd78059 	 * Finally, free the message & return success
11535c1d0199Sgd78059 	 */
11545c1d0199Sgd78059 	if (mp)
11555c1d0199Sgd78059 		freemsg(mp);
11565c1d0199Sgd78059 	return (B_TRUE);
11575c1d0199Sgd78059 }
11585c1d0199Sgd78059 
11595c1d0199Sgd78059 /*
11605c1d0199Sgd78059  *	dmfe_m_tx() -- send a chain of packets
11615c1d0199Sgd78059  *
11625c1d0199Sgd78059  *	Called when packet(s) are ready to be transmitted. A pointer to an
11635c1d0199Sgd78059  *	M_DATA message that contains the packet is passed to this routine.
11645c1d0199Sgd78059  *	The complete LLC header is contained in the message's first message
11655c1d0199Sgd78059  *	block, and the remainder of the packet is contained within
11665c1d0199Sgd78059  *	additional M_DATA message blocks linked to the first message block.
11675c1d0199Sgd78059  *
11685c1d0199Sgd78059  *	Additional messages may be passed by linking with b_next.
11695c1d0199Sgd78059  */
11705c1d0199Sgd78059 static mblk_t *
11715c1d0199Sgd78059 dmfe_m_tx(void *arg, mblk_t *mp)
11725c1d0199Sgd78059 {
11735c1d0199Sgd78059 	dmfe_t *dmfep = arg;			/* private device info	*/
11745c1d0199Sgd78059 	mblk_t *next;
11755c1d0199Sgd78059 
11765c1d0199Sgd78059 	ASSERT(mp != NULL);
11775c1d0199Sgd78059 	ASSERT(dmfep->mac_state == DMFE_MAC_STARTED);
11785c1d0199Sgd78059 
11795c1d0199Sgd78059 	if (dmfep->chip_state != CHIP_RUNNING)
11805c1d0199Sgd78059 		return (mp);
11815c1d0199Sgd78059 
11825c1d0199Sgd78059 	while (mp != NULL) {
11835c1d0199Sgd78059 		next = mp->b_next;
11845c1d0199Sgd78059 		mp->b_next = NULL;
11855c1d0199Sgd78059 		if (!dmfe_send_msg(dmfep, mp)) {
11865c1d0199Sgd78059 			mp->b_next = next;
11875c1d0199Sgd78059 			break;
11885c1d0199Sgd78059 		}
11895c1d0199Sgd78059 		mp = next;
11905c1d0199Sgd78059 	}
11915c1d0199Sgd78059 
11925c1d0199Sgd78059 	return (mp);
11935c1d0199Sgd78059 }
11945c1d0199Sgd78059 
11955c1d0199Sgd78059 /*
11965c1d0199Sgd78059  * ========== Address-setting routines (TX-side) ==========
11975c1d0199Sgd78059  */
11985c1d0199Sgd78059 
11995c1d0199Sgd78059 /*
12005c1d0199Sgd78059  * Find the index of the relevant bit in the setup packet.
12015c1d0199Sgd78059  * This must mirror the way the hardware will actually calculate it!
12025c1d0199Sgd78059  */
12035c1d0199Sgd78059 static uint32_t
12045c1d0199Sgd78059 dmfe_hash_index(const uint8_t *address)
12055c1d0199Sgd78059 {
12065c1d0199Sgd78059 	uint32_t const POLY = HASH_POLY;
12075c1d0199Sgd78059 	uint32_t crc = HASH_CRC;
12085c1d0199Sgd78059 	uint32_t index;
12095c1d0199Sgd78059 	uint32_t msb;
12105c1d0199Sgd78059 	uchar_t currentbyte;
12115c1d0199Sgd78059 	int byteslength;
12125c1d0199Sgd78059 	int shift;
12135c1d0199Sgd78059 	int bit;
12145c1d0199Sgd78059 
12155c1d0199Sgd78059 	for (byteslength = 0; byteslength < ETHERADDRL; ++byteslength) {
12165c1d0199Sgd78059 		currentbyte = address[byteslength];
12175c1d0199Sgd78059 		for (bit = 0; bit < 8; ++bit) {
12185c1d0199Sgd78059 			msb = crc >> 31;
12195c1d0199Sgd78059 			crc <<= 1;
12205c1d0199Sgd78059 			if (msb ^ (currentbyte & 1)) {
12215c1d0199Sgd78059 				crc ^= POLY;
12225c1d0199Sgd78059 				crc |= 0x00000001;
12235c1d0199Sgd78059 			}
12245c1d0199Sgd78059 			currentbyte >>= 1;
12255c1d0199Sgd78059 		}
12265c1d0199Sgd78059 	}
12275c1d0199Sgd78059 
12285c1d0199Sgd78059 	for (index = 0, bit = 23, shift = 8; shift >= 0; ++bit, --shift)
12295c1d0199Sgd78059 		index |= (((crc >> bit) & 1) << shift);
12305c1d0199Sgd78059 
12315c1d0199Sgd78059 	return (index);
12325c1d0199Sgd78059 }
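/*
 * Illustrative sketch (not part of the driver): the final loop in
 * dmfe_hash_index() above is equivalent to taking CRC bits <31:23>
 * in reverse order, i.e.
 *
 *	index = 0;
 *	for (bit = 0; bit < 9; ++bit)
 *		index |= ((crc >> (31 - bit)) & 1) << bit;
 *
 * so the returned index is a 9-bit value (0..511), selecting one bit
 * of the 512-bit hash table held in the setup packet.
 */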
12335c1d0199Sgd78059 
12345c1d0199Sgd78059 /*
12355c1d0199Sgd78059  * Find and set/clear the relevant bit in the setup packet hash table
12365c1d0199Sgd78059  * This must mirror the way the hardware will actually interpret it!
12375c1d0199Sgd78059  */
12385c1d0199Sgd78059 static void
12395c1d0199Sgd78059 dmfe_update_hash(dmfe_t *dmfep, uint32_t index, boolean_t val)
12405c1d0199Sgd78059 {
12415c1d0199Sgd78059 	dma_area_t *descp;
12425c1d0199Sgd78059 	uint32_t tmp;
12435c1d0199Sgd78059 
12445c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
12455c1d0199Sgd78059 
12465c1d0199Sgd78059 	descp = &dmfep->tx_desc;
12475c1d0199Sgd78059 	tmp = dmfe_setup_get32(descp, index/16);
12485c1d0199Sgd78059 	if (val)
12495c1d0199Sgd78059 		tmp |= 1 << (index%16);
12505c1d0199Sgd78059 	else
12515c1d0199Sgd78059 		tmp &= ~(1 << (index%16));
12525c1d0199Sgd78059 	dmfe_setup_put32(descp, index/16, tmp);
12535c1d0199Sgd78059 }
12545c1d0199Sgd78059 
12555c1d0199Sgd78059 /*
12565c1d0199Sgd78059  * Update the refcount for the bit in the setup packet corresponding
12575c1d0199Sgd78059  * to the specified address; if it changes between zero & nonzero,
12585c1d0199Sgd78059  * also update the bitmap itself & return B_TRUE, so that the caller
12595c1d0199Sgd78059  * knows to re-send the setup packet.  Otherwise (only the refcount
12605c1d0199Sgd78059  * changed), return B_FALSE
12615c1d0199Sgd78059  */
12625c1d0199Sgd78059 static boolean_t
12635c1d0199Sgd78059 dmfe_update_mcast(dmfe_t *dmfep, const uint8_t *mca, boolean_t val)
12645c1d0199Sgd78059 {
12655c1d0199Sgd78059 	uint32_t index;
12665c1d0199Sgd78059 	uint8_t *refp;
12675c1d0199Sgd78059 	boolean_t change;
12685c1d0199Sgd78059 
12695c1d0199Sgd78059 	index = dmfe_hash_index(mca);
12705c1d0199Sgd78059 	refp = &dmfep->mcast_refs[index];
12715c1d0199Sgd78059 	change = (val ? (*refp)++ : --(*refp)) == 0;
12725c1d0199Sgd78059 
12735c1d0199Sgd78059 	if (change)
12745c1d0199Sgd78059 		dmfe_update_hash(dmfep, index, val);
12755c1d0199Sgd78059 
12765c1d0199Sgd78059 	return (change);
12775c1d0199Sgd78059 }
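/*
 * Illustrative usage (hypothetical addresses): if mca_a and mca_b happen
 * to hash to the same index, then
 *
 *	dmfe_update_mcast(dmfep, mca_a, B_TRUE);	returns B_TRUE  (0 -> 1)
 *	dmfe_update_mcast(dmfep, mca_b, B_TRUE);	returns B_FALSE (1 -> 2)
 *	dmfe_update_mcast(dmfep, mca_b, B_FALSE);	returns B_FALSE (2 -> 1)
 *	dmfe_update_mcast(dmfep, mca_a, B_FALSE);	returns B_TRUE  (1 -> 0)
 *
 * so only the first add and the last remove change the bitmap and
 * require a new setup frame to be sent.
 */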
12785c1d0199Sgd78059 
12795c1d0199Sgd78059 /*
12805c1d0199Sgd78059  * "Transmit" the (possibly updated) magic setup packet
12815c1d0199Sgd78059  */
12825c1d0199Sgd78059 static int
12835c1d0199Sgd78059 dmfe_send_setup(dmfe_t *dmfep)
12845c1d0199Sgd78059 {
12855c1d0199Sgd78059 	int status;
12865c1d0199Sgd78059 
12875c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
12885c1d0199Sgd78059 
1289bdb9230aSGarrett D'Amore 	if (dmfep->suspended)
1290bdb9230aSGarrett D'Amore 		return (0);
1291bdb9230aSGarrett D'Amore 
12925c1d0199Sgd78059 	/*
12935c1d0199Sgd78059 	 * If the chip isn't running, we can't really send the setup frame
12945c1d0199Sgd78059 	 * now but it doesn't matter, 'cos it will be sent when the transmit
12955c1d0199Sgd78059 	 * process is restarted (see dmfe_start()).
12965c1d0199Sgd78059 	 */
12975c1d0199Sgd78059 	if ((dmfep->opmode & START_TRANSMIT) == 0)
12985c1d0199Sgd78059 		return (0);
12995c1d0199Sgd78059 
13005c1d0199Sgd78059 	/*
13015c1d0199Sgd78059 	 * "Send" the setup frame.  If it fails (e.g. no resources),
13025c1d0199Sgd78059 	 * set a flag; then the factotum will retry the "send".  Once
13035c1d0199Sgd78059 	 * it works, we can clear the flag no matter how many attempts
13045c1d0199Sgd78059 	 * had previously failed.  We tell the caller that it worked
13055c1d0199Sgd78059 	 * whether it did or not; after all, it *will* work eventually.
13065c1d0199Sgd78059 	 */
13075c1d0199Sgd78059 	status = dmfe_send_msg(dmfep, NULL);
13085c1d0199Sgd78059 	dmfep->need_setup = status ? B_FALSE : B_TRUE;
13095c1d0199Sgd78059 	return (0);
13105c1d0199Sgd78059 }
13115c1d0199Sgd78059 
13125c1d0199Sgd78059 /*
13135c1d0199Sgd78059  *	dmfe_m_unicst() -- set the physical network address
13145c1d0199Sgd78059  */
13155c1d0199Sgd78059 static int
13165c1d0199Sgd78059 dmfe_m_unicst(void *arg, const uint8_t *macaddr)
13175c1d0199Sgd78059 {
13185c1d0199Sgd78059 	dmfe_t *dmfep = arg;
13195c1d0199Sgd78059 	int status;
13205c1d0199Sgd78059 	int index;
13215c1d0199Sgd78059 
13225c1d0199Sgd78059 	/*
13235c1d0199Sgd78059 	 * Update our current address and send out a new setup packet
13245c1d0199Sgd78059 	 *
13255c1d0199Sgd78059 	 * Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT
13265c1d0199Sgd78059 	 * filtering modes (we don't support PERFECT_ONLY or INVERSE modes).
13275c1d0199Sgd78059 	 *
13285c1d0199Sgd78059 	 * It is said that there is a bug in the 21140 where it fails to
13295c1d0199Sgd78059 	 * receive packets addressed to the specified perfect filter address.
13305c1d0199Sgd78059 	 * If the same bug is present in the DM9102A, the TX_FILTER_TYPE1
13315c1d0199Sgd78059 	 * bit should be set in the module variable dmfe_setup_desc1.
13325c1d0199Sgd78059 	 *
13335c1d0199Sgd78059 	 * If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering.
13345c1d0199Sgd78059 	 * In this mode, *all* incoming addresses are hashed and looked
13355c1d0199Sgd78059 	 * up in the bitmap described by the setup packet.  Therefore,
13365c1d0199Sgd78059 	 * the bit representing the station address has to be added to
13375c1d0199Sgd78059 	 * the table before sending it out.  If the address is changed,
13385c1d0199Sgd78059 	 * the old entry should be removed before the new entry is made.
13395c1d0199Sgd78059 	 *
13405c1d0199Sgd78059 	 * NOTE: in this mode, unicast packets that are not intended for
13415c1d0199Sgd78059 	 * this station may be received; it is up to software to filter
13425c1d0199Sgd78059 	 * them out afterwards!
13435c1d0199Sgd78059 	 *
13445c1d0199Sgd78059 	 * If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT
13455c1d0199Sgd78059 	 * filtering.  In this mode, multicast addresses are hashed and
13465c1d0199Sgd78059 	 * checked against the bitmap, while unicast addresses are simply
13475c1d0199Sgd78059 	 * matched against the one physical address specified in the setup
13485c1d0199Sgd78059 	 * packet.  This means that we shouldn't receive unicast packets
13495c1d0199Sgd78059 	 * that aren't intended for us (but software still has to filter
13505c1d0199Sgd78059 	 * multicast packets just the same).
13515c1d0199Sgd78059 	 *
13525c1d0199Sgd78059 	 * Whichever mode we're using, we have to enter the broadcast
13535c1d0199Sgd78059 	 * address into the multicast filter map too, so we do this on
13545c1d0199Sgd78059 	 * the first time through after attach or reset.
13555c1d0199Sgd78059 	 */
13565c1d0199Sgd78059 	mutex_enter(dmfep->oplock);
13575c1d0199Sgd78059 
13585c1d0199Sgd78059 	if (dmfep->addr_set && dmfe_setup_desc1 & TX_FILTER_TYPE1)
13595c1d0199Sgd78059 		(void) dmfe_update_mcast(dmfep, dmfep->curr_addr, B_FALSE);
13605c1d0199Sgd78059 	if (dmfe_setup_desc1 & TX_FILTER_TYPE1)
13615c1d0199Sgd78059 		(void) dmfe_update_mcast(dmfep, macaddr, B_TRUE);
13625c1d0199Sgd78059 	if (!dmfep->addr_set)
13635c1d0199Sgd78059 		(void) dmfe_update_mcast(dmfep, dmfe_broadcast_addr, B_TRUE);
13645c1d0199Sgd78059 
13655c1d0199Sgd78059 	/*
13665c1d0199Sgd78059 	 * Remember the new current address
13675c1d0199Sgd78059 	 */
13685c1d0199Sgd78059 	ethaddr_copy(macaddr, dmfep->curr_addr);
13695c1d0199Sgd78059 	dmfep->addr_set = B_TRUE;
13705c1d0199Sgd78059 
13715c1d0199Sgd78059 	/*
13725c1d0199Sgd78059 	 * Install the new physical address into the proper position in
13735c1d0199Sgd78059 	 * the setup frame; this is only used if we select hash+perfect
13745c1d0199Sgd78059 	 * filtering, but we'll put it in anyway.  The ugliness here is
13755c1d0199Sgd78059 	 * down to the usual war of the egg :(
13765c1d0199Sgd78059 	 * down to the usual chicken-and-egg problem :(
13775c1d0199Sgd78059 	for (index = 0; index < ETHERADDRL; index += 2)
13785c1d0199Sgd78059 		dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2,
13795c1d0199Sgd78059 		    (macaddr[index+1] << 8) | macaddr[index]);
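	/*
	 * Worked example (hypothetical address 00:11:22:33:44:55): the loop
	 * above stores 0x1100, 0x3322 and 0x5544 into setup-buffer words
	 * SETUPBUF_PHYS, SETUPBUF_PHYS+1 and SETUPBUF_PHYS+2 respectively,
	 * i.e. two address bytes in the low half of each 32-bit word,
	 * least-significant byte first.
	 */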
13805c1d0199Sgd78059 
13815c1d0199Sgd78059 	/*
13825c1d0199Sgd78059 	 * Finally, we're ready to "transmit" the setup frame
13835c1d0199Sgd78059 	 */
13845c1d0199Sgd78059 	status = dmfe_send_setup(dmfep);
13855c1d0199Sgd78059 	mutex_exit(dmfep->oplock);
13865c1d0199Sgd78059 
13875c1d0199Sgd78059 	return (status);
13885c1d0199Sgd78059 }
13895c1d0199Sgd78059 
13905c1d0199Sgd78059 /*
13915c1d0199Sgd78059  *	dmfe_m_multicst() -- enable or disable a multicast address
13925c1d0199Sgd78059  *
13935c1d0199Sgd78059  *	Program the hardware to enable/disable the multicast address
13945c1d0199Sgd78059  *	in "mca" (enable if add is true, otherwise disable it.)
13955c1d0199Sgd78059  *	We keep a refcount for each bit in the map, so that it still
13965c1d0199Sgd78059  *	works out properly if multiple addresses hash to the same bit.
13975c1d0199Sgd78059  *	dmfe_update_mcast() tells us whether the map actually changed;
13985c1d0199Sgd78059  *	if so, we have to re-"transmit" the magic setup packet.
13995c1d0199Sgd78059  */
14005c1d0199Sgd78059 static int
14015c1d0199Sgd78059 dmfe_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
14025c1d0199Sgd78059 {
14035c1d0199Sgd78059 	dmfe_t *dmfep = arg;			/* private device info	*/
14045c1d0199Sgd78059 	int status = 0;
14055c1d0199Sgd78059 
14065c1d0199Sgd78059 	mutex_enter(dmfep->oplock);
14075c1d0199Sgd78059 	if (dmfe_update_mcast(dmfep, mca, add))
14085c1d0199Sgd78059 		status = dmfe_send_setup(dmfep);
14095c1d0199Sgd78059 	mutex_exit(dmfep->oplock);
14105c1d0199Sgd78059 
14115c1d0199Sgd78059 	return (status);
14125c1d0199Sgd78059 }
14135c1d0199Sgd78059 
14145c1d0199Sgd78059 
14155c1d0199Sgd78059 /*
14165c1d0199Sgd78059  * ========== Internal state management entry points ==========
14175c1d0199Sgd78059  */
14185c1d0199Sgd78059 
14195c1d0199Sgd78059 /*
14205c1d0199Sgd78059  * These routines provide all the functionality required by the
14215c1d0199Sgd78059  * corresponding MAC layer entry points, but don't update the MAC layer state
14225c1d0199Sgd78059  * so they can be called internally without disturbing our record
14235c1d0199Sgd78059  * of what the MAC layer thinks we should be doing ...
14245c1d0199Sgd78059  */
14255c1d0199Sgd78059 
14265c1d0199Sgd78059 /*
14275c1d0199Sgd78059  *	dmfe_stop() -- stop processing, don't reset h/w or rings
14285c1d0199Sgd78059  */
14295c1d0199Sgd78059 static void
14305c1d0199Sgd78059 dmfe_stop(dmfe_t *dmfep)
14315c1d0199Sgd78059 {
14325c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
14335c1d0199Sgd78059 
14345c1d0199Sgd78059 	dmfe_stop_chip(dmfep, CHIP_STOPPED);
14355c1d0199Sgd78059 }
14365c1d0199Sgd78059 
14375c1d0199Sgd78059 /*
14385c1d0199Sgd78059  *	dmfe_reset() -- stop processing, reset h/w & rings to initial state
14395c1d0199Sgd78059  */
14405c1d0199Sgd78059 static void
14415c1d0199Sgd78059 dmfe_reset(dmfe_t *dmfep)
14425c1d0199Sgd78059 {
14435c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
14445c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->rxlock));
14455c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->txlock));
14465c1d0199Sgd78059 
14475c1d0199Sgd78059 	dmfe_stop_chip(dmfep, CHIP_RESET);
14485c1d0199Sgd78059 	dmfe_init_rings(dmfep);
14495c1d0199Sgd78059 }
14505c1d0199Sgd78059 
14515c1d0199Sgd78059 /*
14525c1d0199Sgd78059  *	dmfe_start() -- start transmitting/receiving
14535c1d0199Sgd78059  */
14545c1d0199Sgd78059 static void
14555c1d0199Sgd78059 dmfe_start(dmfe_t *dmfep)
14565c1d0199Sgd78059 {
14575c1d0199Sgd78059 	uint32_t gpsr;
14585c1d0199Sgd78059 
14595c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
14605c1d0199Sgd78059 
14615c1d0199Sgd78059 	ASSERT(dmfep->chip_state == CHIP_RESET ||
14625c1d0199Sgd78059 	    dmfep->chip_state == CHIP_STOPPED);
14635c1d0199Sgd78059 
14645c1d0199Sgd78059 	/*
14655c1d0199Sgd78059 	 * Make opmode consistent with PHY duplex setting
14665c1d0199Sgd78059 	 */
14675c1d0199Sgd78059 	gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
14685c1d0199Sgd78059 	if (gpsr & GPS_FULL_DUPLEX)
14695c1d0199Sgd78059 		dmfep->opmode |= FULL_DUPLEX;
14705c1d0199Sgd78059 	else
14715c1d0199Sgd78059 		dmfep->opmode &= ~FULL_DUPLEX;
14725c1d0199Sgd78059 
14735c1d0199Sgd78059 	/*
14745c1d0199Sgd78059 	 * Start transmit processing
14755c1d0199Sgd78059 	 * Set up the address filters
14765c1d0199Sgd78059 	 * Start receive processing
14775c1d0199Sgd78059 	 * Enable interrupts
14785c1d0199Sgd78059 	 */
14795c1d0199Sgd78059 	dmfe_start_chip(dmfep, START_TRANSMIT);
14805c1d0199Sgd78059 	(void) dmfe_send_setup(dmfep);
14815c1d0199Sgd78059 	drv_usecwait(10);
14825c1d0199Sgd78059 	dmfe_start_chip(dmfep, START_RECEIVE);
14835c1d0199Sgd78059 	dmfe_enable_interrupts(dmfep);
14845c1d0199Sgd78059 }
14855c1d0199Sgd78059 
14865c1d0199Sgd78059 /*
14875c1d0199Sgd78059  * dmfe_restart - restart transmitting/receiving after error or suspend
14885c1d0199Sgd78059  */
14895c1d0199Sgd78059 static void
14905c1d0199Sgd78059 dmfe_restart(dmfe_t *dmfep)
14915c1d0199Sgd78059 {
14925c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
14935c1d0199Sgd78059 
14945c1d0199Sgd78059 	/*
14955c1d0199Sgd78059 	 * You need not only <oplock>, but also <rxlock> AND <txlock>
14965c1d0199Sgd78059 	 * in order to reset the rings, but then <txlock> *mustn't*
14975c1d0199Sgd78059 	 * be held across the call to dmfe_start()
14985c1d0199Sgd78059 	 */
14995c1d0199Sgd78059 	mutex_enter(dmfep->rxlock);
15005c1d0199Sgd78059 	mutex_enter(dmfep->txlock);
15015c1d0199Sgd78059 	dmfe_reset(dmfep);
15025c1d0199Sgd78059 	mutex_exit(dmfep->txlock);
15035c1d0199Sgd78059 	mutex_exit(dmfep->rxlock);
1504bdb9230aSGarrett D'Amore 	if (dmfep->mac_state == DMFE_MAC_STARTED) {
15055c1d0199Sgd78059 		dmfe_start(dmfep);
15065c1d0199Sgd78059 	}
1507bdb9230aSGarrett D'Amore }
15085c1d0199Sgd78059 
15095c1d0199Sgd78059 
15105c1d0199Sgd78059 /*
15115c1d0199Sgd78059  * ========== MAC-required management entry points ==========
15125c1d0199Sgd78059  */
15135c1d0199Sgd78059 
15145c1d0199Sgd78059 /*
15155c1d0199Sgd78059  *	dmfe_m_stop() -- stop transmitting/receiving
15165c1d0199Sgd78059  */
15175c1d0199Sgd78059 static void
15185c1d0199Sgd78059 dmfe_m_stop(void *arg)
15195c1d0199Sgd78059 {
15205c1d0199Sgd78059 	dmfe_t *dmfep = arg;			/* private device info	*/
15215c1d0199Sgd78059 
15225c1d0199Sgd78059 	/*
15235c1d0199Sgd78059 	 * Just stop processing, then record new MAC state
15245c1d0199Sgd78059 	 */
1525bdb9230aSGarrett D'Amore 	mii_stop(dmfep->mii);
1526bdb9230aSGarrett D'Amore 
15275c1d0199Sgd78059 	mutex_enter(dmfep->oplock);
1528bdb9230aSGarrett D'Amore 	if (!dmfep->suspended)
15295c1d0199Sgd78059 		dmfe_stop(dmfep);
15305c1d0199Sgd78059 	dmfep->mac_state = DMFE_MAC_STOPPED;
15315c1d0199Sgd78059 	mutex_exit(dmfep->oplock);
15325c1d0199Sgd78059 }
15335c1d0199Sgd78059 
15345c1d0199Sgd78059 /*
15355c1d0199Sgd78059  *	dmfe_m_start() -- start transmitting/receiving
15365c1d0199Sgd78059  */
15375c1d0199Sgd78059 static int
15385c1d0199Sgd78059 dmfe_m_start(void *arg)
15395c1d0199Sgd78059 {
15405c1d0199Sgd78059 	dmfe_t *dmfep = arg;			/* private device info	*/
15415c1d0199Sgd78059 
15425c1d0199Sgd78059 	/*
15435c1d0199Sgd78059 	 * Start processing and record new MAC state
15445c1d0199Sgd78059 	 */
15455c1d0199Sgd78059 	mutex_enter(dmfep->oplock);
1546bdb9230aSGarrett D'Amore 	if (!dmfep->suspended)
15475c1d0199Sgd78059 		dmfe_start(dmfep);
15485c1d0199Sgd78059 	dmfep->mac_state = DMFE_MAC_STARTED;
15495c1d0199Sgd78059 	mutex_exit(dmfep->oplock);
15505c1d0199Sgd78059 
1551bdb9230aSGarrett D'Amore 	mii_start(dmfep->mii);
1552bdb9230aSGarrett D'Amore 
15535c1d0199Sgd78059 	return (0);
15545c1d0199Sgd78059 }
15555c1d0199Sgd78059 
15565c1d0199Sgd78059 /*
15575c1d0199Sgd78059  * dmfe_m_promisc() -- set or reset promiscuous mode on the board
15585c1d0199Sgd78059  *
15595c1d0199Sgd78059  *	Program the hardware to enable/disable promiscuous and/or
15605c1d0199Sgd78059  *	receive-all-multicast modes.  Davicom don't document this
15615c1d0199Sgd78059  *	clearly, but it looks like we can do this on-the-fly (i.e.
15625c1d0199Sgd78059  *	without stopping & restarting the TX/RX processes).
15635c1d0199Sgd78059  */
15645c1d0199Sgd78059 static int
15655c1d0199Sgd78059 dmfe_m_promisc(void *arg, boolean_t on)
15665c1d0199Sgd78059 {
15675c1d0199Sgd78059 	dmfe_t *dmfep = arg;
15685c1d0199Sgd78059 
15695c1d0199Sgd78059 	mutex_enter(dmfep->oplock);
15705c1d0199Sgd78059 	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
15715c1d0199Sgd78059 	if (on)
15725c1d0199Sgd78059 		dmfep->opmode |= PROMISC_MODE;
1573bdb9230aSGarrett D'Amore 	if (!dmfep->suspended)
15745c1d0199Sgd78059 		dmfe_set_opmode(dmfep);
15755c1d0199Sgd78059 	mutex_exit(dmfep->oplock);
15765c1d0199Sgd78059 
15775c1d0199Sgd78059 	return (0);
15785c1d0199Sgd78059 }
15795c1d0199Sgd78059 
15805c1d0199Sgd78059 /*
15815c1d0199Sgd78059  * ========== Factotum, implemented as a softint handler ==========
15825c1d0199Sgd78059  */
15835c1d0199Sgd78059 
15845c1d0199Sgd78059 /*
15855c1d0199Sgd78059  * The factotum is woken up when there's something to do that we'd rather
15865c1d0199Sgd78059  * not do from inside a (high-level?) hardware interrupt handler.  Its
15875c1d0199Sgd78059  * two main tasks are:
15885c1d0199Sgd78059  *	reset & restart the chip after an error
15895c1d0199Sgd78059  *	update & restart the chip after a link status change
15905c1d0199Sgd78059  */
15915c1d0199Sgd78059 static uint_t
15925c1d0199Sgd78059 dmfe_factotum(caddr_t arg)
15935c1d0199Sgd78059 {
15945c1d0199Sgd78059 	dmfe_t *dmfep;
15955c1d0199Sgd78059 
159622eb7cb5Sgd78059 	dmfep = (void *)arg;
15975c1d0199Sgd78059 	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);
15985c1d0199Sgd78059 
15995c1d0199Sgd78059 	mutex_enter(dmfep->oplock);
1600bdb9230aSGarrett D'Amore 	if (dmfep->suspended) {
1601bdb9230aSGarrett D'Amore 		mutex_exit(dmfep->oplock);
1602bdb9230aSGarrett D'Amore 		return (DDI_INTR_CLAIMED);
1603bdb9230aSGarrett D'Amore 	}
16045c1d0199Sgd78059 
16055c1d0199Sgd78059 	dmfep->factotum_flag = 0;
16065c1d0199Sgd78059 	DRV_KS_INC(dmfep, KS_FACTOTUM_RUN);
16075c1d0199Sgd78059 
16085c1d0199Sgd78059 	/*
16095c1d0199Sgd78059 	 * Check for chip error ...
16105c1d0199Sgd78059 	 */
16115c1d0199Sgd78059 	if (dmfep->chip_state == CHIP_ERROR) {
16125c1d0199Sgd78059 		/*
16135c1d0199Sgd78059 		 * Error recovery required: reset the chip and the rings,
16145c1d0199Sgd78059 		 * then, if it's supposed to be running, kick it off again.
16155c1d0199Sgd78059 		 */
16165c1d0199Sgd78059 		DRV_KS_INC(dmfep, KS_RECOVERY);
16175c1d0199Sgd78059 		dmfe_restart(dmfep);
1618bdb9230aSGarrett D'Amore 		mutex_exit(dmfep->oplock);
1619bdb9230aSGarrett D'Amore 
1620bdb9230aSGarrett D'Amore 		mii_reset(dmfep->mii);
1621bdb9230aSGarrett D'Amore 
16225c1d0199Sgd78059 	} else if (dmfep->need_setup) {
16235c1d0199Sgd78059 		(void) dmfe_send_setup(dmfep);
16245c1d0199Sgd78059 		mutex_exit(dmfep->oplock);
16255c1d0199Sgd78059 	}
16265c1d0199Sgd78059 
16275c1d0199Sgd78059 	return (DDI_INTR_CLAIMED);
16285c1d0199Sgd78059 }
16295c1d0199Sgd78059 
16305c1d0199Sgd78059 static void
16315c1d0199Sgd78059 dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why)
16325c1d0199Sgd78059 {
1633bdb9230aSGarrett D'Amore 	_NOTE(ARGUNUSED(why));
16345c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
16355c1d0199Sgd78059 	DRV_KS_INC(dmfep, ks_id);
16365c1d0199Sgd78059 
16375c1d0199Sgd78059 	if (dmfep->factotum_flag++ == 0)
16385c1d0199Sgd78059 		ddi_trigger_softintr(dmfep->factotum_id);
16395c1d0199Sgd78059 }
16405c1d0199Sgd78059 
16415c1d0199Sgd78059 
16425c1d0199Sgd78059 /*
16435c1d0199Sgd78059  * ========== Periodic Tasks (Cyclic handler & friends) ==========
16445c1d0199Sgd78059  */
16455c1d0199Sgd78059 
16465c1d0199Sgd78059 /*
16475c1d0199Sgd78059  * Periodic tick tasks, run from the cyclic handler
16485c1d0199Sgd78059  *
16495c1d0199Sgd78059  * Check for TX stall; flag an error and wake the factotum if so.
16505c1d0199Sgd78059  */
16515c1d0199Sgd78059 static void
16525c1d0199Sgd78059 dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
16535c1d0199Sgd78059 {
16545c1d0199Sgd78059 	boolean_t tx_stall;
16555c1d0199Sgd78059 	uint32_t tx_state;
16565c1d0199Sgd78059 	uint32_t limit;
16575c1d0199Sgd78059 
16585c1d0199Sgd78059 	ASSERT(mutex_owned(dmfep->oplock));
16595c1d0199Sgd78059 
16605c1d0199Sgd78059 	/*
16615c1d0199Sgd78059 	 * Check for transmit stall ...
16625c1d0199Sgd78059 	 *
16635c1d0199Sgd78059 	 * IF there's at least one packet in the ring, AND the timeout
16645c1d0199Sgd78059 	 * has elapsed, AND we can't reclaim any descriptors, THEN we've
16655c1d0199Sgd78059 	 * stalled; we return B_TRUE to trigger a reset-and-recover cycle.
16665c1d0199Sgd78059 	 *
16675c1d0199Sgd78059 	 * Note that the timeout limit is based on the transmit engine
16685c1d0199Sgd78059 	 * state; we allow the transmitter longer to make progress in
16695c1d0199Sgd78059 	 * some states than in others, based on observations of this
16705c1d0199Sgd78059 	 * chip's actual behaviour in the lab.
16715c1d0199Sgd78059 	 *
16725c1d0199Sgd78059 	 * By observation, we find that on about 1 in 10000 passes through
16735c1d0199Sgd78059 	 * here, the TX lock is already held.  In that case, we'll skip
16745c1d0199Sgd78059 	 * the check on this pass rather than wait.  Most likely, the send
16755c1d0199Sgd78059 	 * routine was holding the lock when the interrupt happened, and
16765c1d0199Sgd78059 	 * we'll succeed next time through.  In the event of a real stall,
16775c1d0199Sgd78059 	 * the TX ring will fill up, after which the send routine won't be
16785c1d0199Sgd78059 	 * called any more and then we're sure to get in.
16795c1d0199Sgd78059 	 */
16805c1d0199Sgd78059 	tx_stall = B_FALSE;
16815c1d0199Sgd78059 	if (mutex_tryenter(dmfep->txlock)) {
16825c1d0199Sgd78059 		if (dmfep->tx.n_free < dmfep->tx.n_desc) {
16835c1d0199Sgd78059 			tx_state = TX_PROCESS_STATE(istat);
16845c1d0199Sgd78059 			if (gpsr & GPS_LINK_100)
16855c1d0199Sgd78059 				limit = stall_100_tix[tx_state];
16865c1d0199Sgd78059 			else
16875c1d0199Sgd78059 				limit = stall_10_tix[tx_state];
16885c1d0199Sgd78059 			if (++dmfep->tx_pending_tix >= limit &&
16895c1d0199Sgd78059 			    dmfe_reclaim_tx_desc(dmfep) == B_FALSE) {
16905c1d0199Sgd78059 				dmfe_log(dmfep, "TX stall detected "
16915c1d0199Sgd78059 				    "after %d ticks in state %d; "
16925c1d0199Sgd78059 				    "automatic recovery initiated",
16935c1d0199Sgd78059 				    dmfep->tx_pending_tix, tx_state);
16945c1d0199Sgd78059 				tx_stall = B_TRUE;
16955c1d0199Sgd78059 			}
16965c1d0199Sgd78059 		}
16975c1d0199Sgd78059 		mutex_exit(dmfep->txlock);
16985c1d0199Sgd78059 	}
16995c1d0199Sgd78059 
17005c1d0199Sgd78059 	if (tx_stall) {
17015c1d0199Sgd78059 		dmfe_stop_chip(dmfep, CHIP_ERROR);
17025c1d0199Sgd78059 		dmfe_wake_factotum(dmfep, KS_TX_STALL, "tick (TX stall)");
17035c1d0199Sgd78059 	}
17045c1d0199Sgd78059 }
17055c1d0199Sgd78059 
17065c1d0199Sgd78059 /*
17075c1d0199Sgd78059  * Cyclic callback handler
17085c1d0199Sgd78059  */
17095c1d0199Sgd78059 static void
17105c1d0199Sgd78059 dmfe_cyclic(void *arg)
17115c1d0199Sgd78059 {
17125c1d0199Sgd78059 	dmfe_t *dmfep = arg;			/* private device info */
17135c1d0199Sgd78059 	uint32_t istat;
17145c1d0199Sgd78059 	uint32_t gpsr;
17155c1d0199Sgd78059 
17165c1d0199Sgd78059 	/*
17175c1d0199Sgd78059 	 * If the chip's not RUNNING, there's nothing to do.
17185c1d0199Sgd78059 	 * If we can't get the mutex straight away, we'll just
17195c1d0199Sgd78059 	 * skip this pass; we'll back back soon enough anyway.
17205c1d0199Sgd78059 	 * skip this pass; we'll be back soon enough anyway.
17215c1d0199Sgd78059 	if (mutex_tryenter(dmfep->oplock) == 0)
17225c1d0199Sgd78059 		return;
1723bdb9230aSGarrett D'Amore 	if ((dmfep->suspended) || (dmfep->chip_state != CHIP_RUNNING)) {
1724bdb9230aSGarrett D'Amore 		mutex_exit(dmfep->oplock);
1725bdb9230aSGarrett D'Amore 		return;
1726bdb9230aSGarrett D'Amore 	}
17275c1d0199Sgd78059 
17285c1d0199Sgd78059 	/*
17295c1d0199Sgd78059 	 * Recheck chip state (it might have been stopped since we
17305c1d0199Sgd78059 	 * checked above).  If still running, call each of the *tick*
17315c1d0199Sgd78059 	 * tasks.  They will check for link change, TX stall, etc ...
17325c1d0199Sgd78059 	 */
17335c1d0199Sgd78059 	if (dmfep->chip_state == CHIP_RUNNING) {
17345c1d0199Sgd78059 		istat = dmfe_chip_get32(dmfep, STATUS_REG);
17355c1d0199Sgd78059 		gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
17365c1d0199Sgd78059 		dmfe_tick_stall_check(dmfep, gpsr, istat);
17375c1d0199Sgd78059 	}
17385c1d0199Sgd78059 
17395c1d0199Sgd78059 	DRV_KS_INC(dmfep, KS_CYCLIC_RUN);
17405c1d0199Sgd78059 	mutex_exit(dmfep->oplock);
17415c1d0199Sgd78059 }
17425c1d0199Sgd78059 
17435c1d0199Sgd78059 /*
17445c1d0199Sgd78059  * ========== Hardware interrupt handler ==========
17455c1d0199Sgd78059  */
17465c1d0199Sgd78059 
17475c1d0199Sgd78059 /*
17485c1d0199Sgd78059  *	dmfe_interrupt() -- handle chip interrupts
17495c1d0199Sgd78059  */
17505c1d0199Sgd78059 static uint_t
17515c1d0199Sgd78059 dmfe_interrupt(caddr_t arg)
17525c1d0199Sgd78059 {
17535c1d0199Sgd78059 	dmfe_t *dmfep;			/* private device info */
17545c1d0199Sgd78059 	uint32_t interrupts;
17555c1d0199Sgd78059 	uint32_t istat;
17565c1d0199Sgd78059 	const char *msg;
17575c1d0199Sgd78059 	mblk_t *mp;
17585c1d0199Sgd78059 	boolean_t warning_msg = B_TRUE;
17595c1d0199Sgd78059 
176022eb7cb5Sgd78059 	dmfep = (void *)arg;
17615c1d0199Sgd78059 
1762bdb9230aSGarrett D'Amore 	mutex_enter(dmfep->oplock);
1763bdb9230aSGarrett D'Amore 	if (dmfep->suspended) {
1764bdb9230aSGarrett D'Amore 		mutex_exit(dmfep->oplock);
1765bdb9230aSGarrett D'Amore 		return (DDI_INTR_UNCLAIMED);
1766bdb9230aSGarrett D'Amore 	}
1767bdb9230aSGarrett D'Amore 
17685c1d0199Sgd78059 	/*
17695c1d0199Sgd78059 	 * A quick check as to whether the interrupt was from this
17705c1d0199Sgd78059 	 * device, before we even finish setting up all our local
17715c1d0199Sgd78059 	 * variables.  Note that reading the interrupt status register
17725c1d0199Sgd78059 	 * doesn't have any unpleasant side effects such as clearing
17735c1d0199Sgd78059 	 * the bits read, so it's quite OK to re-read it once we have
17745c1d0199Sgd78059 	 * determined that we are going to service this interrupt and
17755c1d0199Sgd78059 	 * grabbed the mutexen.
17765c1d0199Sgd78059 	 */
17775c1d0199Sgd78059 	istat = dmfe_chip_get32(dmfep, STATUS_REG);
1778bdb9230aSGarrett D'Amore 	if ((istat & (NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT)) == 0) {
17795c1d0199Sgd78059 
1780bdb9230aSGarrett D'Amore 		mutex_exit(dmfep->oplock);
1781bdb9230aSGarrett D'Amore 		return (DDI_INTR_UNCLAIMED);
17825c1d0199Sgd78059 	}
17835c1d0199Sgd78059 
17845c1d0199Sgd78059 	DRV_KS_INC(dmfep, KS_INTERRUPT);
17855c1d0199Sgd78059 
17865c1d0199Sgd78059 	/*
17875c1d0199Sgd78059 	 * Identify bits that represent enabled interrupts ...
17885c1d0199Sgd78059 	 */
17895c1d0199Sgd78059 	istat |= dmfe_chip_get32(dmfep, STATUS_REG);
17905c1d0199Sgd78059 	interrupts = istat & dmfep->imask;
17915c1d0199Sgd78059 	ASSERT(interrupts != 0);
17925c1d0199Sgd78059 
1793bdb9230aSGarrett D'Amore 	DTRACE_PROBE1(intr, uint32_t, istat);
17945c1d0199Sgd78059 
17955c1d0199Sgd78059 	/*
17965c1d0199Sgd78059 	 * Check for any interrupts other than TX/RX done.
17975c1d0199Sgd78059 	 * If there are any, they are considered Abnormal
17985c1d0199Sgd78059 	 * and will cause the chip to be reset.
17995c1d0199Sgd78059 	 */
18005c1d0199Sgd78059 	if (interrupts & ~(RX_PKTDONE_INT | TX_PKTDONE_INT)) {
18015c1d0199Sgd78059 		if (istat & ABNORMAL_SUMMARY_INT) {
18025c1d0199Sgd78059 			/*
18035c1d0199Sgd78059 			 * Any Abnormal interrupts will lead to us
18045c1d0199Sgd78059 			 * resetting the chip, so we don't bother
18055c1d0199Sgd78059 			 * to clear each interrupt individually.
18065c1d0199Sgd78059 			 *
18075c1d0199Sgd78059 			 * Our main task here is to identify the problem,
18085c1d0199Sgd78059 			 * by pointing out the most significant unexpected
18095c1d0199Sgd78059 			 * bit.  Additional bits may well be consequences
18105c1d0199Sgd78059 			 * of the first problem, so we consider the possible
18115c1d0199Sgd78059 			 * causes in order of severity.
18125c1d0199Sgd78059 			 */
18135c1d0199Sgd78059 			if (interrupts & SYSTEM_ERR_INT) {
18145c1d0199Sgd78059 				switch (istat & SYSTEM_ERR_BITS) {
18155c1d0199Sgd78059 				case SYSTEM_ERR_M_ABORT:
18165c1d0199Sgd78059 					msg = "Bus Master Abort";
18175c1d0199Sgd78059 					break;
18185c1d0199Sgd78059 
18195c1d0199Sgd78059 				case SYSTEM_ERR_T_ABORT:
18205c1d0199Sgd78059 					msg = "Bus Target Abort";
18215c1d0199Sgd78059 					break;
18225c1d0199Sgd78059 
18235c1d0199Sgd78059 				case SYSTEM_ERR_PARITY:
18245c1d0199Sgd78059 					msg = "Parity Error";
18255c1d0199Sgd78059 					break;
18265c1d0199Sgd78059 
18275c1d0199Sgd78059 				default:
18285c1d0199Sgd78059 					msg = "Unknown System Bus Error";
18295c1d0199Sgd78059 					break;
18305c1d0199Sgd78059 				}
18315c1d0199Sgd78059 			} else if (interrupts & RX_STOPPED_INT) {
18325c1d0199Sgd78059 				msg = "RX process stopped";
18335c1d0199Sgd78059 			} else if (interrupts & RX_UNAVAIL_INT) {
18345c1d0199Sgd78059 				msg = "RX buffer unavailable";
18355c1d0199Sgd78059 				warning_msg = B_FALSE;
18365c1d0199Sgd78059 			} else if (interrupts & RX_WATCHDOG_INT) {
18375c1d0199Sgd78059 				msg = "RX watchdog timeout?";
18385c1d0199Sgd78059 			} else if (interrupts & RX_EARLY_INT) {
18395c1d0199Sgd78059 				msg = "RX early interrupt?";
18405c1d0199Sgd78059 			} else if (interrupts & TX_STOPPED_INT) {
18415c1d0199Sgd78059 				msg = "TX process stopped";
18425c1d0199Sgd78059 			} else if (interrupts & TX_JABBER_INT) {
18435c1d0199Sgd78059 				msg = "TX jabber timeout";
18445c1d0199Sgd78059 			} else if (interrupts & TX_UNDERFLOW_INT) {
18455c1d0199Sgd78059 				msg = "TX underflow?";
18465c1d0199Sgd78059 			} else if (interrupts & TX_EARLY_INT) {
18475c1d0199Sgd78059 				msg = "TX early interrupt?";
18485c1d0199Sgd78059 
18495c1d0199Sgd78059 			} else if (interrupts & LINK_STATUS_INT) {
18505c1d0199Sgd78059 				msg = "Link status change?";
18515c1d0199Sgd78059 			} else if (interrupts & GP_TIMER_INT) {
18525c1d0199Sgd78059 				msg = "Timer expired?";
18535c1d0199Sgd78059 			}
18545c1d0199Sgd78059 
18555c1d0199Sgd78059 			if (warning_msg)
18565c1d0199Sgd78059 				dmfe_warning(dmfep, "abnormal interrupt, "
18575c1d0199Sgd78059 				    "status 0x%x: %s", istat, msg);
18585c1d0199Sgd78059 
18595c1d0199Sgd78059 			/*
18605c1d0199Sgd78059 			 * We don't want to run the entire reinitialisation
18615c1d0199Sgd78059 			 * code out of this (high-level?) interrupt, so we
18625c1d0199Sgd78059 			 * simply STOP the chip, and wake up the factotum
18635c1d0199Sgd78059 			 * to reinitialise it ...
18645c1d0199Sgd78059 			 */
18655c1d0199Sgd78059 			dmfe_stop_chip(dmfep, CHIP_ERROR);
18665c1d0199Sgd78059 			dmfe_wake_factotum(dmfep, KS_CHIP_ERROR,
18675c1d0199Sgd78059 			    "interrupt (error)");
18685c1d0199Sgd78059 		} else {
18695c1d0199Sgd78059 			/*
18705c1d0199Sgd78059 			 * We shouldn't really get here (it would mean
18715c1d0199Sgd78059 			 * there were some unprocessed enabled bits but
18725c1d0199Sgd78059 			 * they weren't Abnormal?), but we'll check just
18735c1d0199Sgd78059 			 * in case ...
18745c1d0199Sgd78059 			 */
1875bdb9230aSGarrett D'Amore 			DTRACE_PROBE1(intr__unexpected, uint32_t, istat);
18765c1d0199Sgd78059 		}
18775c1d0199Sgd78059 	}
18785c1d0199Sgd78059 
18795c1d0199Sgd78059 	/*
18805c1d0199Sgd78059 	 * Acknowledge all the original bits - except in the case of an
18815c1d0199Sgd78059 	 * error, when we leave them unacknowledged so that the recovery
18825c1d0199Sgd78059 	 * code can see what was going on when the problem occurred ...
18835c1d0199Sgd78059 	 */
18845c1d0199Sgd78059 	if (dmfep->chip_state != CHIP_ERROR) {
18855c1d0199Sgd78059 		(void) dmfe_chip_put32(dmfep, STATUS_REG, istat);
18865c1d0199Sgd78059 		/*
18875c1d0199Sgd78059 		 * Read-after-write forces completion on PCI bus.
18885c1d0199Sgd78059 		 *
18895c1d0199Sgd78059 		 */
18905c1d0199Sgd78059 		(void) dmfe_chip_get32(dmfep, STATUS_REG);
18915c1d0199Sgd78059 	}
18925c1d0199Sgd78059 
18935c1d0199Sgd78059 
18945c1d0199Sgd78059 	/*
18955c1d0199Sgd78059 	 * We've finished talking to the chip, so we can drop <oplock>
18965c1d0199Sgd78059 	 * before handling the normal interrupts, which only involve
18975c1d0199Sgd78059 	 * manipulation of descriptors ...
18985c1d0199Sgd78059 	 */
18995c1d0199Sgd78059 	mutex_exit(dmfep->oplock);
19005c1d0199Sgd78059 
19015c1d0199Sgd78059 	if (interrupts & RX_PKTDONE_INT)
19025c1d0199Sgd78059 		if ((mp = dmfe_getp(dmfep)) != NULL)
19035c1d0199Sgd78059 			mac_rx(dmfep->mh, NULL, mp);
19045c1d0199Sgd78059 
19055c1d0199Sgd78059 	if (interrupts & TX_PKTDONE_INT) {
19065c1d0199Sgd78059 		/*
19075c1d0199Sgd78059 		 * The only reason for taking this interrupt is to give
19085c1d0199Sgd78059 		 * MAC a chance to schedule queued packets after a
19095c1d0199Sgd78059 		 * ring-full condition.  To minimise the number of
19105c1d0199Sgd78059 		 * redundant TX-Done interrupts, we only mark two of the
19115c1d0199Sgd78059 		 * ring descriptors as 'interrupt-on-complete' - all the
19125c1d0199Sgd78059 		 * others are simply handed back without an interrupt.
19135c1d0199Sgd78059 		 */
19145c1d0199Sgd78059 		if (dmfe_reclaim_on_done && mutex_tryenter(dmfep->txlock)) {
19155c1d0199Sgd78059 			(void) dmfe_reclaim_tx_desc(dmfep);
19165c1d0199Sgd78059 			mutex_exit(dmfep->txlock);
19175c1d0199Sgd78059 		}
19185c1d0199Sgd78059 		mac_tx_update(dmfep->mh);
19195c1d0199Sgd78059 	}
19205c1d0199Sgd78059 
19215c1d0199Sgd78059 	return (DDI_INTR_CLAIMED);
19225c1d0199Sgd78059 }
19235c1d0199Sgd78059 
19245c1d0199Sgd78059 /*
19255c1d0199Sgd78059  * ========== Statistics update handler ==========
19265c1d0199Sgd78059  */
19275c1d0199Sgd78059 
19285c1d0199Sgd78059 static int
19295c1d0199Sgd78059 dmfe_m_stat(void *arg, uint_t stat, uint64_t *val)
19305c1d0199Sgd78059 {
19315c1d0199Sgd78059 	dmfe_t *dmfep = arg;
19325c1d0199Sgd78059 	int rv = 0;
19335c1d0199Sgd78059 
1934bdb9230aSGarrett D'Amore 	/* Let MII handle its own stats. */
1935bdb9230aSGarrett D'Amore 	if (mii_m_getstat(dmfep->mii, stat, val) == 0) {
1936bdb9230aSGarrett D'Amore 		return (0);
1937bdb9230aSGarrett D'Amore 	}
1938bdb9230aSGarrett D'Amore 
19395c1d0199Sgd78059 	mutex_enter(dmfep->oplock);
19405c1d0199Sgd78059 	mutex_enter(dmfep->rxlock);
19415c1d0199Sgd78059 	mutex_enter(dmfep->txlock);
19425c1d0199Sgd78059 
19435c1d0199Sgd78059 	/* make sure we have all the stats collected */
19445c1d0199Sgd78059 	(void) dmfe_reclaim_tx_desc(dmfep);
19455c1d0199Sgd78059 
19465c1d0199Sgd78059 	switch (stat) {
19475c1d0199Sgd78059 
19485c1d0199Sgd78059 	case MAC_STAT_IPACKETS:
19495c1d0199Sgd78059 		*val = dmfep->rx_stats_ipackets;
19505c1d0199Sgd78059 		break;
19515c1d0199Sgd78059 
19525c1d0199Sgd78059 	case MAC_STAT_MULTIRCV:
19535c1d0199Sgd78059 		*val = dmfep->rx_stats_multi;
19545c1d0199Sgd78059 		break;
19555c1d0199Sgd78059 
19565c1d0199Sgd78059 	case MAC_STAT_BRDCSTRCV:
19575c1d0199Sgd78059 		*val = dmfep->rx_stats_bcast;
19585c1d0199Sgd78059 		break;
19595c1d0199Sgd78059 
19605c1d0199Sgd78059 	case MAC_STAT_RBYTES:
19615c1d0199Sgd78059 		*val = dmfep->rx_stats_rbytes;
19625c1d0199Sgd78059 		break;
19635c1d0199Sgd78059 
19645c1d0199Sgd78059 	case MAC_STAT_IERRORS:
19655c1d0199Sgd78059 		*val = dmfep->rx_stats_ierrors;
19665c1d0199Sgd78059 		break;
19675c1d0199Sgd78059 
19685c1d0199Sgd78059 	case MAC_STAT_NORCVBUF:
19695c1d0199Sgd78059 		*val = dmfep->rx_stats_norcvbuf;
19705c1d0199Sgd78059 		break;
19715c1d0199Sgd78059 
19725c1d0199Sgd78059 	case MAC_STAT_COLLISIONS:
19735c1d0199Sgd78059 		*val = dmfep->tx_stats_collisions;
19745c1d0199Sgd78059 		break;
19755c1d0199Sgd78059 
19765c1d0199Sgd78059 	case MAC_STAT_OERRORS:
19775c1d0199Sgd78059 		*val = dmfep->tx_stats_oerrors;
19785c1d0199Sgd78059 		break;
19795c1d0199Sgd78059 
19805c1d0199Sgd78059 	case MAC_STAT_OPACKETS:
19815c1d0199Sgd78059 		*val = dmfep->tx_stats_opackets;
19825c1d0199Sgd78059 		break;
19835c1d0199Sgd78059 
19845c1d0199Sgd78059 	case MAC_STAT_MULTIXMT:
19855c1d0199Sgd78059 		*val = dmfep->tx_stats_multi;
19865c1d0199Sgd78059 		break;
19875c1d0199Sgd78059 
19885c1d0199Sgd78059 	case MAC_STAT_BRDCSTXMT:
19895c1d0199Sgd78059 		*val = dmfep->tx_stats_bcast;
19905c1d0199Sgd78059 		break;
19915c1d0199Sgd78059 
19925c1d0199Sgd78059 	case MAC_STAT_OBYTES:
19935c1d0199Sgd78059 		*val = dmfep->tx_stats_obytes;
19945c1d0199Sgd78059 		break;
19955c1d0199Sgd78059 
19965c1d0199Sgd78059 	case MAC_STAT_OVERFLOWS:
19975c1d0199Sgd78059 		*val = dmfep->rx_stats_overflow;
19985c1d0199Sgd78059 		break;
19995c1d0199Sgd78059 
20005c1d0199Sgd78059 	case MAC_STAT_UNDERFLOWS:
20015c1d0199Sgd78059 		*val = dmfep->tx_stats_underflow;
20025c1d0199Sgd78059 		break;
20035c1d0199Sgd78059 
20045c1d0199Sgd78059 	case ETHER_STAT_ALIGN_ERRORS:
20055c1d0199Sgd78059 		*val = dmfep->rx_stats_align;
20065c1d0199Sgd78059 		break;
20075c1d0199Sgd78059 
20085c1d0199Sgd78059 	case ETHER_STAT_FCS_ERRORS:
20095c1d0199Sgd78059 		*val = dmfep->rx_stats_fcs;
20105c1d0199Sgd78059 		break;
20115c1d0199Sgd78059 
20125c1d0199Sgd78059 	case ETHER_STAT_TOOLONG_ERRORS:
20135c1d0199Sgd78059 		*val = dmfep->rx_stats_toolong;
20145c1d0199Sgd78059 		break;
20155c1d0199Sgd78059 
20165c1d0199Sgd78059 	case ETHER_STAT_TOOSHORT_ERRORS:
20175c1d0199Sgd78059 		*val = dmfep->rx_stats_short;
20185c1d0199Sgd78059 		break;
20195c1d0199Sgd78059 
20205c1d0199Sgd78059 	case ETHER_STAT_MACRCV_ERRORS:
20215c1d0199Sgd78059 		*val = dmfep->rx_stats_macrcv_errors;
20225c1d0199Sgd78059 		break;
20235c1d0199Sgd78059 
20245c1d0199Sgd78059 	case ETHER_STAT_MACXMT_ERRORS:
20255c1d0199Sgd78059 		*val = dmfep->tx_stats_macxmt_errors;
20265c1d0199Sgd78059 		break;
20275c1d0199Sgd78059 
20285c1d0199Sgd78059 	case ETHER_STAT_JABBER_ERRORS:
20295c1d0199Sgd78059 		*val = dmfep->tx_stats_jabber;
20305c1d0199Sgd78059 		break;
20315c1d0199Sgd78059 
20325c1d0199Sgd78059 	case ETHER_STAT_CARRIER_ERRORS:
20335c1d0199Sgd78059 		*val = dmfep->tx_stats_nocarrier;
20345c1d0199Sgd78059 		break;
20355c1d0199Sgd78059 
20365c1d0199Sgd78059 	case ETHER_STAT_TX_LATE_COLLISIONS:
20375c1d0199Sgd78059 		*val = dmfep->tx_stats_xmtlatecoll;
20385c1d0199Sgd78059 		break;
20395c1d0199Sgd78059 
20405c1d0199Sgd78059 	case ETHER_STAT_EX_COLLISIONS:
20415c1d0199Sgd78059 		*val = dmfep->tx_stats_excoll;
20425c1d0199Sgd78059 		break;
20435c1d0199Sgd78059 
20445c1d0199Sgd78059 	case ETHER_STAT_DEFER_XMTS:
20455c1d0199Sgd78059 		*val = dmfep->tx_stats_defer;
20465c1d0199Sgd78059 		break;
20475c1d0199Sgd78059 
20485c1d0199Sgd78059 	case ETHER_STAT_FIRST_COLLISIONS:
20495c1d0199Sgd78059 		*val = dmfep->tx_stats_first_coll;
20505c1d0199Sgd78059 		break;
20515c1d0199Sgd78059 
20525c1d0199Sgd78059 	case ETHER_STAT_MULTI_COLLISIONS:
20535c1d0199Sgd78059 		*val = dmfep->tx_stats_multi_coll;
20545c1d0199Sgd78059 		break;
20555c1d0199Sgd78059 
20565c1d0199Sgd78059 	default:
20575c1d0199Sgd78059 		rv = ENOTSUP;
20585c1d0199Sgd78059 	}
20595c1d0199Sgd78059 
20605c1d0199Sgd78059 	mutex_exit(dmfep->txlock);
20615c1d0199Sgd78059 	mutex_exit(dmfep->rxlock);
20625c1d0199Sgd78059 	mutex_exit(dmfep->oplock);
20635c1d0199Sgd78059 
20645c1d0199Sgd78059 	return (rv);
20655c1d0199Sgd78059 }
20665c1d0199Sgd78059 
20675c1d0199Sgd78059 /*
20685c1d0199Sgd78059  * ========== Ioctl handler & subfunctions ==========
20695c1d0199Sgd78059  */
20705c1d0199Sgd78059 
2071bdb9230aSGarrett D'Amore static lb_property_t dmfe_loopmodes[] = {
2072bdb9230aSGarrett D'Amore 	{ normal,	"normal",	0 },
2073bdb9230aSGarrett D'Amore 	{ internal,	"Internal",	1 },
2074bdb9230aSGarrett D'Amore 	{ external,	"External",	2 },
2075bdb9230aSGarrett D'Amore };
20765c1d0199Sgd78059 
20775c1d0199Sgd78059 /*
20785c1d0199Sgd78059  * Specific dmfe IOCTLs; the mac module handles the generic ones.
2079bdb9230aSGarrett D'Amore  * Unfortunately, the DM9102 doesn't seem to work well with MII based
2080bdb9230aSGarrett D'Amore  * loopback, so we have to do something special for it.
20815c1d0199Sgd78059  */
2082bdb9230aSGarrett D'Amore 
20835c1d0199Sgd78059 static void
20845c1d0199Sgd78059 dmfe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
20855c1d0199Sgd78059 {
20865c1d0199Sgd78059 	dmfe_t		*dmfep = arg;
20875c1d0199Sgd78059 	struct iocblk	*iocp;
2088bdb9230aSGarrett D'Amore 	int		rv = 0;
2089bdb9230aSGarrett D'Amore 	lb_info_sz_t	sz;
20905c1d0199Sgd78059 	int		cmd;
2091bdb9230aSGarrett D'Amore 	uint32_t	mode;
20925c1d0199Sgd78059 
209322eb7cb5Sgd78059 	iocp = (void *)mp->b_rptr;
20945c1d0199Sgd78059 	cmd = iocp->ioc_cmd;
2095bdb9230aSGarrett D'Amore 
2096bdb9230aSGarrett D'Amore 	if (mp->b_cont == NULL) {
2097bdb9230aSGarrett D'Amore 		/*
2098bdb9230aSGarrett D'Amore 		 * All of these ioctls need data!
2099bdb9230aSGarrett D'Amore 		 */
21005c1d0199Sgd78059 		miocnak(wq, mp, 0, EINVAL);
21015c1d0199Sgd78059 		return;
21025c1d0199Sgd78059 	}
21035c1d0199Sgd78059 
21045c1d0199Sgd78059 	switch (cmd) {
2105bdb9230aSGarrett D'Amore 	case LB_GET_INFO_SIZE:
2106bdb9230aSGarrett D'Amore 		if (iocp->ioc_count != sizeof (sz)) {
2107bdb9230aSGarrett D'Amore 			rv = EINVAL;
2108bdb9230aSGarrett D'Amore 		} else {
2109bdb9230aSGarrett D'Amore 			sz = sizeof (dmfe_loopmodes);
2110bdb9230aSGarrett D'Amore 			bcopy(&sz, mp->b_cont->b_rptr, sizeof (sz));
2111bdb9230aSGarrett D'Amore 		}
2112bdb9230aSGarrett D'Amore 		break;
2113bdb9230aSGarrett D'Amore 
2114bdb9230aSGarrett D'Amore 	case LB_GET_INFO:
2115bdb9230aSGarrett D'Amore 		if (iocp->ioc_count != sizeof (dmfe_loopmodes)) {
2116bdb9230aSGarrett D'Amore 			rv = EINVAL;
2117bdb9230aSGarrett D'Amore 		} else {
2118bdb9230aSGarrett D'Amore 			bcopy(dmfe_loopmodes, mp->b_cont->b_rptr,
2119bdb9230aSGarrett D'Amore 			    iocp->ioc_count);
2120bdb9230aSGarrett D'Amore 		}
2121bdb9230aSGarrett D'Amore 		break;
2122bdb9230aSGarrett D'Amore 
2123bdb9230aSGarrett D'Amore 	case LB_GET_MODE:
2124bdb9230aSGarrett D'Amore 		if (iocp->ioc_count != sizeof (mode)) {
2125bdb9230aSGarrett D'Amore 			rv = EINVAL;
2126bdb9230aSGarrett D'Amore 		} else {
2127bdb9230aSGarrett D'Amore 			mutex_enter(dmfep->oplock);
2128bdb9230aSGarrett D'Amore 			switch (dmfep->opmode & LOOPBACK_MODE_MASK) {
2129bdb9230aSGarrett D'Amore 			case LOOPBACK_OFF:
2130bdb9230aSGarrett D'Amore 				mode = 0;
2131bdb9230aSGarrett D'Amore 				break;
2132bdb9230aSGarrett D'Amore 			case LOOPBACK_INTERNAL:
2133bdb9230aSGarrett D'Amore 				mode = 1;
2134bdb9230aSGarrett D'Amore 				break;
21355c1d0199Sgd78059 			default:
2136bdb9230aSGarrett D'Amore 				mode = 2;
21375c1d0199Sgd78059 				break;
21385c1d0199Sgd78059 			}
21395c1d0199Sgd78059 			mutex_exit(dmfep->oplock);
2140bdb9230aSGarrett D'Amore 			bcopy(&mode, mp->b_cont->b_rptr, sizeof (mode));
2141bdb9230aSGarrett D'Amore 		}
2142bdb9230aSGarrett D'Amore 		break;
21435c1d0199Sgd78059 
2144bdb9230aSGarrett D'Amore 	case LB_SET_MODE:
2145bdb9230aSGarrett D'Amore 		rv = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2146bdb9230aSGarrett D'Amore 		if (rv != 0)
2147bdb9230aSGarrett D'Amore 			break;
2148bdb9230aSGarrett D'Amore 		if (iocp->ioc_count != sizeof (mode)) {
2149bdb9230aSGarrett D'Amore 			rv = EINVAL;
2150bdb9230aSGarrett D'Amore 			break;
2151bdb9230aSGarrett D'Amore 		}
2152bdb9230aSGarrett D'Amore 		bcopy(mp->b_cont->b_rptr, &mode, sizeof (mode));
2153bdb9230aSGarrett D'Amore 
2154bdb9230aSGarrett D'Amore 		mutex_enter(dmfep->oplock);
2155bdb9230aSGarrett D'Amore 		dmfep->opmode &= ~LOOPBACK_MODE_MASK;
2156bdb9230aSGarrett D'Amore 		switch (mode) {
2157bdb9230aSGarrett D'Amore 		case 2:
2158bdb9230aSGarrett D'Amore 			dmfep->opmode |= LOOPBACK_PHY_D;
2159bdb9230aSGarrett D'Amore 			break;
2160bdb9230aSGarrett D'Amore 		case 1:
2161bdb9230aSGarrett D'Amore 			dmfep->opmode |= LOOPBACK_INTERNAL;
2162bdb9230aSGarrett D'Amore 			break;
21635c1d0199Sgd78059 		default:
2164bdb9230aSGarrett D'Amore 			break;
2165bdb9230aSGarrett D'Amore 		}
2166bdb9230aSGarrett D'Amore 		if (!dmfep->suspended) {
2167bdb9230aSGarrett D'Amore 			dmfe_restart(dmfep);
2168bdb9230aSGarrett D'Amore 		}
2169bdb9230aSGarrett D'Amore 		mutex_exit(dmfep->oplock);
21705c1d0199Sgd78059 		break;
21715c1d0199Sgd78059 
2172bdb9230aSGarrett D'Amore 	default:
2173bdb9230aSGarrett D'Amore 		rv = EINVAL;
21745c1d0199Sgd78059 		break;
2175bdb9230aSGarrett D'Amore 	}
21765c1d0199Sgd78059 
2177bdb9230aSGarrett D'Amore 	if (rv == 0) {
2178bdb9230aSGarrett D'Amore 		miocack(wq, mp, iocp->ioc_count, 0);
2179bdb9230aSGarrett D'Amore 	} else {
2180bdb9230aSGarrett D'Amore 		miocnak(wq, mp, 0, rv);
21815c1d0199Sgd78059 	}
21825c1d0199Sgd78059 }
21835c1d0199Sgd78059 
2184bdb9230aSGarrett D'Amore int
2185*0dc2366fSVenugopal Iyer dmfe_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
2186*0dc2366fSVenugopal Iyer     void *val)
2187bdb9230aSGarrett D'Amore {
2188bdb9230aSGarrett D'Amore 	dmfe_t		*dmfep = arg;
2189bdb9230aSGarrett D'Amore 
2190*0dc2366fSVenugopal Iyer 	return (mii_m_getprop(dmfep->mii, name, num, sz, val));
2191bdb9230aSGarrett D'Amore }
2192bdb9230aSGarrett D'Amore 
2193bdb9230aSGarrett D'Amore int
2194bdb9230aSGarrett D'Amore dmfe_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
2195bdb9230aSGarrett D'Amore     const void *val)
2196bdb9230aSGarrett D'Amore {
2197bdb9230aSGarrett D'Amore 	dmfe_t		*dmfep = arg;
2198bdb9230aSGarrett D'Amore 
2199bdb9230aSGarrett D'Amore 	return (mii_m_setprop(dmfep->mii, name, num, sz, val));
2200bdb9230aSGarrett D'Amore }
22015c1d0199Sgd78059 
2202*0dc2366fSVenugopal Iyer static void
2203*0dc2366fSVenugopal Iyer dmfe_m_propinfo(void *arg, const char *name, mac_prop_id_t num,
2204*0dc2366fSVenugopal Iyer     mac_prop_info_handle_t mph)
2205*0dc2366fSVenugopal Iyer {
2206*0dc2366fSVenugopal Iyer 	dmfe_t		*dmfep = arg;
2207*0dc2366fSVenugopal Iyer 
2208*0dc2366fSVenugopal Iyer 	mii_m_propinfo(dmfep->mii, name, num, mph);
2209*0dc2366fSVenugopal Iyer }
22105c1d0199Sgd78059 
22115c1d0199Sgd78059 /*
22125c1d0199Sgd78059  * ========== Per-instance setup/teardown code ==========
22135c1d0199Sgd78059  */
22145c1d0199Sgd78059 
22155c1d0199Sgd78059 /*
22165c1d0199Sgd78059  * Determine local MAC address & broadcast address for this interface
22175c1d0199Sgd78059  */
22185c1d0199Sgd78059 static void
22195c1d0199Sgd78059 dmfe_find_mac_address(dmfe_t *dmfep)
22205c1d0199Sgd78059 {
22215c1d0199Sgd78059 	uchar_t *prop;
22225c1d0199Sgd78059 	uint_t propsize;
22235c1d0199Sgd78059 	int err;
22245c1d0199Sgd78059 
22255c1d0199Sgd78059 	/*
22265c1d0199Sgd78059 	 * We have to find the "vendor's factory-set address".  This is
22275c1d0199Sgd78059 	 * the value of the property "local-mac-address", as set by OBP
22285c1d0199Sgd78059 	 * (or a .conf file!)
22295c1d0199Sgd78059 	 *
22305c1d0199Sgd78059 	 * If the property is not there, then we try to find the factory
22315c1d0199Sgd78059 	 * mac address from the device's serial EEPROM.
22325c1d0199Sgd78059 	 */
22335c1d0199Sgd78059 	bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr));
22345c1d0199Sgd78059 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo,
22355c1d0199Sgd78059 	    DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
22365c1d0199Sgd78059 	if (err == DDI_PROP_SUCCESS) {
22375c1d0199Sgd78059 		if (propsize == ETHERADDRL)
22385c1d0199Sgd78059 			ethaddr_copy(prop, dmfep->curr_addr);
22395c1d0199Sgd78059 		ddi_prop_free(prop);
22405c1d0199Sgd78059 	} else {
22415c1d0199Sgd78059 		/* no property set... check eeprom */
22425c1d0199Sgd78059 		dmfe_read_eeprom(dmfep, EEPROM_EN_ADDR, dmfep->curr_addr,
22435c1d0199Sgd78059 		    ETHERADDRL);
22445c1d0199Sgd78059 	}
22455c1d0199Sgd78059 }
22465c1d0199Sgd78059 
22475c1d0199Sgd78059 static int
22485c1d0199Sgd78059 dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
22495c1d0199Sgd78059 	size_t setup, size_t slop, ddi_device_acc_attr_t *attr_p,
22505c1d0199Sgd78059 	uint_t dma_flags, dma_area_t *dma_p)
22515c1d0199Sgd78059 {
22525c1d0199Sgd78059 	ddi_dma_cookie_t dma_cookie;
22535c1d0199Sgd78059 	uint_t ncookies;
22545c1d0199Sgd78059 	int err;
22555c1d0199Sgd78059 
22565c1d0199Sgd78059 	/*
22575c1d0199Sgd78059 	 * Allocate handle
22585c1d0199Sgd78059 	 */
22595c1d0199Sgd78059 	err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr,
22605c1d0199Sgd78059 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
2261bdb9230aSGarrett D'Amore 	if (err != DDI_SUCCESS) {
2262bdb9230aSGarrett D'Amore 		dmfe_error(dmfep, "DMA handle allocation failed");
22635c1d0199Sgd78059 		return (DDI_FAILURE);
2264bdb9230aSGarrett D'Amore 	}
22655c1d0199Sgd78059 
22665c1d0199Sgd78059 	/*
22675c1d0199Sgd78059 	 * Allocate memory
22685c1d0199Sgd78059 	 */
22695c1d0199Sgd78059 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop,
22705c1d0199Sgd78059 	    attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
22715c1d0199Sgd78059 	    DDI_DMA_SLEEP, NULL,
22725c1d0199Sgd78059 	    &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
2273bdb9230aSGarrett D'Amore 	if (err != DDI_SUCCESS) {
2274bdb9230aSGarrett D'Amore 		dmfe_error(dmfep, "DMA memory allocation failed: %d", err);
22755c1d0199Sgd78059 		return (DDI_FAILURE);
2276bdb9230aSGarrett D'Amore 	}
22775c1d0199Sgd78059 
22785c1d0199Sgd78059 	/*
22795c1d0199Sgd78059 	 * Bind the two together
22805c1d0199Sgd78059 	 */
22815c1d0199Sgd78059 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
22825c1d0199Sgd78059 	    dma_p->mem_va, dma_p->alength, dma_flags,
22835c1d0199Sgd78059 	    DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
2284bdb9230aSGarrett D'Amore 	if (err != DDI_DMA_MAPPED) {
2285bdb9230aSGarrett D'Amore 		dmfe_error(dmfep, "DMA mapping failed: %d", err);
22865c1d0199Sgd78059 		return (DDI_FAILURE);
2287bdb9230aSGarrett D'Amore 	}
2288bdb9230aSGarrett D'Amore 	if ((dma_p->ncookies = ncookies) != 1) {
2289bdb9230aSGarrett D'Amore 		dmfe_error(dmfep, "Too many DMA cookies: %d", ncookies);
22905c1d0199Sgd78059 		return (DDI_FAILURE);
2291bdb9230aSGarrett D'Amore 	}
22925c1d0199Sgd78059 
22935c1d0199Sgd78059 	dma_p->mem_dvma = dma_cookie.dmac_address;
22945c1d0199Sgd78059 	if (setup > 0) {
22955c1d0199Sgd78059 		dma_p->setup_dvma = dma_p->mem_dvma + memsize;
22965c1d0199Sgd78059 		dma_p->setup_va = dma_p->mem_va + memsize;
22975c1d0199Sgd78059 	} else {
22985c1d0199Sgd78059 		dma_p->setup_dvma = 0;
22995c1d0199Sgd78059 		dma_p->setup_va = NULL;
23005c1d0199Sgd78059 	}
23015c1d0199Sgd78059 
23025c1d0199Sgd78059 	return (DDI_SUCCESS);
23035c1d0199Sgd78059 }
23045c1d0199Sgd78059 
23055c1d0199Sgd78059 /*
23065c1d0199Sgd78059  * This function allocates the transmit and receive buffers and descriptors.
23075c1d0199Sgd78059  */
23085c1d0199Sgd78059 static int
23095c1d0199Sgd78059 dmfe_alloc_bufs(dmfe_t *dmfep)
23105c1d0199Sgd78059 {
23115c1d0199Sgd78059 	size_t memsize;
23125c1d0199Sgd78059 	int err;
23135c1d0199Sgd78059 
23145c1d0199Sgd78059 	/*
23155c1d0199Sgd78059 	 * Allocate memory & handles for TX descriptor ring
23165c1d0199Sgd78059 	 */
23175c1d0199Sgd78059 	memsize = dmfep->tx.n_desc * sizeof (struct tx_desc_type);
23185c1d0199Sgd78059 	err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP,
23195c1d0199Sgd78059 	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
23205c1d0199Sgd78059 	    &dmfep->tx_desc);
2321bdb9230aSGarrett D'Amore 	if (err != DDI_SUCCESS) {
2322bdb9230aSGarrett D'Amore 		dmfe_error(dmfep, "TX descriptor allocation failed");
23235c1d0199Sgd78059 		return (DDI_FAILURE);
2324bdb9230aSGarrett D'Amore 	}
23255c1d0199Sgd78059 
23265c1d0199Sgd78059 	/*
23275c1d0199Sgd78059 	 * Allocate memory & handles for TX buffers
23285c1d0199Sgd78059 	 */
23295c1d0199Sgd78059 	memsize = dmfep->tx.n_desc * DMFE_BUF_SIZE;
23305c1d0199Sgd78059 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
23315c1d0199Sgd78059 	    &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
23325c1d0199Sgd78059 	    &dmfep->tx_buff);
2333bdb9230aSGarrett D'Amore 	if (err != DDI_SUCCESS) {
2334bdb9230aSGarrett D'Amore 		dmfe_error(dmfep, "TX buffer allocation failed");
23355c1d0199Sgd78059 		return (DDI_FAILURE);
2336bdb9230aSGarrett D'Amore 	}
23375c1d0199Sgd78059 
23385c1d0199Sgd78059 	/*
23395c1d0199Sgd78059 	 * Allocate memory & handles for RX descriptor ring
23405c1d0199Sgd78059 	 */
23415c1d0199Sgd78059 	memsize = dmfep->rx.n_desc * sizeof (struct rx_desc_type);
23425c1d0199Sgd78059 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP,
23435c1d0199Sgd78059 	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
23445c1d0199Sgd78059 	    &dmfep->rx_desc);
2345bdb9230aSGarrett D'Amore 	if (err != DDI_SUCCESS) {
2346bdb9230aSGarrett D'Amore 		dmfe_error(dmfep, "RX descriptor allocation failed");
23475c1d0199Sgd78059 		return (DDI_FAILURE);
2348bdb9230aSGarrett D'Amore 	}
23495c1d0199Sgd78059 
23505c1d0199Sgd78059 	/*
23515c1d0199Sgd78059 	 * Allocate memory & handles for RX buffers
23525c1d0199Sgd78059 	 */
23535c1d0199Sgd78059 	memsize = dmfep->rx.n_desc * DMFE_BUF_SIZE;
23545c1d0199Sgd78059 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
23555c1d0199Sgd78059 	    &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE, &dmfep->rx_buff);
2356bdb9230aSGarrett D'Amore 	if (err != DDI_SUCCESS) {
2357bdb9230aSGarrett D'Amore 		dmfe_error(dmfep, "RX buffer allocation failed");
23585c1d0199Sgd78059 		return (DDI_FAILURE);
2359bdb9230aSGarrett D'Amore 	}
23605c1d0199Sgd78059 
23615c1d0199Sgd78059 	/*
23625c1d0199Sgd78059 	 * Allocate bitmasks for tx packet type tracking
23635c1d0199Sgd78059 	 */
23645c1d0199Sgd78059 	dmfep->tx_mcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
23655c1d0199Sgd78059 	dmfep->tx_bcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
23665c1d0199Sgd78059 
23675c1d0199Sgd78059 	return (DDI_SUCCESS);
23685c1d0199Sgd78059 }
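
/*
 * A minimal sketch (illustration only, not a helper the driver defines)
 * of how the per-descriptor byte maps allocated above can be indexed;
 * NBBY is the number of bits per byte:
 *
 *	tx_mcast[i / NBBY] |= (uint8_t)(1 << (i % NBBY));	mark desc i
 *	set = (tx_mcast[i / NBBY] >> (i % NBBY)) & 1;		test desc i
 */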
23695c1d0199Sgd78059 
23705c1d0199Sgd78059 static void
23715c1d0199Sgd78059 dmfe_free_dma_mem(dma_area_t *dma_p)
23725c1d0199Sgd78059 {
23735c1d0199Sgd78059 	if (dma_p->dma_hdl != NULL) {
23745c1d0199Sgd78059 		if (dma_p->ncookies) {
23755c1d0199Sgd78059 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
23765c1d0199Sgd78059 			dma_p->ncookies = 0;
23775c1d0199Sgd78059 		}
23785c1d0199Sgd78059 		ddi_dma_free_handle(&dma_p->dma_hdl);
23795c1d0199Sgd78059 		dma_p->dma_hdl = NULL;
23805c1d0199Sgd78059 		dma_p->mem_dvma = 0;
23815c1d0199Sgd78059 		dma_p->setup_dvma = 0;
23825c1d0199Sgd78059 	}
23835c1d0199Sgd78059 
23845c1d0199Sgd78059 	if (dma_p->acc_hdl != NULL) {
23855c1d0199Sgd78059 		ddi_dma_mem_free(&dma_p->acc_hdl);
23865c1d0199Sgd78059 		dma_p->acc_hdl = NULL;
23875c1d0199Sgd78059 		dma_p->mem_va = NULL;
23885c1d0199Sgd78059 		dma_p->setup_va = NULL;
23895c1d0199Sgd78059 	}
23905c1d0199Sgd78059 }
23915c1d0199Sgd78059 
23925c1d0199Sgd78059 /*
23935c1d0199Sgd78059  * This routine frees the transmit and receive buffers and descriptors.
23945c1d0199Sgd78059  * Make sure the chip is stopped before calling it!
23955c1d0199Sgd78059  */
23965c1d0199Sgd78059 static void
23975c1d0199Sgd78059 dmfe_free_bufs(dmfe_t *dmfep)
23985c1d0199Sgd78059 {
23995c1d0199Sgd78059 	dmfe_free_dma_mem(&dmfep->rx_buff);
24005c1d0199Sgd78059 	dmfe_free_dma_mem(&dmfep->rx_desc);
24015c1d0199Sgd78059 	dmfe_free_dma_mem(&dmfep->tx_buff);
24025c1d0199Sgd78059 	dmfe_free_dma_mem(&dmfep->tx_desc);
2403bdb9230aSGarrett D'Amore 	if (dmfep->tx_mcast)
24045c1d0199Sgd78059 		kmem_free(dmfep->tx_mcast, dmfep->tx.n_desc / NBBY);
2405bdb9230aSGarrett D'Amore 	if (dmfep->tx_bcast)
24065c1d0199Sgd78059 		kmem_free(dmfep->tx_bcast, dmfep->tx.n_desc / NBBY);
24075c1d0199Sgd78059 }
24085c1d0199Sgd78059 
24095c1d0199Sgd78059 static void
24105c1d0199Sgd78059 dmfe_unattach(dmfe_t *dmfep)
24115c1d0199Sgd78059 {
24125c1d0199Sgd78059 	/*
24135c1d0199Sgd78059 	 * Clean up and free all DMFE data structures
24145c1d0199Sgd78059 	 */
24155c1d0199Sgd78059 	if (dmfep->cycid != NULL) {
24165c1d0199Sgd78059 		ddi_periodic_delete(dmfep->cycid);
24175c1d0199Sgd78059 		dmfep->cycid = NULL;
24185c1d0199Sgd78059 	}
24195c1d0199Sgd78059 
24205c1d0199Sgd78059 	if (dmfep->ksp_drv != NULL)
24215c1d0199Sgd78059 		kstat_delete(dmfep->ksp_drv);
24225c1d0199Sgd78059 	if (dmfep->progress & PROGRESS_HWINT) {
24235c1d0199Sgd78059 		ddi_remove_intr(dmfep->devinfo, 0, dmfep->iblk);
2424bdb9230aSGarrett D'Amore 	}
2425bdb9230aSGarrett D'Amore 	if (dmfep->progress & PROGRESS_SOFTINT)
2426bdb9230aSGarrett D'Amore 		ddi_remove_softintr(dmfep->factotum_id);
2427bdb9230aSGarrett D'Amore 	if (dmfep->mii != NULL)
2428bdb9230aSGarrett D'Amore 		mii_free(dmfep->mii);
2429bdb9230aSGarrett D'Amore 	if (dmfep->progress & PROGRESS_MUTEX) {
24305c1d0199Sgd78059 		mutex_destroy(dmfep->txlock);
24315c1d0199Sgd78059 		mutex_destroy(dmfep->rxlock);
24325c1d0199Sgd78059 		mutex_destroy(dmfep->oplock);
24335c1d0199Sgd78059 	}
24345c1d0199Sgd78059 	dmfe_free_bufs(dmfep);
2435bdb9230aSGarrett D'Amore 	if (dmfep->io_handle != NULL)
24365c1d0199Sgd78059 		ddi_regs_map_free(&dmfep->io_handle);
24375c1d0199Sgd78059 
24385c1d0199Sgd78059 	kmem_free(dmfep, sizeof (*dmfep));
24395c1d0199Sgd78059 }
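
/*
 * Note that teardown above is driven by the "progress" bit-mask: each
 * PROGRESS_* bit is set in dmfe_attach() only once the corresponding
 * resource exists, so a partially failed attach unwinds exactly the
 * steps that completed.
 */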
24405c1d0199Sgd78059 
24415c1d0199Sgd78059 static int
24425c1d0199Sgd78059 dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
24435c1d0199Sgd78059 {
24445c1d0199Sgd78059 	ddi_acc_handle_t handle;
24455c1d0199Sgd78059 	uint32_t regval;
24465c1d0199Sgd78059 
24475c1d0199Sgd78059 	if (pci_config_setup(dmfep->devinfo, &handle) != DDI_SUCCESS)
24485c1d0199Sgd78059 		return (DDI_FAILURE);
24495c1d0199Sgd78059 
24505c1d0199Sgd78059 	/*
24515c1d0199Sgd78059 	 * Get vendor/device/revision.  We expect (but don't check) that
24525c1d0199Sgd78059 	 * (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
24535c1d0199Sgd78059 	 */
24545c1d0199Sgd78059 	idp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
24555c1d0199Sgd78059 	idp->device = pci_config_get16(handle, PCI_CONF_DEVID);
24565c1d0199Sgd78059 	idp->revision = pci_config_get8(handle, PCI_CONF_REVID);
24575c1d0199Sgd78059 
24585c1d0199Sgd78059 	/*
24595c1d0199Sgd78059 	 * Turn on Bus Master Enable bit and ensure the device is not asleep
24605c1d0199Sgd78059 	 */
24615c1d0199Sgd78059 	regval = pci_config_get32(handle, PCI_CONF_COMM);
24625c1d0199Sgd78059 	pci_config_put32(handle, PCI_CONF_COMM, (regval | PCI_COMM_ME));
24635c1d0199Sgd78059 
24645c1d0199Sgd78059 	regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
24655c1d0199Sgd78059 	pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
24665c1d0199Sgd78059 	    regval & ~(CFDD_SLEEP | CFDD_SNOOZE));
24675c1d0199Sgd78059 
24685c1d0199Sgd78059 	pci_config_teardown(&handle);
24695c1d0199Sgd78059 	return (DDI_SUCCESS);
24705c1d0199Sgd78059 }
24715c1d0199Sgd78059 
24725c1d0199Sgd78059 struct ks_index {
24735c1d0199Sgd78059 	int index;
24745c1d0199Sgd78059 	char *name;
24755c1d0199Sgd78059 };
24765c1d0199Sgd78059 
24775c1d0199Sgd78059 static const struct ks_index ks_drv_names[] = {
24785c1d0199Sgd78059 	{	KS_INTERRUPT,			"intr"			},
24795c1d0199Sgd78059 	{	KS_CYCLIC_RUN,			"cyclic_run"		},
24805c1d0199Sgd78059 
24815c1d0199Sgd78059 	{	KS_TX_STALL,			"tx_stall_detect"	},
24825c1d0199Sgd78059 	{	KS_CHIP_ERROR,			"chip_error_interrupt"	},
24835c1d0199Sgd78059 
24845c1d0199Sgd78059 	{	KS_FACTOTUM_RUN,		"factotum_run"		},
24855c1d0199Sgd78059 	{	KS_RECOVERY,			"factotum_recover"	},
24865c1d0199Sgd78059 
24875c1d0199Sgd78059 	{	-1,				NULL			}
24885c1d0199Sgd78059 };
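
/*
 * A sketch (hypothetical, not the driver's own accounting macro) of how
 * one of the counters named above is bumped once dmfe_init_kstats()
 * below has installed the kstat_named_t array in knp_drv:
 *
 *	if (dmfep->knp_drv != NULL)
 *		dmfep->knp_drv[KS_INTERRUPT].value.ui64++;
 */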
24895c1d0199Sgd78059 
24905c1d0199Sgd78059 static void
24915c1d0199Sgd78059 dmfe_init_kstats(dmfe_t *dmfep, int instance)
24925c1d0199Sgd78059 {
24935c1d0199Sgd78059 	kstat_t *ksp;
24945c1d0199Sgd78059 	kstat_named_t *knp;
24955c1d0199Sgd78059 	const struct ks_index *ksip;
24965c1d0199Sgd78059 
24975c1d0199Sgd78059 	/* no need to create MII stats, the mac module already does it */
24985c1d0199Sgd78059 
24995c1d0199Sgd78059 	/* Create and initialise driver-defined kstats */
25005c1d0199Sgd78059 	ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
25015c1d0199Sgd78059 	    KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
25025c1d0199Sgd78059 	if (ksp != NULL) {
25035c1d0199Sgd78059 		for (knp = ksp->ks_data, ksip = ks_drv_names;
25045c1d0199Sgd78059 		    ksip->name != NULL; ++ksip) {
25055c1d0199Sgd78059 			kstat_named_init(&knp[ksip->index], ksip->name,
25065c1d0199Sgd78059 			    KSTAT_DATA_UINT64);
25075c1d0199Sgd78059 		}
25085c1d0199Sgd78059 		dmfep->ksp_drv = ksp;
25095c1d0199Sgd78059 		dmfep->knp_drv = knp;
25105c1d0199Sgd78059 		kstat_install(ksp);
25115c1d0199Sgd78059 	} else {
25125c1d0199Sgd78059 		dmfe_error(dmfep, "kstat_create() for dmfe_events failed");
25135c1d0199Sgd78059 	}
25145c1d0199Sgd78059 }
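
/*
 * Once installed, the "dmfe_events" counters can be read from userland
 * with kstat(1M), e.g. (instance 0 assumed for illustration):
 *
 *	# kstat -m dmfe -i 0 -n dmfe_events
 */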
25155c1d0199Sgd78059 
25165c1d0199Sgd78059 static int
25175c1d0199Sgd78059 dmfe_resume(dev_info_t *devinfo)
25185c1d0199Sgd78059 {
25195c1d0199Sgd78059 	dmfe_t *dmfep;				/* Our private data	*/
25205c1d0199Sgd78059 	chip_id_t chipid;
2521bdb9230aSGarrett D'Amore 	boolean_t restart = B_FALSE;
25225c1d0199Sgd78059 
25235c1d0199Sgd78059 	dmfep = ddi_get_driver_private(devinfo);
25245c1d0199Sgd78059 	if (dmfep == NULL)
25255c1d0199Sgd78059 		return (DDI_FAILURE);
25265c1d0199Sgd78059 
25275c1d0199Sgd78059 	/*
25285c1d0199Sgd78059 	 * Refuse to resume if the data structures aren't consistent
25295c1d0199Sgd78059 	 */
25305c1d0199Sgd78059 	if (dmfep->devinfo != devinfo)
25315c1d0199Sgd78059 		return (DDI_FAILURE);
25325c1d0199Sgd78059 
25335c1d0199Sgd78059 	/*
25345c1d0199Sgd78059 	 * Refuse to resume if the chip's changed its identity (*boggle*)
25355c1d0199Sgd78059 	 */
25365c1d0199Sgd78059 	if (dmfe_config_init(dmfep, &chipid) != DDI_SUCCESS)
25375c1d0199Sgd78059 		return (DDI_FAILURE);
25385c1d0199Sgd78059 	if (chipid.vendor != dmfep->chipid.vendor)
25395c1d0199Sgd78059 		return (DDI_FAILURE);
25405c1d0199Sgd78059 	if (chipid.device != dmfep->chipid.device)
25415c1d0199Sgd78059 		return (DDI_FAILURE);
25425c1d0199Sgd78059 	if (chipid.revision != dmfep->chipid.revision)
25435c1d0199Sgd78059 		return (DDI_FAILURE);
25445c1d0199Sgd78059 
2545bdb9230aSGarrett D'Amore 	mutex_enter(dmfep->oplock);
2546bdb9230aSGarrett D'Amore 	mutex_enter(dmfep->txlock);
2547bdb9230aSGarrett D'Amore 	dmfep->suspended = B_FALSE;
2548bdb9230aSGarrett D'Amore 	mutex_exit(dmfep->txlock);
2549bdb9230aSGarrett D'Amore 
25505c1d0199Sgd78059 	/*
25515c1d0199Sgd78059 	 * All OK, reinitialise h/w & kick off MAC scheduling
25525c1d0199Sgd78059 	 */
2553bdb9230aSGarrett D'Amore 	if (dmfep->mac_state == DMFE_MAC_STARTED) {
25545c1d0199Sgd78059 		dmfe_restart(dmfep);
2555bdb9230aSGarrett D'Amore 		restart = B_TRUE;
2556bdb9230aSGarrett D'Amore 	}
25575c1d0199Sgd78059 	mutex_exit(dmfep->oplock);
2558bdb9230aSGarrett D'Amore 
2559bdb9230aSGarrett D'Amore 	if (restart) {
2560bdb9230aSGarrett D'Amore 		mii_resume(dmfep->mii);
25615c1d0199Sgd78059 		mac_tx_update(dmfep->mh);
2562bdb9230aSGarrett D'Amore 	}
25635c1d0199Sgd78059 	return (DDI_SUCCESS);
25645c1d0199Sgd78059 }
25655c1d0199Sgd78059 
25665c1d0199Sgd78059 /*
25675c1d0199Sgd78059  * attach(9E) -- Attach a device to the system
25685c1d0199Sgd78059  *
25695c1d0199Sgd78059  * Called once for each board successfully probed.
25705c1d0199Sgd78059  */
25715c1d0199Sgd78059 static int
25725c1d0199Sgd78059 dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
25735c1d0199Sgd78059 {
25745c1d0199Sgd78059 	mac_register_t *macp;
25755c1d0199Sgd78059 	dmfe_t *dmfep;				/* Our private data	*/
25765c1d0199Sgd78059 	uint32_t csr6;
25775c1d0199Sgd78059 	int instance;
25785c1d0199Sgd78059 	int err;
25795c1d0199Sgd78059 
25805c1d0199Sgd78059 	instance = ddi_get_instance(devinfo);
25815c1d0199Sgd78059 
25825c1d0199Sgd78059 	switch (cmd) {
25835c1d0199Sgd78059 	default:
25845c1d0199Sgd78059 		return (DDI_FAILURE);
25855c1d0199Sgd78059 
25865c1d0199Sgd78059 	case DDI_RESUME:
25875c1d0199Sgd78059 		return (dmfe_resume(devinfo));
25885c1d0199Sgd78059 
25895c1d0199Sgd78059 	case DDI_ATTACH:
25905c1d0199Sgd78059 		break;
25915c1d0199Sgd78059 	}
25925c1d0199Sgd78059 
25935c1d0199Sgd78059 	dmfep = kmem_zalloc(sizeof (*dmfep), KM_SLEEP);
25945c1d0199Sgd78059 	ddi_set_driver_private(devinfo, dmfep);
25955c1d0199Sgd78059 	dmfep->devinfo = devinfo;
25965c1d0199Sgd78059 	dmfep->dmfe_guard = DMFE_GUARD;
25975c1d0199Sgd78059 
25985c1d0199Sgd78059 	/*
25995c1d0199Sgd78059 	 * Initialize more fields in DMFE private data
26005c1d0199Sgd78059 	 * Determine the local MAC address
26015c1d0199Sgd78059 	 */
26025c1d0199Sgd78059 #if	DMFEDEBUG
26035c1d0199Sgd78059 	dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
26045c1d0199Sgd78059 	    debug_propname, dmfe_debug);
26055c1d0199Sgd78059 #endif	/* DMFEDEBUG */
26065c1d0199Sgd78059 	dmfep->cycid = NULL;
26075c1d0199Sgd78059 	(void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
26085c1d0199Sgd78059 	    instance);
26095c1d0199Sgd78059 
26105c1d0199Sgd78059 	/*
26115c1d0199Sgd78059 	 * Check for custom "opmode-reg-value" property;
26125c1d0199Sgd78059 	 * if none, use the defaults below for CSR6 ...
26135c1d0199Sgd78059 	 */
26145c1d0199Sgd78059 	csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
26155c1d0199Sgd78059 	dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
26165c1d0199Sgd78059 	    DDI_PROP_DONTPASS, opmode_propname, csr6);
26175c1d0199Sgd78059 
26185c1d0199Sgd78059 	/*
26195c1d0199Sgd78059 	 * Read chip ID & set up config space command register(s)
26205c1d0199Sgd78059 	 */
26215c1d0199Sgd78059 	if (dmfe_config_init(dmfep, &dmfep->chipid) != DDI_SUCCESS) {
26225c1d0199Sgd78059 		dmfe_error(dmfep, "dmfe_config_init() failed");
26235c1d0199Sgd78059 		goto attach_fail;
26245c1d0199Sgd78059 	}
26255c1d0199Sgd78059 
26265c1d0199Sgd78059 	/*
26275c1d0199Sgd78059 	 * Map operating registers
26285c1d0199Sgd78059 	 */
26295c1d0199Sgd78059 	err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
26305c1d0199Sgd78059 	    &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
26315c1d0199Sgd78059 	if (err != DDI_SUCCESS) {
26325c1d0199Sgd78059 		dmfe_error(dmfep, "ddi_regs_map_setup() failed");
26335c1d0199Sgd78059 		goto attach_fail;
26345c1d0199Sgd78059 	}
26355c1d0199Sgd78059 
26365c1d0199Sgd78059 	/*
26375c1d0199Sgd78059 	 * Get our MAC address.
26385c1d0199Sgd78059 	 */
26395c1d0199Sgd78059 	dmfe_find_mac_address(dmfep);
26405c1d0199Sgd78059 
26415c1d0199Sgd78059 	/*
26425c1d0199Sgd78059 	 * Allocate the TX and RX descriptors/buffers.
26435c1d0199Sgd78059 	 */
26445c1d0199Sgd78059 	dmfep->tx.n_desc = dmfe_tx_desc;
26455c1d0199Sgd78059 	dmfep->rx.n_desc = dmfe_rx_desc;
26465c1d0199Sgd78059 	err = dmfe_alloc_bufs(dmfep);
26475c1d0199Sgd78059 	if (err != DDI_SUCCESS) {
26485c1d0199Sgd78059 		goto attach_fail;
26495c1d0199Sgd78059 	}
26505c1d0199Sgd78059 
26515c1d0199Sgd78059 	/*
26525c1d0199Sgd78059 	 * Add the softint handler
26535c1d0199Sgd78059 	 */
26545c1d0199Sgd78059 	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
26555c1d0199Sgd78059 	    NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
26565c1d0199Sgd78059 		dmfe_error(dmfep, "ddi_add_softintr() failed");
26575c1d0199Sgd78059 		goto attach_fail;
26585c1d0199Sgd78059 	}
26595c1d0199Sgd78059 	dmfep->progress |= PROGRESS_SOFTINT;
26605c1d0199Sgd78059 
26615c1d0199Sgd78059 	/*
26625c1d0199Sgd78059 	 * Add the h/w interrupt handler & initialise mutexen
26635c1d0199Sgd78059 	 */
2664bdb9230aSGarrett D'Amore 	if (ddi_get_iblock_cookie(devinfo, 0, &dmfep->iblk) != DDI_SUCCESS) {
2665bdb9230aSGarrett D'Amore 		dmfe_error(dmfep, "ddi_get_iblock_cookie() failed");
26665c1d0199Sgd78059 		goto attach_fail;
26675c1d0199Sgd78059 	}
2668bdb9230aSGarrett D'Amore 
26695c1d0199Sgd78059 	mutex_init(dmfep->milock, NULL, MUTEX_DRIVER, NULL);
26705c1d0199Sgd78059 	mutex_init(dmfep->oplock, NULL, MUTEX_DRIVER, dmfep->iblk);
26715c1d0199Sgd78059 	mutex_init(dmfep->rxlock, NULL, MUTEX_DRIVER, dmfep->iblk);
26725c1d0199Sgd78059 	mutex_init(dmfep->txlock, NULL, MUTEX_DRIVER, dmfep->iblk);
2673bdb9230aSGarrett D'Amore 	dmfep->progress |= PROGRESS_MUTEX;
2674bdb9230aSGarrett D'Amore 
2675bdb9230aSGarrett D'Amore 	if (ddi_add_intr(devinfo, 0, NULL, NULL,
2676bdb9230aSGarrett D'Amore 	    dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
2677bdb9230aSGarrett D'Amore 		dmfe_error(dmfep, "ddi_add_intr() failed");
2678bdb9230aSGarrett D'Amore 		goto attach_fail;
2679bdb9230aSGarrett D'Amore 	}
26805c1d0199Sgd78059 	dmfep->progress |= PROGRESS_HWINT;
26815c1d0199Sgd78059 
26825c1d0199Sgd78059 	/*
26835c1d0199Sgd78059 	 * Create & initialise named kstats
26845c1d0199Sgd78059 	 */
26855c1d0199Sgd78059 	dmfe_init_kstats(dmfep, instance);
26865c1d0199Sgd78059 
26875c1d0199Sgd78059 	/*
26885c1d0199Sgd78059 	 * Reset & initialise the chip and the ring buffers
26895c1d0199Sgd78059 	 * Initialise the (internal) PHY
26905c1d0199Sgd78059 	 */
26915c1d0199Sgd78059 	mutex_enter(dmfep->oplock);
26925c1d0199Sgd78059 	mutex_enter(dmfep->rxlock);
26935c1d0199Sgd78059 	mutex_enter(dmfep->txlock);
26945c1d0199Sgd78059 
26955c1d0199Sgd78059 	dmfe_reset(dmfep);
26965c1d0199Sgd78059 
26975c1d0199Sgd78059 	/*
26985c1d0199Sgd78059 	 * Prepare the setup packet
26995c1d0199Sgd78059 	 */
27005c1d0199Sgd78059 	bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);
27015c1d0199Sgd78059 	bzero(dmfep->mcast_refs, MCASTBUF_SIZE);
27025c1d0199Sgd78059 	dmfep->addr_set = B_FALSE;
27035c1d0199Sgd78059 	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
27045c1d0199Sgd78059 	dmfep->mac_state = DMFE_MAC_RESET;
27055c1d0199Sgd78059 
27065c1d0199Sgd78059 	mutex_exit(dmfep->txlock);
27075c1d0199Sgd78059 	mutex_exit(dmfep->rxlock);
27085c1d0199Sgd78059 	mutex_exit(dmfep->oplock);
27095c1d0199Sgd78059 
27105c1d0199Sgd78059 	if (dmfe_init_phy(dmfep) != B_TRUE)
27115c1d0199Sgd78059 		goto attach_fail;
27125c1d0199Sgd78059 
27135c1d0199Sgd78059 	/*
27145c1d0199Sgd78059 	 * Send a reasonable setup frame.  This configures our starting
27155c1d0199Sgd78059 	 * address and the broadcast address.
27165c1d0199Sgd78059 	 */
27175c1d0199Sgd78059 	(void) dmfe_m_unicst(dmfep, dmfep->curr_addr);
27185c1d0199Sgd78059 
27195c1d0199Sgd78059 	/*
27205c1d0199Sgd78059 	 * Initialize pointers to device specific functions which
27215c1d0199Sgd78059 	 * will be used by the generic layer.
27225c1d0199Sgd78059 	 */
27235c1d0199Sgd78059 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
27245c1d0199Sgd78059 		goto attach_fail;
27255c1d0199Sgd78059 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
27265c1d0199Sgd78059 	macp->m_driver = dmfep;
27275c1d0199Sgd78059 	macp->m_dip = devinfo;
27285c1d0199Sgd78059 	macp->m_src_addr = dmfep->curr_addr;
27295c1d0199Sgd78059 	macp->m_callbacks = &dmfe_m_callbacks;
27305c1d0199Sgd78059 	macp->m_min_sdu = 0;
27315c1d0199Sgd78059 	macp->m_max_sdu = ETHERMTU;
2732d62bc4baSyz147064 	macp->m_margin = VLAN_TAGSZ;
27335c1d0199Sgd78059 
27345c1d0199Sgd78059 	/*
27355c1d0199Sgd78059 	 * Finally, we're ready to register ourselves with the MAC layer
27365c1d0199Sgd78059 	 * interface; if this succeeds, we're all ready to start()
27375c1d0199Sgd78059 	 */
27385c1d0199Sgd78059 	err = mac_register(macp, &dmfep->mh);
27395c1d0199Sgd78059 	mac_free(macp);
27405c1d0199Sgd78059 	if (err != 0)
27415c1d0199Sgd78059 		goto attach_fail;
27425c1d0199Sgd78059 	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);
27435c1d0199Sgd78059 
27445c1d0199Sgd78059 	/*
27455c1d0199Sgd78059 	 * Install the cyclic callback that we use to check for link
27465c1d0199Sgd78059 	 * status, transmit stall, etc. The cyclic callback (dmfe_cyclic())
27475c1d0199Sgd78059 	 * is then invoked in kernel context.
27485c1d0199Sgd78059 	 */
27495c1d0199Sgd78059 	ASSERT(dmfep->cycid == NULL);
27505c1d0199Sgd78059 	dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
27515c1d0199Sgd78059 	    dmfe_tick_us * 1000, DDI_IPL_0);
27525c1d0199Sgd78059 	return (DDI_SUCCESS);
27535c1d0199Sgd78059 
27545c1d0199Sgd78059 attach_fail:
27555c1d0199Sgd78059 	dmfe_unattach(dmfep);
27565c1d0199Sgd78059 	return (DDI_FAILURE);
27575c1d0199Sgd78059 }
27585c1d0199Sgd78059 
27595c1d0199Sgd78059 /*
27605c1d0199Sgd78059  *	dmfe_suspend() -- suspend transmit/receive for powerdown
27615c1d0199Sgd78059  */
27625c1d0199Sgd78059 static int
27635c1d0199Sgd78059 dmfe_suspend(dmfe_t *dmfep)
27645c1d0199Sgd78059 {
27655c1d0199Sgd78059 	/*
27665c1d0199Sgd78059 	 * Just stop processing ...
27675c1d0199Sgd78059 	 */
2768bdb9230aSGarrett D'Amore 	mii_suspend(dmfep->mii);
27695c1d0199Sgd78059 	mutex_enter(dmfep->oplock);
27705c1d0199Sgd78059 	dmfe_stop(dmfep);
2771bdb9230aSGarrett D'Amore 
2772bdb9230aSGarrett D'Amore 	mutex_enter(dmfep->txlock);
2773bdb9230aSGarrett D'Amore 	dmfep->suspended = B_TRUE;
2774bdb9230aSGarrett D'Amore 	mutex_exit(dmfep->txlock);
27755c1d0199Sgd78059 	mutex_exit(dmfep->oplock);
27765c1d0199Sgd78059 
27775c1d0199Sgd78059 	return (DDI_SUCCESS);
27785c1d0199Sgd78059 }
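
/*
 * Note the lock order shared by the suspend and resume paths: oplock is
 * taken before txlock, and the "suspended" flag is only changed while
 * both are held.
 */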
27795c1d0199Sgd78059 
27805c1d0199Sgd78059 /*
27815c1d0199Sgd78059  * detach(9E) -- Detach a device from the system
27825c1d0199Sgd78059  */
27835c1d0199Sgd78059 static int
27845c1d0199Sgd78059 dmfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
27855c1d0199Sgd78059 {
27865c1d0199Sgd78059 	dmfe_t *dmfep;
27875c1d0199Sgd78059 
27885c1d0199Sgd78059 	dmfep = ddi_get_driver_private(devinfo);
27895c1d0199Sgd78059 
27905c1d0199Sgd78059 	switch (cmd) {
27915c1d0199Sgd78059 	default:
27925c1d0199Sgd78059 		return (DDI_FAILURE);
27935c1d0199Sgd78059 
27945c1d0199Sgd78059 	case DDI_SUSPEND:
27955c1d0199Sgd78059 		return (dmfe_suspend(dmfep));
27965c1d0199Sgd78059 
27975c1d0199Sgd78059 	case DDI_DETACH:
27985c1d0199Sgd78059 		break;
27995c1d0199Sgd78059 	}
28005c1d0199Sgd78059 
28015c1d0199Sgd78059 	/*
28025c1d0199Sgd78059 	 * Unregister from the MAC subsystem.  This can fail, in
28035c1d0199Sgd78059 	 * particular if there are DLPI style-2 streams still open -
28045c1d0199Sgd78059 	 * in which case we just return failure without shutting
28055c1d0199Sgd78059 	 * down chip operations.
28065c1d0199Sgd78059 	 */
28075c1d0199Sgd78059 	if (mac_unregister(dmfep->mh) != DDI_SUCCESS)
28085c1d0199Sgd78059 		return (DDI_FAILURE);
28095c1d0199Sgd78059 
28105c1d0199Sgd78059 	/*
28115c1d0199Sgd78059 	 * All activity stopped, so we can clean up & exit
28125c1d0199Sgd78059 	 */
28135c1d0199Sgd78059 	dmfe_unattach(dmfep);
28145c1d0199Sgd78059 	return (DDI_SUCCESS);
28155c1d0199Sgd78059 }
28165c1d0199Sgd78059 
28175c1d0199Sgd78059 
28185c1d0199Sgd78059 /*
28195c1d0199Sgd78059  * ========== Module Loading Data & Entry Points ==========
28205c1d0199Sgd78059  */
28215c1d0199Sgd78059 
28225c1d0199Sgd78059 DDI_DEFINE_STREAM_OPS(dmfe_dev_ops, nulldev, nulldev, dmfe_attach, dmfe_detach,
282319397407SSherry Moore 	nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);
28245c1d0199Sgd78059 
28255c1d0199Sgd78059 static struct modldrv dmfe_modldrv = {
28265c1d0199Sgd78059 	&mod_driverops,		/* Type of module.  This one is a driver */
28275c1d0199Sgd78059 	dmfe_ident,		/* short description */
28285c1d0199Sgd78059 	&dmfe_dev_ops		/* driver specific ops */
28295c1d0199Sgd78059 };
28305c1d0199Sgd78059 
28315c1d0199Sgd78059 static struct modlinkage modlinkage = {
28325c1d0199Sgd78059 	MODREV_1, (void *)&dmfe_modldrv, NULL
28335c1d0199Sgd78059 };
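
/*
 * Once loaded, the module shows up in modinfo(1M) output with the
 * dmfe_ident description string, e.g.:
 *
 *	# modinfo | grep dmfe
 */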
28345c1d0199Sgd78059 
28355c1d0199Sgd78059 int
28365c1d0199Sgd78059 _info(struct modinfo *modinfop)
28375c1d0199Sgd78059 {
28385c1d0199Sgd78059 	return (mod_info(&modlinkage, modinfop));
28395c1d0199Sgd78059 }
28405c1d0199Sgd78059 
28415c1d0199Sgd78059 int
28425c1d0199Sgd78059 _init(void)
28435c1d0199Sgd78059 {
28445c1d0199Sgd78059 	uint32_t tmp100;
28455c1d0199Sgd78059 	uint32_t tmp10;
28465c1d0199Sgd78059 	int i;
28475c1d0199Sgd78059 	int status;
28485c1d0199Sgd78059 
28495c1d0199Sgd78059 	/* Calculate global timing parameters */
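	/*
	 * The divisions below round up (ceiling division): for example,
	 * with a hypothetical dmfe_tick_us of 25000 and dmfe_tx100_stall_us
	 * of 2000000, tmp100 = (2000000 + 25000 - 1) / 25000 = 80 ticks.
	 */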
28505c1d0199Sgd78059 	tmp100 = (dmfe_tx100_stall_us+dmfe_tick_us-1)/dmfe_tick_us;
28515c1d0199Sgd78059 	tmp10 = (dmfe_tx10_stall_us+dmfe_tick_us-1)/dmfe_tick_us;
28525c1d0199Sgd78059 
28535c1d0199Sgd78059 	for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) {
28545c1d0199Sgd78059 		switch (i) {
28555c1d0199Sgd78059 		case TX_PROCESS_STATE(TX_PROCESS_FETCH_DATA):
28565c1d0199Sgd78059 		case TX_PROCESS_STATE(TX_PROCESS_WAIT_END):
28575c1d0199Sgd78059 			/*
28585c1d0199Sgd78059 			 * The chip doesn't spontaneously recover from
28595c1d0199Sgd78059 			 * a stall in these states, so we reset early
28605c1d0199Sgd78059 			 */
28615c1d0199Sgd78059 			stall_100_tix[i] = tmp100;
28625c1d0199Sgd78059 			stall_10_tix[i] = tmp10;
28635c1d0199Sgd78059 			break;
28645c1d0199Sgd78059 
28655c1d0199Sgd78059 		case TX_PROCESS_STATE(TX_PROCESS_SUSPEND):
28665c1d0199Sgd78059 		default:
28675c1d0199Sgd78059 			/*
28685c1d0199Sgd78059 			 * The chip has been seen to spontaneously recover
28695c1d0199Sgd78059 			 * after an apparent stall in the SUSPEND state,
28705c1d0199Sgd78059 			 * so we'll allow it rather longer to do so.  As
28715c1d0199Sgd78059 			 * stalls in other states have not been observed,
28725c1d0199Sgd78059 			 * we'll use long timeouts for them too ...
28735c1d0199Sgd78059 			 */
28745c1d0199Sgd78059 			stall_100_tix[i] = tmp100 * 20;
28755c1d0199Sgd78059 			stall_10_tix[i] = tmp10 * 20;
28765c1d0199Sgd78059 			break;
28775c1d0199Sgd78059 		}
28785c1d0199Sgd78059 	}
28795c1d0199Sgd78059 
28805c1d0199Sgd78059 	mac_init_ops(&dmfe_dev_ops, "dmfe");
28815c1d0199Sgd78059 	status = mod_install(&modlinkage);
28825c1d0199Sgd78059 	if (status == DDI_SUCCESS)
28835c1d0199Sgd78059 		dmfe_log_init();
28845c1d0199Sgd78059 
28855c1d0199Sgd78059 	return (status);
28865c1d0199Sgd78059 }
28875c1d0199Sgd78059 
28885c1d0199Sgd78059 int
28895c1d0199Sgd78059 _fini(void)
28905c1d0199Sgd78059 {
28915c1d0199Sgd78059 	int status;
28925c1d0199Sgd78059 
28935c1d0199Sgd78059 	status = mod_remove(&modlinkage);
28945c1d0199Sgd78059 	if (status == DDI_SUCCESS) {
28955c1d0199Sgd78059 		mac_fini_ops(&dmfe_dev_ops);
28965c1d0199Sgd78059 		dmfe_log_fini();
28975c1d0199Sgd78059 	}
28985c1d0199Sgd78059 
28995c1d0199Sgd78059 	return (status);
29005c1d0199Sgd78059 }