xref: /illumos-gate/usr/src/uts/common/io/bnx/bnxrcv.c (revision eef4f27b)
1*eef4f27bSRobert Mustacchi /*
2*eef4f27bSRobert Mustacchi  * Copyright 2014-2017 Cavium, Inc.
3*eef4f27bSRobert Mustacchi  * The contents of this file are subject to the terms of the Common Development
4*eef4f27bSRobert Mustacchi  * and Distribution License, v.1,  (the "License").
5*eef4f27bSRobert Mustacchi  *
6*eef4f27bSRobert Mustacchi  * You may not use this file except in compliance with the License.
7*eef4f27bSRobert Mustacchi  *
8*eef4f27bSRobert Mustacchi  * You can obtain a copy of the License at available
9*eef4f27bSRobert Mustacchi  * at http://opensource.org/licenses/CDDL-1.0
10*eef4f27bSRobert Mustacchi  *
11*eef4f27bSRobert Mustacchi  * See the License for the specific language governing permissions and
12*eef4f27bSRobert Mustacchi  * limitations under the License.
13*eef4f27bSRobert Mustacchi  */
14*eef4f27bSRobert Mustacchi 
15*eef4f27bSRobert Mustacchi /*
16*eef4f27bSRobert Mustacchi  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
17*eef4f27bSRobert Mustacchi  * Copyright (c) 2019, Joyent, Inc.
18*eef4f27bSRobert Mustacchi  */
19*eef4f27bSRobert Mustacchi 
20*eef4f27bSRobert Mustacchi #include "bnxrcv.h"
21*eef4f27bSRobert Mustacchi 
22*eef4f27bSRobert Mustacchi 
23*eef4f27bSRobert Mustacchi #define	BNX_RECV_INIT_FAIL_THRESH 1
24*eef4f27bSRobert Mustacchi 
25*eef4f27bSRobert Mustacchi #ifndef	NUM_RX_CHAIN
26*eef4f27bSRobert Mustacchi #error NUM_RX_CHAIN is not defined.
27*eef4f27bSRobert Mustacchi #else
28*eef4f27bSRobert Mustacchi /*
29*eef4f27bSRobert Mustacchi  * Range check NUM_RX_CHAIN.  Technically the LM controls this definition,
30*eef4f27bSRobert Mustacchi  * but it makes sense to use what the LM uses.
31*eef4f27bSRobert Mustacchi  */
32*eef4f27bSRobert Mustacchi #if NUM_RX_CHAIN < 0
33*eef4f27bSRobert Mustacchi #error Invalid NUM_RX_CHAIN definition.
34*eef4f27bSRobert Mustacchi #elif NUM_RX_CHAIN > 1
35*eef4f27bSRobert Mustacchi #warning NUM_RX_CHAIN is greater than 1.
36*eef4f27bSRobert Mustacchi #endif
37*eef4f27bSRobert Mustacchi #endif
38*eef4f27bSRobert Mustacchi 
39*eef4f27bSRobert Mustacchi 
/*
 * DMA attributes used when allocating and binding receive packet buffers
 * (jumbo-frame capable).  The device can address a full 64-bit range, and
 * a buffer may be split across up to BNX_RECV_MAX_FRAGS cookies, each at
 * least BNX_MIN_BYTES_PER_FRAGMENT in granularity.
 */
static ddi_dma_attr_t bnx_rx_jmb_dma_attrib = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0,				/* dma_attr_addr_lo */
	0xffffffffffffffff,		/* dma_attr_addr_hi */
	0x0ffffff,			/* dma_attr_count_max */
	BNX_DMA_ALIGNMENT,		/* dma_attr_align */
	0xffffffff,			/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0x00ffffff,			/* dma_attr_maxxfer */
	0xffffffff,			/* dma_attr_seg */
	BNX_RECV_MAX_FRAGS,		/* dma_attr_sgllen */
	BNX_MIN_BYTES_PER_FRAGMENT,	/* dma_attr_granular */
	0,				/* dma_attr_flags */
};
54*eef4f27bSRobert Mustacchi 
55*eef4f27bSRobert Mustacchi static int
bnx_rxbuffer_alloc(um_device_t * const umdevice,um_rxpacket_t * const umpacket)56*eef4f27bSRobert Mustacchi bnx_rxbuffer_alloc(um_device_t *const umdevice, um_rxpacket_t *const umpacket)
57*eef4f27bSRobert Mustacchi {
58*eef4f27bSRobert Mustacchi 	int rc;
59*eef4f27bSRobert Mustacchi 	size_t pktsize;
60*eef4f27bSRobert Mustacchi 	size_t reallen;
61*eef4f27bSRobert Mustacchi 	uint_t dc_count;
62*eef4f27bSRobert Mustacchi 	lm_packet_t *lmpacket;
63*eef4f27bSRobert Mustacchi 	ddi_dma_cookie_t cookie;
64*eef4f27bSRobert Mustacchi 
65*eef4f27bSRobert Mustacchi 	lmpacket = &(umpacket->lmpacket);
66*eef4f27bSRobert Mustacchi 
67*eef4f27bSRobert Mustacchi 	rc = ddi_dma_alloc_handle(umdevice->os_param.dip,
68*eef4f27bSRobert Mustacchi 	    &bnx_rx_jmb_dma_attrib, DDI_DMA_DONTWAIT,
69*eef4f27bSRobert Mustacchi 	    (void *)0, &(umpacket->dma_handle));
70*eef4f27bSRobert Mustacchi 	if (rc != DDI_SUCCESS) {
71*eef4f27bSRobert Mustacchi 		return (-1);
72*eef4f27bSRobert Mustacchi 	}
73*eef4f27bSRobert Mustacchi 
74*eef4f27bSRobert Mustacchi 	/*
75*eef4f27bSRobert Mustacchi 	 * The buffer size as set by the lower module is the actual buffer
76*eef4f27bSRobert Mustacchi 	 * size plus room for a small, 16 byte inline rx buffer descriptor
77*eef4f27bSRobert Mustacchi 	 * header plus an implied two byte TCP shift optimization.  We
78*eef4f27bSRobert Mustacchi 	 * don't need to adjust the size at all.
79*eef4f27bSRobert Mustacchi 	 */
80*eef4f27bSRobert Mustacchi 	pktsize = lmpacket->u1.rx.buf_size;
81*eef4f27bSRobert Mustacchi 
82*eef4f27bSRobert Mustacchi 	rc = ddi_dma_mem_alloc(umpacket->dma_handle, pktsize,
83*eef4f27bSRobert Mustacchi 	    &bnxAccessAttribBUF, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
84*eef4f27bSRobert Mustacchi 	    (void *)0, (caddr_t *)&lmpacket->u1.rx.mem_virt, &reallen,
85*eef4f27bSRobert Mustacchi 	    &umpacket->dma_acc_handle);
86*eef4f27bSRobert Mustacchi 	if (rc != DDI_SUCCESS) {
87*eef4f27bSRobert Mustacchi 		goto error1;
88*eef4f27bSRobert Mustacchi 	}
89*eef4f27bSRobert Mustacchi 
90*eef4f27bSRobert Mustacchi 	/* Bind the message block buffer address to the handle. */
91*eef4f27bSRobert Mustacchi 	rc = ddi_dma_addr_bind_handle(umpacket->dma_handle, NULL,
92*eef4f27bSRobert Mustacchi 	    (caddr_t)lmpacket->u1.rx.mem_virt, pktsize,
93*eef4f27bSRobert Mustacchi 	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
94*eef4f27bSRobert Mustacchi 	    &cookie, &dc_count);
95*eef4f27bSRobert Mustacchi 	if (rc != DDI_DMA_MAPPED) {
96*eef4f27bSRobert Mustacchi 		goto error2;
97*eef4f27bSRobert Mustacchi 	}
98*eef4f27bSRobert Mustacchi 
99*eef4f27bSRobert Mustacchi 	lmpacket->u1.rx.mem_phy.as_u64 = cookie.dmac_laddress;
100*eef4f27bSRobert Mustacchi 
101*eef4f27bSRobert Mustacchi 	return (0);
102*eef4f27bSRobert Mustacchi 
103*eef4f27bSRobert Mustacchi error2:
104*eef4f27bSRobert Mustacchi 	ddi_dma_mem_free(&(umpacket->dma_acc_handle));
105*eef4f27bSRobert Mustacchi 
106*eef4f27bSRobert Mustacchi error1:
107*eef4f27bSRobert Mustacchi 	ddi_dma_free_handle(&(umpacket->dma_handle));
108*eef4f27bSRobert Mustacchi 
109*eef4f27bSRobert Mustacchi 	return (-1);
110*eef4f27bSRobert Mustacchi }
111*eef4f27bSRobert Mustacchi 
112*eef4f27bSRobert Mustacchi static void
bnx_rxbuffer_free(um_device_t * const umdevice,um_rxpacket_t * const umpacket)113*eef4f27bSRobert Mustacchi bnx_rxbuffer_free(um_device_t * const umdevice, um_rxpacket_t * const umpacket)
114*eef4f27bSRobert Mustacchi {
115*eef4f27bSRobert Mustacchi 	lm_packet_t *lmpacket;
116*eef4f27bSRobert Mustacchi 
117*eef4f27bSRobert Mustacchi 	lmpacket = &(umpacket->lmpacket);
118*eef4f27bSRobert Mustacchi 
119*eef4f27bSRobert Mustacchi 	lmpacket->u1.rx.mem_phy.as_u64 = 0;
120*eef4f27bSRobert Mustacchi 	lmpacket->u1.rx.buf_size = 0;
121*eef4f27bSRobert Mustacchi 
122*eef4f27bSRobert Mustacchi 	(void) ddi_dma_unbind_handle(umpacket->dma_handle);
123*eef4f27bSRobert Mustacchi 
124*eef4f27bSRobert Mustacchi 	lmpacket->u1.rx.mem_virt = NULL;
125*eef4f27bSRobert Mustacchi 	ddi_dma_mem_free(&umpacket->dma_acc_handle);
126*eef4f27bSRobert Mustacchi 
127*eef4f27bSRobert Mustacchi 	ddi_dma_free_handle(&(umpacket->dma_handle));
128*eef4f27bSRobert Mustacchi }
129*eef4f27bSRobert Mustacchi 
130*eef4f27bSRobert Mustacchi static void
bnx_recv_ring_init(um_device_t * const umdevice,const unsigned int ringidx)131*eef4f27bSRobert Mustacchi bnx_recv_ring_init(um_device_t * const umdevice, const unsigned int ringidx)
132*eef4f27bSRobert Mustacchi {
133*eef4f27bSRobert Mustacchi 	s_list_t *srcq;
134*eef4f27bSRobert Mustacchi 	s_list_t *dstq;
135*eef4f27bSRobert Mustacchi 	lm_rx_chain_t *lmrxring;
136*eef4f27bSRobert Mustacchi 	um_recv_qinfo *recvinfo;
137*eef4f27bSRobert Mustacchi 	um_rxpacket_t *umpacket;
138*eef4f27bSRobert Mustacchi 
139*eef4f27bSRobert Mustacchi 	recvinfo = &_RX_QINFO(umdevice, ringidx);
140*eef4f27bSRobert Mustacchi 
141*eef4f27bSRobert Mustacchi 	recvinfo->processing = B_FALSE;
142*eef4f27bSRobert Mustacchi 
143*eef4f27bSRobert Mustacchi 	lmrxring = &umdevice->lm_dev.rx_info.chain[ringidx];
144*eef4f27bSRobert Mustacchi 
145*eef4f27bSRobert Mustacchi 	srcq = &(lmrxring->free_descq);
146*eef4f27bSRobert Mustacchi 
147*eef4f27bSRobert Mustacchi 	dstq = &(recvinfo->buffq);
148*eef4f27bSRobert Mustacchi 
149*eef4f27bSRobert Mustacchi 	s_list_init(dstq, NULL, NULL, 0);
150*eef4f27bSRobert Mustacchi 
151*eef4f27bSRobert Mustacchi 	/* CONSTANTCONDITION */
152*eef4f27bSRobert Mustacchi 	/*
153*eef4f27bSRobert Mustacchi 	 * Put all available packet descriptors in our special wait queue.
154*eef4f27bSRobert Mustacchi 	 * The wait queue is an area to store packet descriptors that do
155*eef4f27bSRobert Mustacchi 	 * not yet have buffers associated with them.
156*eef4f27bSRobert Mustacchi 	 */
157*eef4f27bSRobert Mustacchi 	while (1) {
158*eef4f27bSRobert Mustacchi 		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
159*eef4f27bSRobert Mustacchi 		if (umpacket == NULL) {
160*eef4f27bSRobert Mustacchi 			break;
161*eef4f27bSRobert Mustacchi 		}
162*eef4f27bSRobert Mustacchi 
163*eef4f27bSRobert Mustacchi 		s_list_push_tail(dstq, &(umpacket->lmpacket.link));
164*eef4f27bSRobert Mustacchi 	}
165*eef4f27bSRobert Mustacchi 
166*eef4f27bSRobert Mustacchi 	dstq  = &(recvinfo->waitq);
167*eef4f27bSRobert Mustacchi 
168*eef4f27bSRobert Mustacchi 	s_list_init(dstq, NULL, NULL, 0);
169*eef4f27bSRobert Mustacchi }
170*eef4f27bSRobert Mustacchi 
171*eef4f27bSRobert Mustacchi static void
bnx_recv_ring_fill(um_device_t * const umdevice,const unsigned int ringidx)172*eef4f27bSRobert Mustacchi bnx_recv_ring_fill(um_device_t * const umdevice, const unsigned int ringidx)
173*eef4f27bSRobert Mustacchi {
174*eef4f27bSRobert Mustacchi 	s_list_t *srcq;
175*eef4f27bSRobert Mustacchi 	s_list_t *dstq;
176*eef4f27bSRobert Mustacchi 	um_rxpacket_t *umpacket;
177*eef4f27bSRobert Mustacchi 	um_recv_qinfo *recvinfo;
178*eef4f27bSRobert Mustacchi 
179*eef4f27bSRobert Mustacchi 	recvinfo = &(_RX_QINFO(umdevice, ringidx));
180*eef4f27bSRobert Mustacchi 
181*eef4f27bSRobert Mustacchi 	srcq = &(recvinfo->buffq);
182*eef4f27bSRobert Mustacchi 
183*eef4f27bSRobert Mustacchi 	dstq = &(umdevice->lm_dev.rx_info.chain[ringidx].free_descq);
184*eef4f27bSRobert Mustacchi 
185*eef4f27bSRobert Mustacchi 	/* CONSTANTCONDITION */
186*eef4f27bSRobert Mustacchi 	/* Populate as many of the packet descriptors as we can. */
187*eef4f27bSRobert Mustacchi 	while (1) {
188*eef4f27bSRobert Mustacchi 		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
189*eef4f27bSRobert Mustacchi 		if (umpacket == NULL) {
190*eef4f27bSRobert Mustacchi 			break;
191*eef4f27bSRobert Mustacchi 		}
192*eef4f27bSRobert Mustacchi 
193*eef4f27bSRobert Mustacchi 		if (bnx_rxbuffer_alloc(umdevice, umpacket) != 0) {
194*eef4f27bSRobert Mustacchi 			s_list_push_head(srcq, &umpacket->lmpacket.link);
195*eef4f27bSRobert Mustacchi 			break;
196*eef4f27bSRobert Mustacchi 		}
197*eef4f27bSRobert Mustacchi 
198*eef4f27bSRobert Mustacchi 		s_list_push_tail(dstq, &umpacket->lmpacket.link);
199*eef4f27bSRobert Mustacchi 	}
200*eef4f27bSRobert Mustacchi }
201*eef4f27bSRobert Mustacchi 
/*
 * Drain this ring's wait queue of completed receive packets, copying
 * acceptable frames into fresh mblks and passing the resulting chain up to
 * the MAC layer in a single mac_rx() call.  Failed or unclaimable packets
 * have their descriptors recycled onto the lower module's free queue.
 *
 * NOTE!!!  This function assumes the rcv_mutex is already held.  It is
 * dropped around the mac_rx() upcall and reacquired afterwards.
 */
static void
bnx_recv_ring_recv(um_device_t *const umdevice, const unsigned int ringidx)
{
	mblk_t *head = NULL;
	mblk_t *tail = NULL;
	s_list_t *srcq;
	s_list_t *recvq;
	s_list_t *freeq;
	boolean_t dcopy;
	boolean_t lm_rcvq_empty;
	lm_packet_t *lmpacket;
	um_rxpacket_t *umpacket;
	um_recv_qinfo *recvinfo;

	recvinfo = &(_RX_QINFO(umdevice, ringidx));

	/*
	 * We can't hold the receive mutex across the receive function or
	 * deadlock results.  So that other threads know we are still doing
	 * business, toggle a flag they can look at.  If the flag says,
	 * we're processing, other threads should back off.
	 */
	recvinfo->processing = B_TRUE;

	srcq  = &(recvinfo->waitq);
	freeq = &(umdevice->lm_dev.rx_info.chain[ringidx].free_descq);

	/* Remember whether the hardware still has any posted rx buffers. */
	recvq = &(umdevice->lm_dev.rx_info.chain[ringidx].active_descq);
	if (s_list_entry_cnt(recvq)) {
		lm_rcvq_empty = B_FALSE;
	} else {
		lm_rcvq_empty = B_TRUE;
	}

	/* CONSTANTCONDITION */
	/* Send the rx packets up. */
	while (1) {
		mblk_t *mp = NULL;
		unsigned int pktlen;
		int ofld_flags;

		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
		if (umpacket == NULL) {
			break;
		}

		lmpacket = &(umpacket->lmpacket);

		/* Recycle packets the hardware flagged as bad. */
		if (lmpacket->status != LM_STATUS_SUCCESS) {
			s_list_push_tail(freeq, &(lmpacket->link));
			continue;
		}

		pktlen = lmpacket->size;

		/*
		 * FIXME -- Implement mm_flush_cache().
		 *
		 * The LM uses mm_flush_cache() to make sure the processor is
		 * working with current data.  The call to ddi_dma_sync should
		 * go there instead.  How mm_flush_cache() should be
		 * implemented depends on what test mode we are in.
		 *
		 * if (lmdevice->params.test_mode & TEST_MODE_VERIFY_RX_CRC) {
		 *	// The LM will need access to the complete rx buffer.
		 * } else {
		 *	// The LM only needs access to the 16 byte inline rx BD.
		 *	// Be sure in this case to ddi_dma_sync() as many
		 *	// fragments as necessary to get the full rx BD in
		 *	// host memory.
		 * }
		 */
		(void) ddi_dma_sync(umpacket->dma_handle, 0,
		    pktlen + L2RX_FRAME_HDR_LEN, DDI_DMA_SYNC_FORKERNEL);

		dcopy = B_FALSE;

		if (pktlen < umdevice->rx_copy_threshold) {
			lm_device_t *lmdevice;
			lmdevice = &(umdevice->lm_dev);

			if ((lmdevice->params.keep_vlan_tag == 0) &&
			    (lmpacket->u1.rx.flags &
			    LM_RX_FLAG_VALID_VLAN_TAG)) {

				/*
				 * The hardware stripped the VLAN tag
				 * we must now reinsert the tag.  This is
				 * done to be compatible with older firmware
				 * that could not handle VLAN tags.  The
				 * allocation is pktlen plus 2 bytes of TCP
				 * alignment padding plus 4 bytes of tag.
				 */
				mp = allocb(pktlen + 6, BPRI_MED);
				if (mp != NULL) {
					uint8_t *dataptr;
					const uint16_t tpid = htons(0x8100);
					uint16_t vlan_tag;

					vlan_tag =
					    htons(lmpacket->u1.rx.vlan_tag);

					/*
					 * For analysis of the packet contents,
					 * we first need to advance
					 * the pointer beyond the inlined return
					 * buffer descriptor.
					 */
					dataptr = lmpacket->u1.rx.mem_virt +
					    L2RX_FRAME_HDR_LEN;

					/* TCP alignment optimization. */
					mp->b_rptr += 2;

					/*
					 * First copy the dest/source MAC
					 * addresses
					 */
					bcopy(dataptr, mp->b_rptr, 12);

					/* Second copy the VLAN tag */
					bcopy(&tpid, mp->b_rptr + 12, 2);
					bcopy(&vlan_tag, mp->b_rptr + 14, 2);

					/* Third copy the rest of the packet */
					dataptr = dataptr + 12;

					bcopy(dataptr, mp->b_rptr + 16,
					    pktlen - 12);
					mp->b_wptr = mp->b_rptr + pktlen + 4;

					dcopy = B_TRUE;

					goto sendup;
				}
			} else {
				/*  The hardware didn't strip the VLAN tag  */
				mp = allocb(pktlen + 2, BPRI_MED);
				if (mp != NULL) {
					uint8_t *dataptr;

					/*
					 * For analysis of the packet contents,
					 * we first need to advance
					 * the pointer beyond the inlined return
					 * buffer descriptor.
					 */
					dataptr = lmpacket->u1.rx.mem_virt +
					    L2RX_FRAME_HDR_LEN;

					/* TCP alignment optimization. */
					mp->b_rptr += 2;

					bcopy(dataptr, mp->b_rptr, pktlen);
					mp->b_wptr = mp->b_rptr + pktlen;

					dcopy = B_TRUE;

					goto sendup;
				}
			}

			/* allocb() failed; drop the frame and recycle. */
			umdevice->recv_discards++;

			s_list_push_tail(freeq, &(lmpacket->link));

			continue;
		}

		if (lm_rcvq_empty == B_TRUE && !(s_list_entry_cnt(srcq))) {
			/*
			 * If the hardware is out of receive buffers and we are
			 * on the last receive packet, we need to drop the
			 * packet.  We do this because we might not be able to
			 * allocate _any_ new receive buffers before the ISR
			 * completes.  If this happens, the driver will enter
			 * an infinite interrupt loop where the hardware is
			 * requesting rx buffers the driver cannot allocate.
			 * So that the system doesn't livelock, we leave one
			 * buffer perpetually available.  Note that we do this
			 * _after_ giving the double copy code a chance to
			 * claim the packet.
			 */

			/*
			 * FIXME -- Make sure to add one more to the rx packet
			 * descriptor count before allocating them.
			 */

			umdevice->recv_discards++;

			s_list_push_tail(freeq, &(lmpacket->link));

			continue;
		}

sendup:

		/*
		 * NOTE(review): if pktlen >= rx_copy_threshold and the
		 * rcvq-empty drop above is not taken, control reaches this
		 * label with mp still NULL and dcopy B_FALSE; the code below
		 * would then operate on a NULL mblk and the descriptor is
		 * never requeued.  Presumably rx_copy_threshold is configured
		 * so that every accepted frame is copied -- TODO confirm.
		 */

		/*
		 * Check if the checksum was offloaded.
		 * If so, pass the result to stack.
		 */
		ofld_flags = 0;
		if ((umdevice->dev_var.enabled_oflds &
		    LM_OFFLOAD_RX_IP_CKSUM) &&
		    (lmpacket->u1.rx.flags & LM_RX_FLAG_IP_CKSUM_IS_GOOD)) {
			ofld_flags |= HCK_IPV4_HDRCKSUM_OK;
		}

		if (((umdevice->dev_var.enabled_oflds &
		    LM_OFFLOAD_RX_TCP_CKSUM) &&
		    (lmpacket->u1.rx.flags & LM_RX_FLAG_TCP_CKSUM_IS_GOOD)) ||
		    ((umdevice->dev_var.enabled_oflds &
		    LM_OFFLOAD_RX_UDP_CKSUM) &&
		    (lmpacket->u1.rx.flags & LM_RX_FLAG_UDP_CKSUM_IS_GOOD))) {
			ofld_flags |= HCK_FULLCKSUM_OK;
		}

		if (ofld_flags != 0) {
			mac_hcksum_set(mp, 0, 0, 0, 0, ofld_flags);
		}

		/*
		 * Push the packet descriptor onto one of the queues before we
		 * attempt to send the packet up.  If the send-up function
		 * hangs during driver unload, we want all our packet
		 * descriptors to be available for deallocation.
		 */
		if (dcopy == B_TRUE) {
			s_list_push_tail(freeq, &(lmpacket->link));
		}

		/* Append the mblk to the chain destined for mac_rx(). */
		if (head == NULL) {
			head = mp;
			tail = mp;
		} else {
			tail->b_next = mp;
			tail = mp;
		}
		tail->b_next = NULL;
	}

	if (head) {
		/* Drop the lock across the upcall to avoid deadlock. */
		mutex_exit(&umdevice->os_param.rcv_mutex);

		mac_rx(umdevice->os_param.macp,
		    umdevice->os_param.rx_resc_handle[ringidx], head);

		mutex_enter(&umdevice->os_param.rcv_mutex);
	}

	recvinfo->processing = B_FALSE;
}
456*eef4f27bSRobert Mustacchi 
457*eef4f27bSRobert Mustacchi static void
bnx_recv_ring_dump(um_device_t * const umdevice,const unsigned int ringidx)458*eef4f27bSRobert Mustacchi bnx_recv_ring_dump(um_device_t *const umdevice, const unsigned int ringidx)
459*eef4f27bSRobert Mustacchi {
460*eef4f27bSRobert Mustacchi 	s_list_t *srcq;
461*eef4f27bSRobert Mustacchi 	s_list_t *dstq;
462*eef4f27bSRobert Mustacchi 	um_rxpacket_t *umpacket;
463*eef4f27bSRobert Mustacchi 
464*eef4f27bSRobert Mustacchi 	srcq = &(_RX_QINFO(umdevice, ringidx).waitq);
465*eef4f27bSRobert Mustacchi 	dstq = &(umdevice->lm_dev.rx_info.chain[ringidx].free_descq);
466*eef4f27bSRobert Mustacchi 
467*eef4f27bSRobert Mustacchi 	/* CONSTANTCONDITION */
468*eef4f27bSRobert Mustacchi 	/* Dump all the packets pending a send-up. */
469*eef4f27bSRobert Mustacchi 	while (1) {
470*eef4f27bSRobert Mustacchi 		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
471*eef4f27bSRobert Mustacchi 		if (umpacket == NULL) {
472*eef4f27bSRobert Mustacchi 			break;
473*eef4f27bSRobert Mustacchi 		}
474*eef4f27bSRobert Mustacchi 
475*eef4f27bSRobert Mustacchi 		s_list_push_tail(dstq, &(umpacket->lmpacket.link));
476*eef4f27bSRobert Mustacchi 	}
477*eef4f27bSRobert Mustacchi }
478*eef4f27bSRobert Mustacchi 
479*eef4f27bSRobert Mustacchi static void
bnx_recv_ring_free(um_device_t * const umdevice,const unsigned int ringidx)480*eef4f27bSRobert Mustacchi bnx_recv_ring_free(um_device_t *const umdevice, const unsigned int ringidx)
481*eef4f27bSRobert Mustacchi {
482*eef4f27bSRobert Mustacchi 	s_list_t *srcq;
483*eef4f27bSRobert Mustacchi 	s_list_t *dstq;
484*eef4f27bSRobert Mustacchi 	um_rxpacket_t *umpacket;
485*eef4f27bSRobert Mustacchi 
486*eef4f27bSRobert Mustacchi 	srcq = &(umdevice->lm_dev.rx_info.chain[ringidx].free_descq);
487*eef4f27bSRobert Mustacchi 
488*eef4f27bSRobert Mustacchi 	dstq = &(_RX_QINFO(umdevice, ringidx).buffq);
489*eef4f27bSRobert Mustacchi 
490*eef4f27bSRobert Mustacchi 	/* CONSTANTCONDITION */
491*eef4f27bSRobert Mustacchi 	/*
492*eef4f27bSRobert Mustacchi 	 * Back out all the packets submitted to the "available for hardware
493*eef4f27bSRobert Mustacchi 	 * use" queue.  Free the buffers associated with the descriptors as
494*eef4f27bSRobert Mustacchi 	 * we go.
495*eef4f27bSRobert Mustacchi 	 */
496*eef4f27bSRobert Mustacchi 	while (1) {
497*eef4f27bSRobert Mustacchi 		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
498*eef4f27bSRobert Mustacchi 		if (umpacket == NULL) {
499*eef4f27bSRobert Mustacchi 			break;
500*eef4f27bSRobert Mustacchi 		}
501*eef4f27bSRobert Mustacchi 
502*eef4f27bSRobert Mustacchi 		bnx_rxbuffer_free(umdevice, umpacket);
503*eef4f27bSRobert Mustacchi 
504*eef4f27bSRobert Mustacchi 		s_list_push_tail(dstq, &umpacket->lmpacket.link);
505*eef4f27bSRobert Mustacchi 	}
506*eef4f27bSRobert Mustacchi }
507*eef4f27bSRobert Mustacchi 
508*eef4f27bSRobert Mustacchi static void
bnx_recv_ring_fini(um_device_t * const umdevice,const unsigned int ringidx)509*eef4f27bSRobert Mustacchi bnx_recv_ring_fini(um_device_t *const umdevice, const unsigned int ringidx)
510*eef4f27bSRobert Mustacchi {
511*eef4f27bSRobert Mustacchi 	s_list_t *srcq;
512*eef4f27bSRobert Mustacchi 	um_rxpacket_t *umpacket;
513*eef4f27bSRobert Mustacchi 	um_recv_qinfo *recvinfo;
514*eef4f27bSRobert Mustacchi 
515*eef4f27bSRobert Mustacchi 	recvinfo = &(_RX_QINFO(umdevice, ringidx));
516*eef4f27bSRobert Mustacchi 
517*eef4f27bSRobert Mustacchi 	srcq = &(recvinfo->buffq);
518*eef4f27bSRobert Mustacchi 
519*eef4f27bSRobert Mustacchi 	/* CONSTANTCONDITION */
520*eef4f27bSRobert Mustacchi 	while (1) {
521*eef4f27bSRobert Mustacchi 		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
522*eef4f27bSRobert Mustacchi 		if (umpacket == NULL) {
523*eef4f27bSRobert Mustacchi 			break;
524*eef4f27bSRobert Mustacchi 		}
525*eef4f27bSRobert Mustacchi 
526*eef4f27bSRobert Mustacchi 		/*
527*eef4f27bSRobert Mustacchi 		 * Intentionally throw the packet away.  The memory was
528*eef4f27bSRobert Mustacchi 		 * allocated by the lower module and will be reclaimed when
529*eef4f27bSRobert Mustacchi 		 * we do our final memory cleanup.
530*eef4f27bSRobert Mustacchi 		 */
531*eef4f27bSRobert Mustacchi 	}
532*eef4f27bSRobert Mustacchi }
533*eef4f27bSRobert Mustacchi 
/*
 * Set up and populate every receive ring: initialize the per-ring queues,
 * attach DMA buffers to the descriptors, and tally how many buffers were
 * actually allocated.  Returns 0 on success, or -1 if fewer than
 * BNX_RECV_INIT_FAIL_THRESH buffers could be allocated in total.
 */
int
bnx_rxpkts_init(um_device_t *const umdevice)
{
	int i;
	int alloccnt;
	lm_device_t *lmdevice;

	lmdevice = &(umdevice->lm_dev);

	alloccnt = 0;

	for (i = RX_CHAIN_IDX0; i < NUM_RX_CHAIN; i++) {
		int post_count = 0;

		bnx_recv_ring_init(umdevice, i);

		bnx_recv_ring_fill(umdevice, i);

		/*
		 * Buffered descriptors end up on the free_descq; its count
		 * is how many buffers we actually managed to allocate.
		 */
		post_count =
		    s_list_entry_cnt(&lmdevice->rx_info.chain[i].free_descq);

		/* Warn (but continue) on a partial allocation. */
		if (post_count != lmdevice->params.l2_rx_desc_cnt[i]) {
			cmn_err(CE_NOTE,
			    "!%s: %d rx buffers requested.  %d allocated.\n",
			    umdevice->dev_name,
			    umdevice->lm_dev.params.l2_rx_desc_cnt[i],
			    post_count);
		}

		alloccnt += post_count;
	}

	/* FIXME -- Set rxbuffer allocation failure threshold. */
	if (alloccnt < BNX_RECV_INIT_FAIL_THRESH) {
		cmn_err(CE_WARN,
		    "%s: Failed to allocate minimum number of RX buffers.\n",
		    umdevice->dev_name);

/* BEGIN CSTYLED */
#if BNX_RECV_INIT_FAIL_THRESH > 1
#warning Need to implement code to free previously allocated rx buffers in bnx_rxpkts_init error path.
#endif
/* END CSTYLED */

		return (-1);
	}

	return (0);
}
583*eef4f27bSRobert Mustacchi 
584*eef4f27bSRobert Mustacchi void
bnx_rxpkts_intr(um_device_t * const umdevice)585*eef4f27bSRobert Mustacchi bnx_rxpkts_intr(um_device_t *const umdevice)
586*eef4f27bSRobert Mustacchi {
587*eef4f27bSRobert Mustacchi 	int i;
588*eef4f27bSRobert Mustacchi 	um_recv_qinfo * recvinfo;
589*eef4f27bSRobert Mustacchi 
590*eef4f27bSRobert Mustacchi 	for (i = RX_CHAIN_IDX0; i < NUM_RX_CHAIN; i++) {
591*eef4f27bSRobert Mustacchi 		recvinfo = &(_RX_QINFO(umdevice, i));
592*eef4f27bSRobert Mustacchi 
593*eef4f27bSRobert Mustacchi 		if (recvinfo->processing == B_FALSE) {
594*eef4f27bSRobert Mustacchi 			/* Send the packets up the stack. */
595*eef4f27bSRobert Mustacchi 			bnx_recv_ring_recv(umdevice, i);
596*eef4f27bSRobert Mustacchi 		}
597*eef4f27bSRobert Mustacchi 	}
598*eef4f27bSRobert Mustacchi }
599*eef4f27bSRobert Mustacchi 
600*eef4f27bSRobert Mustacchi void
bnx_rxpkts_post(um_device_t * const umdevice)601*eef4f27bSRobert Mustacchi bnx_rxpkts_post(um_device_t *const umdevice)
602*eef4f27bSRobert Mustacchi {
603*eef4f27bSRobert Mustacchi 	int i;
604*eef4f27bSRobert Mustacchi 	um_recv_qinfo *recvinfo;
605*eef4f27bSRobert Mustacchi 
606*eef4f27bSRobert Mustacchi 	for (i = RX_CHAIN_IDX0; i < NUM_RX_CHAIN; i++) {
607*eef4f27bSRobert Mustacchi 		recvinfo = &(_RX_QINFO(umdevice, i));
608*eef4f27bSRobert Mustacchi 
609*eef4f27bSRobert Mustacchi 		if (recvinfo->processing == B_FALSE) {
610*eef4f27bSRobert Mustacchi 			/* Allocate new rx buffers. */
611*eef4f27bSRobert Mustacchi 			bnx_recv_ring_fill(umdevice, i);
612*eef4f27bSRobert Mustacchi 
613*eef4f27bSRobert Mustacchi 			/* Submit the rx buffers to the hardware. */
614*eef4f27bSRobert Mustacchi 			(void) lm_post_buffers(&(umdevice->lm_dev), i, NULL);
615*eef4f27bSRobert Mustacchi 		}
616*eef4f27bSRobert Mustacchi 	}
617*eef4f27bSRobert Mustacchi }
618*eef4f27bSRobert Mustacchi 
619*eef4f27bSRobert Mustacchi void
bnx_rxpkts_recycle(um_device_t * const umdevice)620*eef4f27bSRobert Mustacchi bnx_rxpkts_recycle(um_device_t *const umdevice)
621*eef4f27bSRobert Mustacchi {
622*eef4f27bSRobert Mustacchi 	int i;
623*eef4f27bSRobert Mustacchi 
624*eef4f27bSRobert Mustacchi 	for (i = NUM_RX_CHAIN - 1; i >= RX_CHAIN_IDX0; i--) {
625*eef4f27bSRobert Mustacchi 		bnx_recv_ring_dump(umdevice, i);
626*eef4f27bSRobert Mustacchi 
627*eef4f27bSRobert Mustacchi 		lm_abort(&(umdevice->lm_dev), ABORT_OP_RX_CHAIN, i);
628*eef4f27bSRobert Mustacchi 	}
629*eef4f27bSRobert Mustacchi }
630*eef4f27bSRobert Mustacchi 
631*eef4f27bSRobert Mustacchi void
bnx_rxpkts_fini(um_device_t * const umdevice)632*eef4f27bSRobert Mustacchi bnx_rxpkts_fini(um_device_t *const umdevice)
633*eef4f27bSRobert Mustacchi {
634*eef4f27bSRobert Mustacchi 	int i;
635*eef4f27bSRobert Mustacchi 
636*eef4f27bSRobert Mustacchi 	for (i = NUM_RX_CHAIN - 1; i >= RX_CHAIN_IDX0; i--) {
637*eef4f27bSRobert Mustacchi 		/* Dump shouldn't be necessary, but just to be safe... */
638*eef4f27bSRobert Mustacchi 		bnx_recv_ring_dump(umdevice, i);
639*eef4f27bSRobert Mustacchi 
640*eef4f27bSRobert Mustacchi 		/* Recycle shouldn't be necessary, but just to be safe... */
641*eef4f27bSRobert Mustacchi 		lm_abort(&(umdevice->lm_dev), ABORT_OP_RX_CHAIN, i);
642*eef4f27bSRobert Mustacchi 
643*eef4f27bSRobert Mustacchi 		bnx_recv_ring_free(umdevice, i);
644*eef4f27bSRobert Mustacchi 		bnx_recv_ring_fini(umdevice, i);
645*eef4f27bSRobert Mustacchi 	}
646*eef4f27bSRobert Mustacchi }
647