1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <hxge_impl.h>
27 #include <hxge_rxdma.h>
28 
/*
 * Globals: tunable parameters (/etc/system or adb)
 */
33 extern uint32_t hxge_rbr_size;
34 extern uint32_t hxge_rcr_size;
35 extern uint32_t hxge_rbr_spare_size;
36 extern uint32_t hxge_mblks_pending;
37 
38 /*
39  * Tunable to reduce the amount of time spent in the
40  * ISR doing Rx Processing.
41  */
42 extern uint32_t hxge_max_rx_pkts;
43 
44 /*
45  * Tunables to manage the receive buffer blocks.
46  *
47  * hxge_rx_threshold_hi: copy all buffers.
 * hxge_rx_buf_size_type: receive buffer block size type.
49  * hxge_rx_threshold_lo: copy only up to tunable block size type.
50  */
51 extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi;
52 extern hxge_rxbuf_type_t hxge_rx_buf_size_type;
53 extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo;
54 
55 static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep);
56 static void hxge_unmap_rxdma(p_hxge_t hxgep);
57 static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep);
58 static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep);
59 static void hxge_rxdma_hw_stop(p_hxge_t hxgep);
60 static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
61     p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
62     uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p,
63     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
64     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
65 static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
66 	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
67 static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep,
68     uint16_t dma_channel, p_hxge_dma_common_t *dma_rbr_cntl_p,
69     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
70     p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
71 static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
72 	p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
73 static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep,
74 	uint16_t channel, p_hxge_dma_common_t *dma_buf_p,
75 	p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks);
76 static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
77 	p_rx_rbr_ring_t rbr_p);
78 static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
79 	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p);
80 static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel);
81 static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
82 	p_rx_rcr_ring_t	*rcr_p, rdc_stat_t cs);
83 static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p,
84 	p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p,
85 	mblk_t ** mp, mblk_t ** mp_cont, uint32_t *invalid_rcr_entry);
86 static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep,
87 	uint16_t channel);
88 static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t);
89 static void hxge_freeb(p_rx_msg_t);
90 static void hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex,
91     p_hxge_ldv_t ldvp, rdc_stat_t cs);
92 static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index,
93 	p_hxge_ldv_t ldvp, rdc_stat_t cs);
94 static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep,
95 	p_rx_rbr_ring_t rx_dmap);
96 static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep,
97 	uint16_t channel);
98 static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep);
99 
100 hxge_status_t
101 hxge_init_rxdma_channels(p_hxge_t hxgep)
102 {
103 	hxge_status_t		status = HXGE_OK;
104 	block_reset_t		reset_reg;
105 
106 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels"));
107 
108 	/* Reset RDC block from PEU to clear any previous state */
109 	reset_reg.value = 0;
110 	reset_reg.bits.rdc_rst = 1;
111 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
112 	HXGE_DELAY(1000);
113 
114 	status = hxge_map_rxdma(hxgep);
115 	if (status != HXGE_OK) {
116 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
117 		    "<== hxge_init_rxdma: status 0x%x", status));
118 		return (status);
119 	}
120 
121 	status = hxge_rxdma_hw_start_common(hxgep);
122 	if (status != HXGE_OK) {
123 		hxge_unmap_rxdma(hxgep);
124 	}
125 
126 	status = hxge_rxdma_hw_start(hxgep);
127 	if (status != HXGE_OK) {
128 		hxge_unmap_rxdma(hxgep);
129 	}
130 
131 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
132 	    "<== hxge_init_rxdma_channels: status 0x%x", status));
133 	return (status);
134 }
135 
136 void
137 hxge_uninit_rxdma_channels(p_hxge_t hxgep)
138 {
139 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels"));
140 
141 	hxge_rxdma_hw_stop(hxgep);
142 	hxge_unmap_rxdma(hxgep);
143 
	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uninit_rxdma_channels"));
145 }
146 
147 hxge_status_t
148 hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel,
149     rdc_stat_t *cs_p)
150 {
151 	hpi_handle_t	handle;
152 	hpi_status_t	rs = HPI_SUCCESS;
153 	hxge_status_t	status = HXGE_OK;
154 
155 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
156 	    "<== hxge_init_rxdma_channel_cntl_stat"));
157 
158 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
159 	rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p);
160 
161 	if (rs != HPI_SUCCESS) {
162 		status = HXGE_ERROR | rs;
163 	}
164 	return (status);
165 }
166 
167 
168 hxge_status_t
169 hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
170     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
171 {
172 	hpi_handle_t		handle;
173 	rdc_desc_cfg_t 		rdc_desc;
174 	rdc_rcr_cfg_b_t		*cfgb_p;
175 	hpi_status_t		rs = HPI_SUCCESS;
176 
177 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel"));
178 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
179 
180 	/*
181 	 * Use configuration data composed at init time. Write to hardware the
182 	 * receive ring configurations.
183 	 */
184 	rdc_desc.mbox_enable = 1;
185 	rdc_desc.mbox_addr = mbox_p->mbox_addr;
186 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
187 	    "==> hxge_enable_rxdma_channel: mboxp $%p($%p)",
188 	    mbox_p->mbox_addr, rdc_desc.mbox_addr));
189 
190 	rdc_desc.rbr_len = rbr_p->rbb_max;
191 	rdc_desc.rbr_addr = rbr_p->rbr_addr;
192 
193 	switch (hxgep->rx_bksize_code) {
194 	case RBR_BKSIZE_4K:
195 		rdc_desc.page_size = SIZE_4KB;
196 		break;
197 	case RBR_BKSIZE_8K:
198 		rdc_desc.page_size = SIZE_8KB;
199 		break;
200 	}
201 
202 	rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0;
203 	rdc_desc.valid0 = 1;
204 
205 	rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1;
206 	rdc_desc.valid1 = 1;
207 
208 	rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2;
209 	rdc_desc.valid2 = 1;
210 
211 	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
212 	rdc_desc.offset = rcr_p->sw_priv_hdr_len;
213 
214 	rdc_desc.rcr_len = rcr_p->comp_size;
215 	rdc_desc.rcr_addr = rcr_p->rcr_addr;
216 
217 	cfgb_p = &(rcr_p->rcr_cfgb);
218 	rdc_desc.rcr_threshold = cfgb_p->bits.pthres;
219 	rdc_desc.rcr_timeout = cfgb_p->bits.timeout;
220 	rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout;
221 
222 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
223 	    "rbr_len qlen %d pagesize code %d rcr_len %d",
224 	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
225 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
226 	    "size 0 %d size 1 %d size 2 %d",
227 	    rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1,
228 	    rbr_p->hpi_pkt_buf_size2));
229 
230 	rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
231 	if (rs != HPI_SUCCESS) {
232 		return (HXGE_ERROR | rs);
233 	}
234 
235 	/*
236 	 * Enable the timeout and threshold.
237 	 */
238 	rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
239 	    rdc_desc.rcr_threshold);
240 	if (rs != HPI_SUCCESS) {
241 		return (HXGE_ERROR | rs);
242 	}
243 
244 	rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
245 	    rdc_desc.rcr_timeout);
246 	if (rs != HPI_SUCCESS) {
247 		return (HXGE_ERROR | rs);
248 	}
249 
250 	/* Enable the DMA */
251 	rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
252 	if (rs != HPI_SUCCESS) {
253 		return (HXGE_ERROR | rs);
254 	}
255 
256 	/* Kick the DMA engine */
257 	hpi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
258 
259 	/* Clear the rbr empty bit */
260 	(void) hpi_rxdma_channel_rbr_empty_clear(handle, channel);
261 
262 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel"));
263 
264 	return (HXGE_OK);
265 }
266 
267 static hxge_status_t
268 hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel)
269 {
270 	hpi_handle_t handle;
271 	hpi_status_t rs = HPI_SUCCESS;
272 
273 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel"));
274 
275 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
276 
277 	/* disable the DMA */
278 	rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
279 	if (rs != HPI_SUCCESS) {
280 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
281 		    "<== hxge_disable_rxdma_channel:failed (0x%x)", rs));
282 		return (HXGE_ERROR | rs);
283 	}
284 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel"));
285 	return (HXGE_OK);
286 }
287 
288 hxge_status_t
289 hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel)
290 {
291 	hpi_handle_t	handle;
292 	hxge_status_t	status = HXGE_OK;
293 
294 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
295 	    "==> hxge_rxdma_channel_rcrflush"));
296 
297 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
298 	hpi_rxdma_rdc_rcr_flush(handle, channel);
299 
300 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
301 	    "<== hxge_rxdma_channel_rcrflush"));
302 	return (status);
303 
304 }
305 
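/*
 * Helpers for the binary search in hxge_rxbuf_pp_to_vp(): base_side and
 * end_side each take TO_LEFT (-1) or TO_RIGHT (+1), and their sum classifies
 * the target address against the current chunk (IN_MIDDLE means it falls
 * within the chunk, BOTH_RIGHT means search the right half, BOTH_LEFT means
 * search the left half).
 */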
306 #define	MID_INDEX(l, r) ((r + l + 1) >> 1)
307 
308 #define	TO_LEFT -1
309 #define	TO_RIGHT 1
310 #define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
311 #define	BOTH_LEFT (TO_LEFT + TO_LEFT)
312 #define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
313 #define	NO_HINT 0xffffffff
314 
315 /*ARGSUSED*/
316 hxge_status_t
317 hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p,
318     uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
319     uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
320 {
321 	int			bufsize;
322 	uint64_t		pktbuf_pp;
323 	uint64_t		dvma_addr;
324 	rxring_info_t		*ring_info;
325 	int			base_side, end_side;
326 	int			r_index, l_index, anchor_index;
327 	int			found, search_done;
328 	uint32_t		offset, chunk_size, block_size, page_size_mask;
329 	uint32_t		chunk_index, block_index, total_index;
330 	int			max_iterations, iteration;
331 	rxbuf_index_info_t	*bufinfo;
332 
333 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp"));
334 
335 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
336 	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
337 	    pkt_buf_addr_pp, pktbufsz_type));
338 
339 #if defined(__i386)
340 	pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
341 #else
342 	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
343 #endif
344 
345 	switch (pktbufsz_type) {
346 	case 0:
347 		bufsize = rbr_p->pkt_buf_size0;
348 		break;
349 	case 1:
350 		bufsize = rbr_p->pkt_buf_size1;
351 		break;
352 	case 2:
353 		bufsize = rbr_p->pkt_buf_size2;
354 		break;
355 	case RCR_SINGLE_BLOCK:
356 		bufsize = 0;
357 		anchor_index = 0;
358 		break;
359 	default:
360 		return (HXGE_ERROR);
361 	}
362 
363 	if (rbr_p->num_blocks == 1) {
364 		anchor_index = 0;
365 		ring_info = rbr_p->ring_info;
366 		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
367 
368 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
369 		    "==> hxge_rxbuf_pp_to_vp: (found, 1 block) "
370 		    "buf_pp $%p btype %d anchor_index %d bufinfo $%p",
371 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo));
372 
373 		goto found_index;
374 	}
375 
376 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
377 	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d",
378 	    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
379 
380 	ring_info = rbr_p->ring_info;
381 	found = B_FALSE;
382 	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
383 	iteration = 0;
384 	max_iterations = ring_info->max_iterations;
385 
	/*
	 * First check whether this block has been seen recently. This is
	 * indicated by a hint that is initialized when the first buffer of
	 * the block is seen and reset when the last buffer of the block has
	 * been processed. As three buffer sizes are supported, three hints
	 * are kept. The idea behind the hints is that once the hardware uses
	 * a block for a buffer of a given size, it uses that block
	 * exclusively for that size until the block is exhausted. It is
	 * assumed that only a single block is in use for a given buffer size
	 * at any given time.
	 */
397 	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
398 		anchor_index = ring_info->hint[pktbufsz_type];
399 		dvma_addr = bufinfo[anchor_index].dvma_addr;
400 		chunk_size = bufinfo[anchor_index].buf_size;
401 		if ((pktbuf_pp >= dvma_addr) &&
402 		    (pktbuf_pp < (dvma_addr + chunk_size))) {
403 			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block.
			 * If so, reset the hint for this size.
			 */
408 
409 			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
410 				ring_info->hint[pktbufsz_type] = NO_HINT;
411 		}
412 	}
413 
414 	if (found == B_FALSE) {
415 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
416 		    "==> hxge_rxbuf_pp_to_vp: (!found)"
417 		    "buf_pp $%p btype %d anchor_index %d",
418 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
419 
		/*
		 * This is the first buffer of the block for this size, so the
		 * whole information array must be searched. A binary search
		 * is used; it assumes that the information is already sorted
		 * in increasing order: info[0] < info[1] < ... < info[n-1],
		 * where n is the size of the information array.
		 */
428 		r_index = rbr_p->num_blocks - 1;
429 		l_index = 0;
430 		search_done = B_FALSE;
431 		anchor_index = MID_INDEX(r_index, l_index);
432 		while (search_done == B_FALSE) {
433 			if ((r_index == l_index) ||
434 			    (iteration >= max_iterations))
435 				search_done = B_TRUE;
436 
437 			end_side = TO_RIGHT;	/* to the right */
438 			base_side = TO_LEFT;	/* to the left */
439 			/* read the DVMA address information and sort it */
440 			dvma_addr = bufinfo[anchor_index].dvma_addr;
441 			chunk_size = bufinfo[anchor_index].buf_size;
442 
443 			HXGE_DEBUG_MSG((hxgep, RX2_CTL,
444 			    "==> hxge_rxbuf_pp_to_vp: (searching)"
445 			    "buf_pp $%p btype %d "
446 			    "anchor_index %d chunk_size %d dvmaaddr $%p",
447 			    pkt_buf_addr_pp, pktbufsz_type, anchor_index,
448 			    chunk_size, dvma_addr));
449 
450 			if (pktbuf_pp >= dvma_addr)
451 				base_side = TO_RIGHT;	/* to the right */
452 			if (pktbuf_pp < (dvma_addr + chunk_size))
453 				end_side = TO_LEFT;	/* to the left */
454 
455 			switch (base_side + end_side) {
456 			case IN_MIDDLE:
457 				/* found */
458 				found = B_TRUE;
459 				search_done = B_TRUE;
460 				if ((pktbuf_pp + bufsize) <
461 				    (dvma_addr + chunk_size))
462 					ring_info->hint[pktbufsz_type] =
463 					    bufinfo[anchor_index].buf_index;
464 				break;
465 			case BOTH_RIGHT:
466 				/* not found: go to the right */
467 				l_index = anchor_index + 1;
468 				anchor_index = MID_INDEX(r_index, l_index);
469 				break;
470 
471 			case BOTH_LEFT:
472 				/* not found: go to the left */
473 				r_index = anchor_index - 1;
474 				anchor_index = MID_INDEX(r_index, l_index);
475 				break;
476 			default:	/* should not come here */
477 				return (HXGE_ERROR);
478 			}
479 			iteration++;
480 		}
481 
482 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
483 		    "==> hxge_rxbuf_pp_to_vp: (search done)"
484 		    "buf_pp $%p btype %d anchor_index %d",
485 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
486 	}
487 
488 	if (found == B_FALSE) {
489 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
490 		    "==> hxge_rxbuf_pp_to_vp: (search failed)"
491 		    "buf_pp $%p btype %d anchor_index %d",
492 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
493 		return (HXGE_ERROR);
494 	}
495 
496 found_index:
497 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
498 	    "==> hxge_rxbuf_pp_to_vp: (FOUND1)"
499 	    "buf_pp $%p btype %d bufsize %d anchor_index %d",
500 	    pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index));
501 
502 	/* index of the first block in this chunk */
503 	chunk_index = bufinfo[anchor_index].start_index;
504 	dvma_addr = bufinfo[anchor_index].dvma_addr;
505 	page_size_mask = ring_info->block_size_mask;
506 
507 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
508 	    "==> hxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
509 	    "buf_pp $%p btype %d bufsize %d "
510 	    "anchor_index %d chunk_index %d dvma $%p",
511 	    pkt_buf_addr_pp, pktbufsz_type, bufsize,
512 	    anchor_index, chunk_index, dvma_addr));
513 
514 	offset = pktbuf_pp - dvma_addr;	/* offset within the chunk */
515 	block_size = rbr_p->block_size;	/* System  block(page) size */
516 
517 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
518 	    "==> hxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
519 	    "buf_pp $%p btype %d bufsize %d "
520 	    "anchor_index %d chunk_index %d dvma $%p "
521 	    "offset %d block_size %d",
522 	    pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index,
523 	    chunk_index, dvma_addr, offset, block_size));
524 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index"));
525 
526 	block_index = (offset / block_size);	/* index within chunk */
527 	total_index = chunk_index + block_index;
528 
529 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
530 	    "==> hxge_rxbuf_pp_to_vp: "
531 	    "total_index %d dvma_addr $%p "
532 	    "offset %d block_size %d "
533 	    "block_index %d ",
534 	    total_index, dvma_addr, offset, block_size, block_index));
535 
536 #if defined(__i386)
537 	*pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
538 	    (uint32_t)offset);
539 #else
540 	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
541 	    offset);
542 #endif
543 
544 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
545 	    "==> hxge_rxbuf_pp_to_vp: "
546 	    "total_index %d dvma_addr $%p "
547 	    "offset %d block_size %d "
548 	    "block_index %d "
549 	    "*pkt_buf_addr_p $%p",
550 	    total_index, dvma_addr, offset, block_size,
551 	    block_index, *pkt_buf_addr_p));
552 
553 	*msg_index = total_index;
554 	*bufoffset = (offset & page_size_mask);
555 
556 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
557 	    "==> hxge_rxbuf_pp_to_vp: get msg index: "
558 	    "msg_index %d bufoffset_index %d",
559 	    *msg_index, *bufoffset));
560 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp"));
561 
562 	return (HXGE_OK);
563 }
564 
565 
/*
 * Comparison function passed to hxge_ksort() to order
 * receive buffer chunks by increasing DVMA address.
 */
570 static int
571 hxge_sort_compare(const void *p1, const void *p2)
572 {
573 
574 	rxbuf_index_info_t *a, *b;
575 
576 	a = (rxbuf_index_info_t *)p1;
577 	b = (rxbuf_index_info_t *)p2;
578 
579 	if (a->dvma_addr > b->dvma_addr)
580 		return (1);
581 	if (a->dvma_addr < b->dvma_addr)
582 		return (-1);
583 	return (0);
584 }
585 
586 /*
587  * Grabbed this sort implementation from common/syscall/avl.c
588  *
589  * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
590  * v = Ptr to array/vector of objs
591  * n = # objs in the array
 * s = size of each obj (must be a multiple of the word size)
593  * f = ptr to function to compare two objs
 *	returns (-1 = less than, 0 = equal, 1 = greater than)
595  */
596 void
597 hxge_ksort(caddr_t v, int n, int s, int (*f) ())
598 {
599 	int		g, i, j, ii;
600 	unsigned int	*p1, *p2;
601 	unsigned int	tmp;
602 
603 	/* No work to do */
604 	if (v == NULL || n <= 1)
605 		return;
606 	/* Sanity check on arguments */
607 	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
608 	ASSERT(s > 0);
609 
610 	for (g = n / 2; g > 0; g /= 2) {
611 		for (i = g; i < n; i++) {
612 			for (j = i - g; j >= 0 &&
613 			    (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) {
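				/* Swap the two objects word by word. */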
614 				p1 = (unsigned *)(v + j * s);
615 				p2 = (unsigned *)(v + (j + g) * s);
616 				for (ii = 0; ii < s / 4; ii++) {
617 					tmp = *p1;
618 					*p1++ = *p2;
619 					*p2++ = tmp;
620 				}
621 			}
622 		}
623 	}
624 }
625 
626 /*
627  * Initialize data structures required for rxdma
628  * buffer dvma->vmem address lookup
629  */
630 /*ARGSUSED*/
631 static hxge_status_t
632 hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp)
633 {
634 	int		index;
635 	rxring_info_t	*ring_info;
636 	int		max_iteration = 0, max_index = 0;
637 
638 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init"));
639 
640 	ring_info = rbrp->ring_info;
641 	ring_info->hint[0] = NO_HINT;
642 	ring_info->hint[1] = NO_HINT;
643 	ring_info->hint[2] = NO_HINT;
644 	max_index = rbrp->num_blocks;
645 
646 	/* read the DVMA address information and sort it */
647 	/* do init of the information array */
648 
649 	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
650 	    " hxge_rxbuf_index_info_init Sort ptrs"));
651 
652 	/* sort the array */
653 	hxge_ksort((void *) ring_info->buffer, max_index,
654 	    sizeof (rxbuf_index_info_t), hxge_sort_compare);
655 
656 	for (index = 0; index < max_index; index++) {
657 		HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
658 		    " hxge_rxbuf_index_info_init: sorted chunk %d "
659 		    " ioaddr $%p kaddr $%p size %x",
660 		    index, ring_info->buffer[index].dvma_addr,
661 		    ring_info->buffer[index].kaddr,
662 		    ring_info->buffer[index].buf_size));
663 	}
664 
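	/*
	 * The binary search over num_blocks sorted chunks needs at most
	 * about log2(num_blocks) + 1 probes; use that as the iteration cap.
	 */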
665 	max_iteration = 0;
666 	while (max_index >= (1ULL << max_iteration))
667 		max_iteration++;
668 	ring_info->max_iterations = max_iteration + 1;
669 
670 	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
671 	    " hxge_rxbuf_index_info_init Find max iter %d",
672 	    ring_info->max_iterations));
673 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init"));
674 
675 	return (HXGE_OK);
676 }
677 
678 /*ARGSUSED*/
679 void
680 hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p)
681 {
682 #ifdef	HXGE_DEBUG
683 
684 	uint32_t bptr;
685 	uint64_t pp;
686 
687 	bptr = entry_p->bits.pkt_buf_addr;
688 
689 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
690 	    "\trcr entry $%p "
691 	    "\trcr entry 0x%0llx "
692 	    "\trcr entry 0x%08x "
693 	    "\trcr entry 0x%08x "
694 	    "\tvalue 0x%0llx\n"
695 	    "\tmulti = %d\n"
696 	    "\tpkt_type = 0x%x\n"
697 	    "\terror = 0x%04x\n"
698 	    "\tl2_len = %d\n"
699 	    "\tpktbufsize = %d\n"
700 	    "\tpkt_buf_addr = $%p\n"
701 	    "\tpkt_buf_addr (<< 6) = $%p\n",
702 	    entry_p,
703 	    *(int64_t *)entry_p,
704 	    *(int32_t *)entry_p,
705 	    *(int32_t *)((char *)entry_p + 32),
706 	    entry_p->value,
707 	    entry_p->bits.multi,
708 	    entry_p->bits.pkt_type,
709 	    entry_p->bits.error,
710 	    entry_p->bits.l2_len,
711 	    entry_p->bits.pktbufsz,
712 	    bptr,
713 	    entry_p->bits.pkt_buf_addr_l));
714 
715 	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
716 	    RCR_PKT_BUF_ADDR_SHIFT;
717 
718 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
719 	    pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
720 #endif
721 }
722 
723 /*ARGSUSED*/
724 void
725 hxge_rxdma_stop(p_hxge_t hxgep)
726 {
727 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop"));
728 
729 	(void) hxge_rx_vmac_disable(hxgep);
730 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
731 
732 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop"));
733 }
734 
735 void
736 hxge_rxdma_stop_reinit(p_hxge_t hxgep)
737 {
738 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit"));
739 
740 	(void) hxge_rxdma_stop(hxgep);
741 	(void) hxge_uninit_rxdma_channels(hxgep);
742 	(void) hxge_init_rxdma_channels(hxgep);
743 
744 	(void) hxge_rx_vmac_enable(hxgep);
745 
746 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit"));
747 }
748 
749 hxge_status_t
750 hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
751 {
752 	int			i, ndmas;
753 	uint16_t		channel;
754 	p_rx_rbr_rings_t	rx_rbr_rings;
755 	p_rx_rbr_ring_t		*rbr_rings;
756 	hpi_handle_t		handle;
757 	hpi_status_t		rs = HPI_SUCCESS;
758 	hxge_status_t		status = HXGE_OK;
759 
760 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
761 	    "==> hxge_rxdma_hw_mode: mode %d", enable));
762 
763 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
764 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
765 		    "<== hxge_rxdma_mode: not initialized"));
766 		return (HXGE_ERROR);
767 	}
768 
769 	rx_rbr_rings = hxgep->rx_rbr_rings;
770 	if (rx_rbr_rings == NULL) {
771 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
772 		    "<== hxge_rxdma_mode: NULL ring pointer"));
773 		return (HXGE_ERROR);
774 	}
775 
776 	if (rx_rbr_rings->rbr_rings == NULL) {
777 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
778 		    "<== hxge_rxdma_mode: NULL rbr rings pointer"));
779 		return (HXGE_ERROR);
780 	}
781 
782 	ndmas = rx_rbr_rings->ndmas;
783 	if (!ndmas) {
784 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
785 		    "<== hxge_rxdma_mode: no channel"));
786 		return (HXGE_ERROR);
787 	}
788 
789 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
790 	    "==> hxge_rxdma_mode (ndmas %d)", ndmas));
791 
792 	rbr_rings = rx_rbr_rings->rbr_rings;
793 
794 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
795 
796 	for (i = 0; i < ndmas; i++) {
797 		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
798 			continue;
799 		}
800 		channel = rbr_rings[i]->rdc;
801 		if (enable) {
802 			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
803 			    "==> hxge_rxdma_hw_mode: channel %d (enable)",
804 			    channel));
805 			rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
806 		} else {
807 			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
808 			    "==> hxge_rxdma_hw_mode: channel %d (disable)",
809 			    channel));
810 			rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
811 		}
812 	}
813 
814 	status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
815 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
816 	    "<== hxge_rxdma_hw_mode: status 0x%x", status));
817 
818 	return (status);
819 }
820 
821 int
822 hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel)
823 {
824 	int			i, ndmas;
825 	uint16_t		rdc;
826 	p_rx_rbr_rings_t 	rx_rbr_rings;
827 	p_rx_rbr_ring_t		*rbr_rings;
828 
829 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
830 	    "==> hxge_rxdma_get_ring_index: channel %d", channel));
831 
832 	rx_rbr_rings = hxgep->rx_rbr_rings;
833 	if (rx_rbr_rings == NULL) {
834 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
835 		    "<== hxge_rxdma_get_ring_index: NULL ring pointer"));
836 		return (-1);
837 	}
838 
839 	ndmas = rx_rbr_rings->ndmas;
840 	if (!ndmas) {
841 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
842 		    "<== hxge_rxdma_get_ring_index: no channel"));
843 		return (-1);
844 	}
845 
846 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
847 	    "==> hxge_rxdma_get_ring_index (ndmas %d)", ndmas));
848 
849 	rbr_rings = rx_rbr_rings->rbr_rings;
850 	for (i = 0; i < ndmas; i++) {
851 		rdc = rbr_rings[i]->rdc;
852 		if (channel == rdc) {
			HXGE_DEBUG_MSG((hxgep, RX_CTL,
			    "==> hxge_rxdma_get_ring_index: "
			    "channel %d (index %d) "
			    "ring $%p", channel, i, rbr_rings[i]));
857 
858 			return (i);
859 		}
860 	}
861 
862 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "<== hxge_rxdma_get_ring_index: not found"));
864 
865 	return (-1);
866 }
867 
868 /*
869  * Static functions start here.
870  */
871 static p_rx_msg_t
872 hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p)
873 {
874 	p_rx_msg_t		hxge_mp = NULL;
875 	p_hxge_dma_common_t	dmamsg_p;
876 	uchar_t			*buffer;
877 
878 	hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
879 	if (hxge_mp == NULL) {
880 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
881 		    "Allocation of a rx msg failed."));
882 		goto hxge_allocb_exit;
883 	}
884 
885 	hxge_mp->use_buf_pool = B_FALSE;
886 	if (dmabuf_p) {
887 		hxge_mp->use_buf_pool = B_TRUE;
888 
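		/*
		 * Carve this message's buffer off the front of the shared DMA
		 * chunk: clone the chunk descriptor for this one block, then
		 * advance the chunk's kernel/IO addresses, length, offset and
		 * DMA cookie past the carved-out region.
		 */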
889 		dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma;
890 		*dmamsg_p = *dmabuf_p;
891 		dmamsg_p->nblocks = 1;
892 		dmamsg_p->block_size = size;
893 		dmamsg_p->alength = size;
894 		buffer = (uchar_t *)dmabuf_p->kaddrp;
895 
896 		dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size);
897 		dmabuf_p->ioaddr_pp = (void *)
898 		    ((char *)dmabuf_p->ioaddr_pp + size);
899 
900 		dmabuf_p->alength -= size;
901 		dmabuf_p->offset += size;
902 		dmabuf_p->dma_cookie.dmac_laddress += size;
903 		dmabuf_p->dma_cookie.dmac_size -= size;
904 	} else {
905 		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
906 		if (buffer == NULL) {
907 			HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
908 			    "Allocation of a receive page failed."));
909 			goto hxge_allocb_fail1;
910 		}
911 	}
912 
913 	hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb);
914 	if (hxge_mp->rx_mblk_p == NULL) {
915 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed."));
916 		goto hxge_allocb_fail2;
917 	}
918 	hxge_mp->buffer = buffer;
919 	hxge_mp->block_size = size;
920 	hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb;
921 	hxge_mp->freeb.free_arg = (caddr_t)hxge_mp;
922 	hxge_mp->ref_cnt = 1;
923 	hxge_mp->free = B_TRUE;
924 	hxge_mp->rx_use_bcopy = B_FALSE;
925 
926 	atomic_inc_32(&hxge_mblks_pending);
927 
928 	goto hxge_allocb_exit;
929 
930 hxge_allocb_fail2:
931 	if (!hxge_mp->use_buf_pool) {
932 		KMEM_FREE(buffer, size);
933 	}
934 hxge_allocb_fail1:
935 	KMEM_FREE(hxge_mp, sizeof (rx_msg_t));
936 	hxge_mp = NULL;
937 
938 hxge_allocb_exit:
939 	return (hxge_mp);
940 }
941 
942 p_mblk_t
943 hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
944 {
945 	p_mblk_t mp;
946 
947 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb"));
948 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "hxge_mp = $%p "
949 	    "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size));
950 
951 	mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb);
952 	if (mp == NULL) {
953 		HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
954 		goto hxge_dupb_exit;
955 	}
956 
957 	atomic_inc_32(&hxge_mp->ref_cnt);
958 
959 hxge_dupb_exit:
960 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
961 	return (mp);
962 }
963 
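/*
 * Unlike hxge_dupb(), which loans the receive buffer up the stack via
 * desballoc(), this variant allocates a fresh mblk and bcopies the data so
 * that the receive buffer can be reposted immediately.
 */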
964 p_mblk_t
965 hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
966 {
967 	p_mblk_t	mp;
968 	uchar_t		*dp;
969 
970 	mp = allocb(size + HXGE_RXBUF_EXTRA, 0);
971 	if (mp == NULL) {
		HXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
973 		goto hxge_dupb_bcopy_exit;
974 	}
975 	dp = mp->b_rptr = mp->b_rptr + HXGE_RXBUF_EXTRA;
976 	bcopy((void *) &hxge_mp->buffer[offset], dp, size);
977 	mp->b_wptr = dp + size;
978 
979 hxge_dupb_bcopy_exit:
980 
981 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
982 
983 	return (mp);
984 }
985 
986 void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p,
987     p_rx_msg_t rx_msg_p);
988 
989 void
990 hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
991 {
992 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page"));
993 
994 	/* Reuse this buffer */
995 	rx_msg_p->free = B_FALSE;
996 	rx_msg_p->cur_usage_cnt = 0;
997 	rx_msg_p->max_usage_cnt = 0;
998 	rx_msg_p->pkt_buf_size = 0;
999 
1000 	if (rx_rbr_p->rbr_use_bcopy) {
1001 		rx_msg_p->rx_use_bcopy = B_FALSE;
1002 		atomic_dec_32(&rx_rbr_p->rbr_consumed);
1003 	}
1004 
1005 	/*
1006 	 * Get the rbr header pointer and its offset index.
1007 	 */
1008 	MUTEX_ENTER(&rx_rbr_p->post_lock);
1009 	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
1010 	    rx_rbr_p->rbr_wrap_mask);
1011 	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
1012 	MUTEX_EXIT(&rx_rbr_p->post_lock);
1013 
1014 	hpi_rxdma_rdc_rbr_kick(HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc, 1);
1015 
1016 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1017 	    "<== hxge_post_page (channel %d post_next_index %d)",
1018 	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
1019 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page"));
1020 }
1021 
1022 void
1023 hxge_freeb(p_rx_msg_t rx_msg_p)
1024 {
1025 	size_t		size;
1026 	uchar_t		*buffer = NULL;
1027 	int		ref_cnt;
1028 	boolean_t	free_state = B_FALSE;
1029 	rx_rbr_ring_t	*ring = rx_msg_p->rx_rbr_p;
1030 
1031 	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb"));
1032 	HXGE_DEBUG_MSG((NULL, MEM2_CTL,
1033 	    "hxge_freeb:rx_msg_p = $%p (block pending %d)",
1034 	    rx_msg_p, hxge_mblks_pending));
1035 
	/*
	 * First we need to get the free state, then atomically
	 * decrement the reference count to prevent a race
	 * condition with the interrupt thread that is processing
	 * a loaned-up buffer block.
	 */
1042 	free_state = rx_msg_p->free;
1043 	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
1044 	if (!ref_cnt) {
1045 		atomic_dec_32(&hxge_mblks_pending);
1046 
1047 		buffer = rx_msg_p->buffer;
1048 		size = rx_msg_p->block_size;
1049 
1050 		HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: "
1051 		    "will free: rx_msg_p = $%p (block pending %d)",
1052 		    rx_msg_p, hxge_mblks_pending));
1053 
1054 		if (!rx_msg_p->use_buf_pool) {
1055 			KMEM_FREE(buffer, size);
1056 		}
1057 
1058 		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1059 		if (ring) {
1060 			/*
1061 			 * Decrement the receive buffer ring's reference
1062 			 * count, too.
1063 			 */
1064 			atomic_dec_32(&ring->rbr_ref_cnt);
1065 
			/*
			 * Free the receive buffer ring iff
			 * 1. all the receive buffers have been freed, and
			 * 2. we are in the proper state, that is, the ring
			 *    has already been unmapped (RBR_UNMAPPED).
			 */
1072 			if (ring->rbr_ref_cnt == 0 &&
1073 			    ring->rbr_state == RBR_UNMAPPED) {
1074 				KMEM_FREE(ring, sizeof (*ring));
1075 			}
1076 		}
1077 		goto hxge_freeb_exit;
1078 	}
1079 
1080 	/*
1081 	 * Repost buffer.
1082 	 */
1083 	if ((ring != NULL) && free_state && (ref_cnt == 1)) {
1084 		HXGE_DEBUG_MSG((NULL, RX_CTL,
1085 		    "hxge_freeb: post page $%p:", rx_msg_p));
1086 		if (ring->rbr_state == RBR_POSTING)
1087 			hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p);
1088 	}
1089 
1090 hxge_freeb_exit:
1091 	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb"));
1092 }
1093 
1094 uint_t
1095 hxge_rx_intr(caddr_t arg1, caddr_t arg2)
1096 {
1097 	p_hxge_ldv_t		ldvp = (p_hxge_ldv_t)arg1;
1098 	p_hxge_t		hxgep = (p_hxge_t)arg2;
1099 	p_hxge_ldg_t		ldgp;
1100 	uint8_t			channel;
1101 	hpi_handle_t		handle;
1102 	rdc_stat_t		cs;
1103 	uint_t			serviced = DDI_INTR_UNCLAIMED;
1104 
1105 	if (ldvp == NULL) {
1106 		HXGE_DEBUG_MSG((NULL, RX_INT_CTL,
1107 		    "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
1108 		return (DDI_INTR_UNCLAIMED);
1109 	}
1110 
1111 	if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
1112 		hxgep = ldvp->hxgep;
1113 	}
1114 
1115 	/*
1116 	 * If the interface is not started, just swallow the interrupt
1117 	 * for the logical device and don't rearm it.
1118 	 */
1119 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED)
1120 		return (DDI_INTR_CLAIMED);
1121 
1122 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1123 	    "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
1124 
1125 	/*
1126 	 * This interrupt handler is for a specific receive dma channel.
1127 	 */
1128 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1129 
1130 	/*
1131 	 * Get the control and status for this channel.
1132 	 */
1133 	channel = ldvp->channel;
1134 	ldgp = ldvp->ldgp;
1135 	RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
1136 
1137 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_intr:channel %d "
1138 	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
1139 	    channel, cs.value, cs.bits.rcr_to, cs.bits.rcr_thres));
1140 
1141 	hxge_rx_pkts_vring(hxgep, ldvp->vdma_index, ldvp, cs);
1142 	serviced = DDI_INTR_CLAIMED;
1143 
1144 	/* error events. */
1145 	if (cs.value & RDC_STAT_ERROR) {
1146 		(void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);
1147 	}
1148 
1149 hxge_intr_exit:
1150 	/*
1151 	 * Enable the mailbox update interrupt if we want to use mailbox. We
1152 	 * probably don't need to use mailbox as it only saves us one pio read.
1153 	 * Also write 1 to rcrthres and rcrto to clear these two edge triggered
1154 	 * bits.
1155 	 */
1156 	cs.value &= RDC_STAT_WR1C;
1157 	cs.bits.mex = 1;
1158 	cs.bits.ptrread = 0;
1159 	cs.bits.pktread = 0;
1160 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
1161 
1162 	/*
1163 	 * Rearm this logical group if this is a single device group.
1164 	 */
1165 	if (ldgp->nldvs == 1) {
1166 		ld_intr_mgmt_t mgm;
1167 
1168 		mgm.value = 0;
1169 		mgm.bits.arm = 1;
1170 		mgm.bits.timer = ldgp->ldg_timer;
1171 		HXGE_REG_WR32(handle,
1172 		    LD_INTR_MGMT + LDSV_OFFSET(ldgp->ldg), mgm.value);
1173 	}
1174 
1175 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1176 	    "<== hxge_rx_intr: serviced %d", serviced));
1177 
1178 	return (serviced);
1179 }
1180 
1181 static void
1182 hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
1183     rdc_stat_t cs)
1184 {
1185 	p_mblk_t		mp;
1186 	p_rx_rcr_ring_t		rcrp;
1187 
1188 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts_vring"));
1189 	if ((mp = hxge_rx_pkts(hxgep, vindex, ldvp, &rcrp, cs)) == NULL) {
1190 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1191 		    "<== hxge_rx_pkts_vring: no mp"));
1192 		return;
1193 	}
1194 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts_vring: $%p", mp));
1195 
1196 #ifdef  HXGE_DEBUG
1197 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1198 	    "==> hxge_rx_pkts_vring:calling mac_rx (NEMO) "
1199 	    "LEN %d mp $%p mp->b_next $%p rcrp $%p "
1200 	    "mac_handle $%p",
1201 	    (mp->b_wptr - mp->b_rptr), mp, mp->b_next,
1202 	    rcrp, rcrp->rcr_mac_handle));
1203 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1204 	    "==> hxge_rx_pkts_vring: dump packets "
1205 	    "(mp $%p b_rptr $%p b_wptr $%p):\n %s",
1206 	    mp, mp->b_rptr, mp->b_wptr,
1207 	    hxge_dump_packet((char *)mp->b_rptr, 64)));
1208 
1209 	if (mp->b_cont) {
1210 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1211 		    "==> hxge_rx_pkts_vring: dump b_cont packets "
1212 		    "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
1213 		    mp->b_cont, mp->b_cont->b_rptr, mp->b_cont->b_wptr,
1214 		    hxge_dump_packet((char *)mp->b_cont->b_rptr,
1215 		    mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
1216 		}
1217 	if (mp->b_next) {
1218 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1219 		    "==> hxge_rx_pkts_vring: dump next packets "
1220 		    "(b_rptr $%p): %s",
1221 		    mp->b_next->b_rptr,
1222 		    hxge_dump_packet((char *)mp->b_next->b_rptr, 64)));
1223 	}
1224 #endif
1225 
1226 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1227 	    "==> hxge_rx_pkts_vring: send packet to stack"));
1228 	mac_rx(hxgep->mach, rcrp->rcr_mac_handle, mp);
1229 
1230 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_pkts_vring"));
1231 }
1232 
1233 /*ARGSUSED*/
1234 mblk_t *
1235 hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
1236     p_rx_rcr_ring_t *rcrp, rdc_stat_t cs)
1237 {
1238 	hpi_handle_t		handle;
1239 	uint8_t			channel;
1240 	p_rx_rcr_rings_t	rx_rcr_rings;
1241 	p_rx_rcr_ring_t		rcr_p;
1242 	uint32_t		comp_rd_index;
1243 	p_rcr_entry_t		rcr_desc_rd_head_p;
1244 	p_rcr_entry_t		rcr_desc_rd_head_pp;
1245 	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
1246 	uint16_t		qlen, nrcr_read, npkt_read;
1247 	uint32_t		qlen_hw, qlen_sw;
1248 	uint32_t		invalid_rcr_entry;
1249 	boolean_t		multi;
1250 	rdc_rcr_cfg_b_t		rcr_cfg_b;
1251 	p_rx_mbox_t		rx_mboxp;
1252 	p_rxdma_mailbox_t	mboxp;
1253 	uint64_t		rcr_head_index, rcr_tail_index;
1254 	uint64_t		rcr_tail;
1255 	uint64_t		value;
1256 	rdc_rcr_tail_t		rcr_tail_reg;
1257 	p_hxge_rx_ring_stats_t	rdc_stats;
1258 
1259 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d "
1260 	    "channel %d", vindex, ldvp->channel));
1261 
1262 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1263 		return (NULL);
1264 	}
1265 
1266 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1267 	rx_rcr_rings = hxgep->rx_rcr_rings;
1268 	rcr_p = rx_rcr_rings->rcr_rings[vindex];
1269 	channel = rcr_p->rdc;
1270 	if (channel != ldvp->channel) {
1271 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d "
1272 		    "channel %d, and rcr channel %d not matched.",
1273 		    vindex, ldvp->channel, channel));
1274 		return (NULL);
1275 	}
1276 
1277 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1278 	    "==> hxge_rx_pkts: START: rcr channel %d "
1279 	    "head_p $%p head_pp $%p  index %d ",
1280 	    channel, rcr_p->rcr_desc_rd_head_p,
1281 	    rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index));
1282 
1283 	rx_mboxp = hxgep->rx_mbox_areas_p->rxmbox_areas[channel];
1284 	mboxp = (p_rxdma_mailbox_t)rx_mboxp->rx_mbox.kaddrp;
1285 
1286 	(void) hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
1287 	RXDMA_REG_READ64(handle, RDC_RCR_TAIL, channel, &rcr_tail_reg.value);
1288 	rcr_tail = rcr_tail_reg.bits.tail;
1289 
1290 	if (!qlen) {
1291 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1292 		    "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)",
1293 		    channel, qlen));
1294 		return (NULL);
1295 	}
1296 
1297 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d "
1298 	    "qlen %d", channel, qlen));
1299 
1300 	comp_rd_index = rcr_p->comp_rd_index;
1301 
1302 	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
1303 	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
1304 	nrcr_read = npkt_read = 0;
1305 
	/*
	 * Number of packets queued. (A jumbo or multi-buffer packet is
	 * counted as only one packet even though it may take up more than
	 * one completion entry.)
	 */
1311 	qlen_hw = (qlen < hxge_max_rx_pkts) ? qlen : hxge_max_rx_pkts;
1312 	head_mp = NULL;
1313 	tail_mp = &head_mp;
1314 	nmp = mp_cont = NULL;
1315 	multi = B_FALSE;
1316 
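	/*
	 * Cross-check the hardware qlen against the distance between the
	 * software read head and the hardware tail, and never process more
	 * entries than the tail indicates are present.
	 */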
1317 	rcr_head_index = rcr_p->rcr_desc_rd_head_p - rcr_p->rcr_desc_first_p;
1318 	rcr_tail_index = rcr_tail - rcr_p->rcr_tail_begin;
1319 
1320 	if (rcr_tail_index >= rcr_head_index) {
1321 		qlen_sw = rcr_tail_index - rcr_head_index;
1322 	} else {
1323 		/* rcr_tail has wrapped around */
1324 		qlen_sw = (rcr_p->comp_size - rcr_head_index) + rcr_tail_index;
1325 	}
1326 
1327 	if (qlen_hw > qlen_sw) {
1328 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1329 		    "Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n",
1330 		    channel, qlen_hw, qlen_sw));
1331 		qlen_hw = qlen_sw;
1332 	}
1333 
1334 	while (qlen_hw) {
1335 #ifdef HXGE_DEBUG
1336 		hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p);
1337 #endif
1338 		/*
1339 		 * Process one completion ring entry.
1340 		 */
1341 		invalid_rcr_entry = 0;
1342 		hxge_receive_packet(hxgep,
1343 		    rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont,
1344 		    &invalid_rcr_entry);
1345 		if (invalid_rcr_entry != 0) {
1346 			rdc_stats = rcr_p->rdc_stats;
1347 			rdc_stats->rcr_invalids++;
1348 			HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1349 			    "Channel %d could only read 0x%x packets, "
1350 			    "but 0x%x pending\n", channel, npkt_read, qlen_hw));
1351 			break;
1352 		}
1353 
1354 		/*
1355 		 * message chaining modes (nemo msg chaining)
1356 		 */
1357 		if (nmp) {
1358 			nmp->b_next = NULL;
1359 			if (!multi && !mp_cont) { /* frame fits a partition */
1360 				*tail_mp = nmp;
1361 				tail_mp = &nmp->b_next;
1362 				nmp = NULL;
1363 			} else if (multi && !mp_cont) { /* first segment */
1364 				*tail_mp = nmp;
1365 				tail_mp = &nmp->b_cont;
1366 			} else if (multi && mp_cont) {	/* mid of multi segs */
1367 				*tail_mp = mp_cont;
1368 				tail_mp = &mp_cont->b_cont;
1369 			} else if (!multi && mp_cont) { /* last segment */
1370 				*tail_mp = mp_cont;
1371 				tail_mp = &nmp->b_next;
1372 				nmp = NULL;
1373 			}
1374 		}
1375 
1376 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1377 		    "==> hxge_rx_pkts: loop: rcr channel %d "
1378 		    "before updating: multi %d "
1379 		    "nrcr_read %d "
1380 		    "npk read %d "
1381 		    "head_pp $%p  index %d ",
1382 		    channel, multi,
1383 		    nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index));
1384 
1385 		if (!multi) {
1386 			qlen_hw--;
1387 			npkt_read++;
1388 		}
1389 
1390 		/*
1391 		 * Update the next read entry.
1392 		 */
1393 		comp_rd_index = NEXT_ENTRY(comp_rd_index,
1394 		    rcr_p->comp_wrap_mask);
1395 
1396 		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
1397 		    rcr_p->rcr_desc_first_p, rcr_p->rcr_desc_last_p);
1398 
1399 		nrcr_read++;
1400 
1401 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1402 		    "<== hxge_rx_pkts: (SAM, process one packet) "
1403 		    "nrcr_read %d", nrcr_read));
1404 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1405 		    "==> hxge_rx_pkts: loop: rcr channel %d "
1406 		    "multi %d nrcr_read %d npk read %d head_pp $%p  index %d ",
1407 		    channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp,
1408 		    comp_rd_index));
1409 	}
1410 
1411 	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
1412 	rcr_p->comp_rd_index = comp_rd_index;
1413 	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
1414 
1415 	/* Adjust the mailbox queue length for a hardware bug workaround */
1416 	mboxp->rcrstat_a.bits.qlen -= npkt_read;
1417 
1418 	if ((hxgep->intr_timeout != rcr_p->intr_timeout) ||
1419 	    (hxgep->intr_threshold != rcr_p->intr_threshold)) {
1420 		rcr_p->intr_timeout = hxgep->intr_timeout;
1421 		rcr_p->intr_threshold = hxgep->intr_threshold;
1422 		rcr_cfg_b.value = 0x0ULL;
1423 		if (rcr_p->intr_timeout)
1424 			rcr_cfg_b.bits.entout = 1;
1425 		rcr_cfg_b.bits.timeout = rcr_p->intr_timeout;
1426 		rcr_cfg_b.bits.pthres = rcr_p->intr_threshold;
1427 		RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B,
1428 		    channel, rcr_cfg_b.value);
1429 	}
1430 
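	/*
	 * Write RDC_STAT back in two steps: first the low 32 bits, which
	 * carry the packet and pointer read counts, then the remaining
	 * status bits with those counts cleared.
	 */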
1431 	cs.bits.pktread = npkt_read;
1432 	cs.bits.ptrread = nrcr_read;
1433 	value = cs.value;
1434 	cs.value &= 0xffffffffULL;
1435 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
1436 
1437 	cs.value = value & ~0xffffffffULL;
1438 	cs.bits.pktread = 0;
1439 	cs.bits.ptrread = 0;
1440 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
1441 
1442 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1443 	    "==> hxge_rx_pkts: EXIT: rcr channel %d "
1444 	    "head_pp $%p  index %016llx ",
1445 	    channel, rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index));
1446 
1447 	/*
1448 	 * Update RCR buffer pointer read and number of packets read.
1449 	 */
1450 
1451 	*rcrp = rcr_p;
1452 
1453 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts"));
1454 
1455 	return (head_mp);
1456 }
1457 
1458 #define	RCR_ENTRY_PATTERN	0x5a5a6b6b7c7c8d8dULL
1459 
1460 /*ARGSUSED*/
1461 void
1462 hxge_receive_packet(p_hxge_t hxgep,
1463     p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
1464     boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont,
1465     uint32_t *invalid_rcr_entry)
1466 {
1467 	p_mblk_t		nmp = NULL;
1468 	uint64_t		multi;
1469 	uint8_t			channel;
1470 
1471 	boolean_t first_entry = B_TRUE;
1472 	boolean_t is_tcp_udp = B_FALSE;
1473 	boolean_t buffer_free = B_FALSE;
1474 	boolean_t error_send_up = B_FALSE;
1475 	uint8_t error_type;
1476 	uint16_t l2_len;
1477 	uint16_t skip_len;
1478 	uint8_t pktbufsz_type;
1479 	uint64_t rcr_entry;
1480 	uint64_t *pkt_buf_addr_pp;
1481 	uint64_t *pkt_buf_addr_p;
1482 	uint32_t buf_offset;
1483 	uint32_t bsize;
1484 	uint32_t msg_index;
1485 	p_rx_rbr_ring_t rx_rbr_p;
1486 	p_rx_msg_t *rx_msg_ring_p;
1487 	p_rx_msg_t rx_msg_p;
1488 
1489 	uint16_t sw_offset_bytes = 0, hdr_size = 0;
1490 	hxge_status_t status = HXGE_OK;
1491 	boolean_t is_valid = B_FALSE;
1492 	p_hxge_rx_ring_stats_t rdc_stats;
1493 	uint32_t bytes_read;
1494 
1495 	uint64_t pkt_type;
1496 
1497 	channel = rcr_p->rdc;
1498 
1499 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet"));
1500 
1501 	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
1502 	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
1503 
1504 	/* Verify the content of the rcr_entry for a hardware bug workaround */
1505 	if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN)) {
1506 		*invalid_rcr_entry = 1;
1507 		HXGE_DEBUG_MSG((hxgep, RX2_CTL, "hxge_receive_packet "
1508 		    "Channel %d invalid RCR entry 0x%llx found, returning\n",
1509 		    channel, (long long) rcr_entry));
1510 		return;
1511 	}
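	/*
	 * Mark the entry as consumed so that a stale, not-yet-rewritten
	 * entry is detected by the check above on a later pass.
	 */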
1512 	*((uint64_t *)rcr_desc_rd_head_p) = RCR_ENTRY_PATTERN;
1513 
1514 	multi = (rcr_entry & RCR_MULTI_MASK);
1515 	pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
1516 
1517 	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
1518 	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
1519 
	/*
	 * Hardware does not strip the CRC due to bug ID 11451, where
	 * the hardware mishandles minimum-size packets.
	 */
1524 	l2_len -= ETHERFCSL;
1525 
1526 	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
1527 	    RCR_PKTBUFSZ_SHIFT);
1528 #if defined(__i386)
1529 	pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry &
1530 	    RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT);
1531 #else
1532 	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
1533 	    RCR_PKT_BUF_ADDR_SHIFT);
1534 #endif
1535 
1536 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1537 	    "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
1538 	    "pkt_buf_addr_pp $%p l2_len %d multi %d "
1539 	    "error_type 0x%x pkt_type 0x%x  "
1540 	    "pktbufsz_type %d ",
1541 	    rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len,
1542 	    multi, error_type, pkt_type, pktbufsz_type));
1543 
1544 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1545 	    "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
1546 	    "pkt_buf_addr_pp $%p l2_len %d multi %d "
1547 	    "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
1548 	    rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type, pkt_type));
1549 
1550 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1551 	    "==> (rbr) hxge_receive_packet: entry 0x%0llx "
1552 	    "full pkt_buf_addr_pp $%p l2_len %d",
1553 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1554 
1555 	/* get the stats ptr */
1556 	rdc_stats = rcr_p->rdc_stats;
1557 
1558 	if (!l2_len) {
1559 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1560 		    "<== hxge_receive_packet: failed: l2 length is 0."));
1561 		return;
1562 	}
1563 
1564 	/* shift 6 bits to get the full io address */
1565 #if defined(__i386)
1566 	pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp <<
1567 	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
1568 #else
1569 	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
1570 	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
1571 #endif
1572 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1573 	    "==> (rbr) hxge_receive_packet: entry 0x%0llx "
1574 	    "full pkt_buf_addr_pp $%p l2_len %d",
1575 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1576 
1577 	rx_rbr_p = rcr_p->rx_rbr_p;
1578 	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
1579 
1580 	if (first_entry) {
1581 		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
1582 		    RXDMA_HDR_SIZE_DEFAULT);
1583 
1584 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1585 		    "==> hxge_receive_packet: first entry 0x%016llx "
1586 		    "pkt_buf_addr_pp $%p l2_len %d hdr %d",
1587 		    rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size));
1588 	}
1589 
1590 	MUTEX_ENTER(&rcr_p->lock);
1591 	MUTEX_ENTER(&rx_rbr_p->lock);
1592 
1593 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1594 	    "==> (rbr 1) hxge_receive_packet: entry 0x%0llx "
1595 	    "full pkt_buf_addr_pp $%p l2_len %d",
1596 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1597 
1598 	/*
1599 	 * Packet buffer address in the completion entry points to the starting
1600 	 * buffer address (offset 0). Use the starting buffer address to locate
1601 	 * the corresponding kernel address.
1602 	 */
1603 	status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p,
1604 	    pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
1605 	    &buf_offset, &msg_index);
1606 
1607 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1608 	    "==> (rbr 2) hxge_receive_packet: entry 0x%0llx "
1609 	    "full pkt_buf_addr_pp $%p l2_len %d",
1610 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1611 
1612 	if (status != HXGE_OK) {
1613 		MUTEX_EXIT(&rx_rbr_p->lock);
1614 		MUTEX_EXIT(&rcr_p->lock);
1615 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1616 		    "<== hxge_receive_packet: found vaddr failed %d", status));
1617 		return;
1618 	}
1619 
1620 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1621 	    "==> (rbr 3) hxge_receive_packet: entry 0x%0llx "
1622 	    "full pkt_buf_addr_pp $%p l2_len %d",
1623 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1624 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1625 	    "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
1626 	    "full pkt_buf_addr_pp $%p l2_len %d",
1627 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
1628 
1629 	if (msg_index >= rx_rbr_p->tnblocks) {
1630 		MUTEX_EXIT(&rx_rbr_p->lock);
1631 		MUTEX_EXIT(&rcr_p->lock);
1632 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1633 		    "==> hxge_receive_packet: FATAL msg_index (%d) "
1634 		    "should be smaller than tnblocks (%d)\n",
1635 		    msg_index, rx_rbr_p->tnblocks));
1636 		return;
1637 	}
1638 
1639 	rx_msg_p = rx_msg_ring_p[msg_index];
1640 
1641 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1642 	    "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
1643 	    "full pkt_buf_addr_pp $%p l2_len %d",
1644 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
1645 
1646 	switch (pktbufsz_type) {
1647 	case RCR_PKTBUFSZ_0:
1648 		bsize = rx_rbr_p->pkt_buf_size0_bytes;
1649 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1650 		    "==> hxge_receive_packet: 0 buf %d", bsize));
1651 		break;
1652 	case RCR_PKTBUFSZ_1:
1653 		bsize = rx_rbr_p->pkt_buf_size1_bytes;
1654 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1655 		    "==> hxge_receive_packet: 1 buf %d", bsize));
1656 		break;
1657 	case RCR_PKTBUFSZ_2:
1658 		bsize = rx_rbr_p->pkt_buf_size2_bytes;
1659 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1660 		    "==> hxge_receive_packet: 2 buf %d", bsize));
1661 		break;
1662 	case RCR_SINGLE_BLOCK:
1663 		bsize = rx_msg_p->block_size;
1664 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1665 		    "==> hxge_receive_packet: single %d", bsize));
1666 
1667 		break;
1668 	default:
1669 		MUTEX_EXIT(&rx_rbr_p->lock);
1670 		MUTEX_EXIT(&rcr_p->lock);
1671 		return;
1672 	}
1673 
1674 	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
1675 	    (buf_offset + sw_offset_bytes), (hdr_size + l2_len),
1676 	    DDI_DMA_SYNC_FORCPU);
1677 
1678 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1679 	    "==> hxge_receive_packet: after first dump:usage count"));
1680 
1681 	if (rx_msg_p->cur_usage_cnt == 0) {
1682 		if (rx_rbr_p->rbr_use_bcopy) {
1683 			atomic_inc_32(&rx_rbr_p->rbr_consumed);
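			/*
			 * Decide whether this packet should be bcopied rather
			 * than loaning the buffer upstream: above the high
			 * threshold always copy; between the low and high
			 * thresholds copy only packets whose buffer size type
			 * is at or below the configured copy size type.
			 */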
1684 			if (rx_rbr_p->rbr_consumed <
1685 			    rx_rbr_p->rbr_threshold_hi) {
1686 				if (rx_rbr_p->rbr_threshold_lo == 0 ||
1687 				    ((rx_rbr_p->rbr_consumed >=
1688 				    rx_rbr_p->rbr_threshold_lo) &&
1689 				    (rx_rbr_p->rbr_bufsize_type >=
1690 				    pktbufsz_type))) {
1691 					rx_msg_p->rx_use_bcopy = B_TRUE;
1692 				}
1693 			} else {
1694 				rx_msg_p->rx_use_bcopy = B_TRUE;
1695 			}
1696 		}
1697 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1698 		    "==> hxge_receive_packet: buf %d (new block) ", bsize));
1699 
1700 		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
1701 		rx_msg_p->pkt_buf_size = bsize;
1702 		rx_msg_p->cur_usage_cnt = 1;
1703 		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
1704 			HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1705 			    "==> hxge_receive_packet: buf %d (single block) ",
1706 			    bsize));
1707 			/*
1708 			 * Buffer can be reused once the free function is
1709 			 * called.
1710 			 */
1711 			rx_msg_p->max_usage_cnt = 1;
1712 			buffer_free = B_TRUE;
1713 		} else {
1714 			rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize;
1715 			if (rx_msg_p->max_usage_cnt == 1) {
1716 				buffer_free = B_TRUE;
1717 			}
1718 		}
1719 	} else {
1720 		rx_msg_p->cur_usage_cnt++;
1721 		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
1722 			buffer_free = B_TRUE;
1723 		}
1724 	}
1725 
1726 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1727 	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
1728 	    msg_index, l2_len,
1729 	    rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
1730 
1731 	if (error_type) {
1732 		rdc_stats->ierrors++;
1733 		/* Update error stats */
1734 		rdc_stats->errlog.compl_err_type = error_type;
1735 		HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_RDMC_RCR_ERR);
1736 
1737 		if (error_type & RCR_CTRL_FIFO_DED) {
1738 			rdc_stats->ctrl_fifo_ecc_err++;
1739 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1740 			    " hxge_receive_packet: "
1741 			    " channel %d RCR ctrl_fifo_ded error", channel));
1742 		} else if (error_type & RCR_DATA_FIFO_DED) {
1743 			rdc_stats->data_fifo_ecc_err++;
1744 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1745 			    " hxge_receive_packet: channel %d"
1746 			    " RCR data_fifo_ded error", channel));
1747 		}
1748 
1749 		/*
1750 		 * Update and repost buffer block if max usage count is
1751 		 * reached.
1752 		 */
1753 		if (error_send_up == B_FALSE) {
1754 			atomic_inc_32(&rx_msg_p->ref_cnt);
1755 			if (buffer_free == B_TRUE) {
1756 				rx_msg_p->free = B_TRUE;
1757 			}
1758 
1759 			MUTEX_EXIT(&rx_rbr_p->lock);
1760 			MUTEX_EXIT(&rcr_p->lock);
1761 			hxge_freeb(rx_msg_p);
1762 			return;
1763 		}
1764 	}
1765 
1766 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1767 	    "==> hxge_receive_packet: DMA sync second "));
1768 
1769 	bytes_read = rcr_p->rcvd_pkt_bytes;
1770 	skip_len = sw_offset_bytes + hdr_size;
1771 	if (!rx_msg_p->rx_use_bcopy) {
1772 		/*
1773 		 * For buffers loaned up to the stack, the driver reference
1774 		 * count is incremented first, then the free state is set.
1775 		 */
1776 		if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
1777 			if (first_entry) {
1778 				nmp->b_rptr = &nmp->b_rptr[skip_len];
1779 				if (l2_len < bsize - skip_len) {
1780 					nmp->b_wptr = &nmp->b_rptr[l2_len];
1781 				} else {
1782 					nmp->b_wptr = &nmp->b_rptr[bsize
1783 					    - skip_len];
1784 				}
1785 			} else {
1786 				if (l2_len - bytes_read < bsize) {
1787 					nmp->b_wptr =
1788 					    &nmp->b_rptr[l2_len - bytes_read];
1789 				} else {
1790 					nmp->b_wptr = &nmp->b_rptr[bsize];
1791 				}
1792 			}
1793 		}
1794 	} else {
1795 		if (first_entry) {
1796 			nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
1797 			    l2_len < bsize - skip_len ?
1798 			    l2_len : bsize - skip_len);
1799 		} else {
1800 			nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset,
1801 			    l2_len - bytes_read < bsize ?
1802 			    l2_len - bytes_read : bsize);
1803 		}
1804 	}
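	/*
	 * At this point nmp either references the receive buffer directly
	 * (loan-up path via hxge_dupb) or holds a private copy of the
	 * payload (bcopy path via hxge_dupb_bcopy); in the bcopy case the
	 * hardware buffer can be reposted as soon as its usage count is
	 * exhausted.
	 */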
1805 
1806 	if (nmp != NULL) {
1807 		if (first_entry)
1808 			bytes_read  = nmp->b_wptr - nmp->b_rptr;
1809 		else
1810 			bytes_read += nmp->b_wptr - nmp->b_rptr;
1811 
1812 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1813 		    "==> hxge_receive_packet after dupb: "
1814 		    "rbr consumed %d "
1815 		    "pktbufsz_type %d "
1816 		    "nmp $%p rptr $%p wptr $%p "
1817 		    "buf_offset %d bsize %d l2_len %d skip_len %d",
1818 		    rx_rbr_p->rbr_consumed,
1819 		    pktbufsz_type,
1820 		    nmp, nmp->b_rptr, nmp->b_wptr,
1821 		    buf_offset, bsize, l2_len, skip_len));
1822 	} else {
1823 		cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)");
1824 
1825 		atomic_inc_32(&rx_msg_p->ref_cnt);
1826 		if (buffer_free == B_TRUE) {
1827 			rx_msg_p->free = B_TRUE;
1828 		}
1829 
1830 		MUTEX_EXIT(&rx_rbr_p->lock);
1831 		MUTEX_EXIT(&rcr_p->lock);
1832 		hxge_freeb(rx_msg_p);
1833 		return;
1834 	}
1835 
1836 	if (buffer_free == B_TRUE) {
1837 		rx_msg_p->free = B_TRUE;
1838 	}
1839 
1840 	/*
1841 	 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a
1842 	 * packet is not fragmented and no error bit is set, then L4 checksum
1843 	 * is OK.
1844 	 */
1845 	is_valid = (nmp != NULL);
1846 	if (first_entry) {
1847 		rdc_stats->ipackets++; /* count only 1st seg for jumbo */
1848 		if (l2_len > (STD_FRAME_SIZE - ETHERFCSL))
1849 			rdc_stats->jumbo_pkts++;
1850 		rdc_stats->ibytes += skip_len + l2_len < bsize ?
1851 		    l2_len : bsize;
1852 	} else {
1853 		/*
1854 		 * Add the current portion of the packet to the kstats.
1855 		 * The current portion is derived from the total packet
1856 		 * length and the portion received so far.
1857 		 */
1858 		rdc_stats->ibytes += l2_len - rcr_p->rcvd_pkt_bytes < bsize ?
1859 		    l2_len - rcr_p->rcvd_pkt_bytes : bsize;
1860 	}
1861 
1862 	rcr_p->rcvd_pkt_bytes = bytes_read;
1863 
1864 	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
1865 		atomic_inc_32(&rx_msg_p->ref_cnt);
1866 		MUTEX_EXIT(&rx_rbr_p->lock);
1867 		MUTEX_EXIT(&rcr_p->lock);
1868 		hxge_freeb(rx_msg_p);
1869 	} else {
1870 		MUTEX_EXIT(&rx_rbr_p->lock);
1871 		MUTEX_EXIT(&rcr_p->lock);
1872 	}
1873 
1874 	if (is_valid) {
1875 		nmp->b_cont = NULL;
1876 		if (first_entry) {
1877 			*mp = nmp;
1878 			*mp_cont = NULL;
1879 		} else {
1880 			*mp_cont = nmp;
1881 		}
1882 	}
1883 
1884 	/*
1885 	 * Update stats and hardware checksumming.
1886 	 */
1887 	if (is_valid && !multi) {
1888 		is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
1889 		    pkt_type == RCR_PKT_IS_UDP) ? B_TRUE : B_FALSE);
1890 
1891 		HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_receive_packet: "
1892 		    "is_valid 0x%x multi %d pkt %d error %d",
1893 		    is_valid, multi, is_tcp_udp, error_type));
1894 
1895 		if (is_tcp_udp && !error_type) {
1896 			(void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
1897 			    HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
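			/*
			 * HCK_FULLCKSUM_OK tells the stack that the full
			 * TCP/UDP checksum has already been verified in
			 * hardware, so no software checksum is performed
			 * on this mblk.
			 */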
1898 
1899 			HXGE_DEBUG_MSG((hxgep, RX_CTL,
1900 			    "==> hxge_receive_packet: Full tcp/udp cksum "
1901 			    "is_valid 0x%x multi %d pkt %d "
1902 			    "error %d",
1903 			    is_valid, multi, is_tcp_udp, error_type));
1904 		}
1905 	}
1906 
1907 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1908 	    "==> hxge_receive_packet: *mp 0x%016llx", *mp));
1909 
1910 	*multi_p = (multi == RCR_MULTI_MASK);
1911 
1912 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: "
1913 	    "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
1914 	    *multi_p, nmp, *mp, *mp_cont));
1915 }
1916 
1917 /*ARGSUSED*/
1918 static hxge_status_t
1919 hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
1920     rdc_stat_t cs)
1921 {
1922 	p_hxge_rx_ring_stats_t	rdc_stats;
1923 	hpi_handle_t		handle;
1924 	boolean_t		rxchan_fatal = B_FALSE;
1925 	uint8_t			channel;
1926 	hxge_status_t		status = HXGE_OK;
1927 	uint64_t		cs_val;
1928 
1929 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts"));
1930 
1931 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1932 	channel = ldvp->channel;
1933 
1934 	/* Clear the interrupts */
1935 	cs.bits.pktread = 0;
1936 	cs.bits.ptrread = 0;
1937 	cs_val = cs.value & RDC_STAT_WR1C;
1938 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_val);
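	/*
	 * Writing the latched bits (masked to the write-one-to-clear set,
	 * with the pktread/ptrread counters zeroed) back to RDC_STAT
	 * acknowledges the events examined below.
	 */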
1939 
1940 	rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index];
1941 
1942 	if (cs.bits.rbr_cpl_to) {
1943 		rdc_stats->rbr_tmout++;
1944 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1945 		    HXGE_FM_EREPORT_RDMC_RBR_CPL_TO);
1946 		rxchan_fatal = B_TRUE;
1947 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1948 		    "==> hxge_rx_err_evnts(channel %d): "
1949 		    "fatal error: rx_rbr_timeout", channel));
1950 	}
1951 
1952 	if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) {
1953 		(void) hpi_rxdma_ring_perr_stat_get(handle,
1954 		    &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par);
1955 	}
1956 
1957 	if (cs.bits.rcr_shadow_par_err) {
1958 		rdc_stats->rcr_sha_par++;
1959 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1960 		    HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
1961 		rxchan_fatal = B_TRUE;
1962 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1963 		    "==> hxge_rx_err_evnts(channel %d): "
1964 		    "fatal error: rcr_shadow_par_err", channel));
1965 	}
1966 
1967 	if (cs.bits.rbr_prefetch_par_err) {
1968 		rdc_stats->rbr_pre_par++;
1969 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1970 		    HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
1971 		rxchan_fatal = B_TRUE;
1972 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1973 		    "==> hxge_rx_err_evnts(channel %d): "
1974 		    "fatal error: rbr_prefetch_par_err", channel));
1975 	}
1976 
1977 	if (cs.bits.rbr_pre_empty) {
1978 		rdc_stats->rbr_pre_empty++;
1979 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1980 		    HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY);
1981 		rxchan_fatal = B_TRUE;
1982 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1983 		    "==> hxge_rx_err_evnts(channel %d): "
1984 		    "fatal error: rbr_pre_empty", channel));
1985 	}
1986 
1987 	if (cs.bits.peu_resp_err) {
1988 		rdc_stats->peu_resp_err++;
1989 		HXGE_FM_REPORT_ERROR(hxgep, channel,
1990 		    HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR);
1991 		rxchan_fatal = B_TRUE;
1992 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1993 		    "==> hxge_rx_err_evnts(channel %d): "
1994 		    "fatal error: peu_resp_err", channel));
1995 	}
1996 
1997 	if (cs.bits.rcr_thres) {
1998 		rdc_stats->rcr_thres++;
1999 	}
2000 
2001 	if (cs.bits.rcr_to) {
2002 		rdc_stats->rcr_to++;
2003 	}
2004 
2005 	if (cs.bits.rcr_shadow_full) {
2006 		rdc_stats->rcr_shadow_full++;
2007 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2008 		    HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL);
2009 		rxchan_fatal = B_TRUE;
2010 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2011 		    "==> hxge_rx_err_evnts(channel %d): "
2012 		    "fatal error: rcr_shadow_full", channel));
2013 	}
2014 
2015 	if (cs.bits.rcr_full) {
2016 		rdc_stats->rcrfull++;
2017 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2018 		    HXGE_FM_EREPORT_RDMC_RCRFULL);
2019 		rxchan_fatal = B_TRUE;
2020 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2021 		    "==> hxge_rx_err_evnts(channel %d): "
2022 		    "fatal error: rcrfull error", channel));
2023 	}
2024 
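	/*
	 * rbr_empty is treated as a transient condition rather than a
	 * fatal one: quiesce the channel and re-enable it so it resumes
	 * fetching buffers once the RBR is replenished.
	 */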
2025 	if (cs.bits.rbr_empty) {
2026 		rdc_stats->rbr_empty++;
2027 		if (rdc_stats->rbr_empty == 1) {
2028 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2029 			    "==> hxge_rx_err_evnts(channel %d): "
2030 			    "rbr empty error", channel));
2031 		}
2032 
2033 		/*
2034 		 * Wait for channel to be quiet.
2035 		 */
2036 		(void) hpi_rxdma_cfg_rdc_wait_for_qst(handle, channel);
2037 
2038 		/*
2039 		 * Re-enable the DMA.
2040 		 */
2041 		(void) hpi_rxdma_cfg_rdc_enable(handle, channel);
2042 	}
2043 
2044 	if (cs.bits.rbr_full) {
2045 		rdc_stats->rbrfull++;
2046 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2047 		    HXGE_FM_EREPORT_RDMC_RBRFULL);
2048 		rxchan_fatal = B_TRUE;
2049 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2050 		    "==> hxge_rx_err_evnts(channel %d): "
2051 		    "fatal error: rbr_full error", channel));
2052 	}
2053 
2054 	if (rxchan_fatal) {
2055 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2056 		    " hxge_rx_err_evnts: fatal error on Channel #%d\n",
2057 		    channel));
2058 		status = hxge_rxdma_fatal_err_recover(hxgep, channel);
2059 		if (status == HXGE_OK) {
2060 			FM_SERVICE_RESTORED(hxgep);
2061 		}
2062 	}
2063 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_err_evnts"));
2064 
2065 	return (status);
2066 }
2067 
2068 static hxge_status_t
2069 hxge_map_rxdma(p_hxge_t hxgep)
2070 {
2071 	int			i, ndmas;
2072 	uint16_t		channel;
2073 	p_rx_rbr_rings_t	rx_rbr_rings;
2074 	p_rx_rbr_ring_t		*rbr_rings;
2075 	p_rx_rcr_rings_t	rx_rcr_rings;
2076 	p_rx_rcr_ring_t		*rcr_rings;
2077 	p_rx_mbox_areas_t	rx_mbox_areas_p;
2078 	p_rx_mbox_t		*rx_mbox_p;
2079 	p_hxge_dma_pool_t	dma_buf_poolp;
2080 	p_hxge_dma_common_t	*dma_buf_p;
2081 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
2082 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
2083 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
2084 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
2085 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
2086 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
2087 	uint32_t		*num_chunks;
2088 	hxge_status_t		status = HXGE_OK;
2089 
2090 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma"));
2091 
2092 	dma_buf_poolp = hxgep->rx_buf_pool_p;
2093 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
2094 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
2095 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
2096 
2097 	if (!dma_buf_poolp->buf_allocated ||
2098 	    !dma_rbr_cntl_poolp->buf_allocated ||
2099 	    !dma_rcr_cntl_poolp->buf_allocated ||
2100 	    !dma_mbox_cntl_poolp->buf_allocated) {
2101 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2102 		    "<== hxge_map_rxdma: buf not allocated"));
2103 		return (HXGE_ERROR);
2104 	}
2105 
2106 	ndmas = dma_buf_poolp->ndmas;
2107 	if (!ndmas) {
2108 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2109 		    "<== hxge_map_rxdma: no dma allocated"));
2110 		return (HXGE_ERROR);
2111 	}
2112 
2113 	num_chunks = dma_buf_poolp->num_chunks;
2114 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
2115 	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
2116 	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
2117 	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
2118 
2119 	rx_rbr_rings = (p_rx_rbr_rings_t)
2120 	    KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
2121 	rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC(
2122 	    sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP);
2123 
2124 	rx_rcr_rings = (p_rx_rcr_rings_t)
2125 	    KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
2126 	rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC(
2127 	    sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP);
2128 
2129 	rx_mbox_areas_p = (p_rx_mbox_areas_t)
2130 	    KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
2131 	rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC(
2132 	    sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP);
2133 
2134 	/*
2135 	 * Timeout should be set based on the system clock divider.
2136 	 * The following timeout value of 1 assumes that the
2137 	 * granularity (1000) is 3 microseconds running at 300MHz.
2138 	 */
2139 
2140 	hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
2141 	hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
2142 
2143 	/*
2144 	 * Map descriptors from the buffer pools for each DMA channel.
2145 	 */
2146 	for (i = 0; i < ndmas; i++) {
2147 		/*
2148 		 * Set up and prepare buffer blocks, descriptors and mailbox.
2149 		 */
2150 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2151 		status = hxge_map_rxdma_channel(hxgep, channel,
2152 		    (p_hxge_dma_common_t *)&dma_buf_p[i],
2153 		    (p_rx_rbr_ring_t *)&rbr_rings[i],
2154 		    num_chunks[i],
2155 		    (p_hxge_dma_common_t *)&dma_rbr_cntl_p[i],
2156 		    (p_hxge_dma_common_t *)&dma_rcr_cntl_p[i],
2157 		    (p_hxge_dma_common_t *)&dma_mbox_cntl_p[i],
2158 		    (p_rx_rcr_ring_t *)&rcr_rings[i],
2159 		    (p_rx_mbox_t *)&rx_mbox_p[i]);
2160 		if (status != HXGE_OK) {
2161 			goto hxge_map_rxdma_fail1;
2162 		}
2163 		rbr_rings[i]->index = (uint16_t)i;
2164 		rcr_rings[i]->index = (uint16_t)i;
2165 		rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i];
2166 	}
2167 
2168 	rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas;
2169 	rx_rbr_rings->rbr_rings = rbr_rings;
2170 	hxgep->rx_rbr_rings = rx_rbr_rings;
2171 	rx_rcr_rings->rcr_rings = rcr_rings;
2172 	hxgep->rx_rcr_rings = rx_rcr_rings;
2173 
2174 	rx_mbox_areas_p->rxmbox_areas = rx_mbox_p;
2175 	hxgep->rx_mbox_areas_p = rx_mbox_areas_p;
2176 
2177 	goto hxge_map_rxdma_exit;
2178 
2179 hxge_map_rxdma_fail1:
2180 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2181 	    "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)",
2182 	    status, channel, i));
2183 	i--;
2184 	for (; i >= 0; i--) {
2185 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2186 		hxge_unmap_rxdma_channel(hxgep, channel,
2187 		    rbr_rings[i], rcr_rings[i], rx_mbox_p[i]);
2188 	}
2189 
2190 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
2191 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
2192 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
2193 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
2194 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
2195 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2196 
2197 hxge_map_rxdma_exit:
2198 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2199 	    "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel));
2200 
2201 	return (status);
2202 }
2203 
2204 static void
2205 hxge_unmap_rxdma(p_hxge_t hxgep)
2206 {
2207 	int			i, ndmas;
2208 	uint16_t		channel;
2209 	p_rx_rbr_rings_t	rx_rbr_rings;
2210 	p_rx_rbr_ring_t		*rbr_rings;
2211 	p_rx_rcr_rings_t	rx_rcr_rings;
2212 	p_rx_rcr_ring_t		*rcr_rings;
2213 	p_rx_mbox_areas_t	rx_mbox_areas_p;
2214 	p_rx_mbox_t		*rx_mbox_p;
2215 	p_hxge_dma_pool_t	dma_buf_poolp;
2216 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
2217 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
2218 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
2219 	p_hxge_dma_common_t	*dma_buf_p;
2220 
2221 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma"));
2222 
2223 	dma_buf_poolp = hxgep->rx_buf_pool_p;
2224 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
2225 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
2226 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
2227 
2228 	if (!dma_buf_poolp->buf_allocated ||
2229 	    !dma_rbr_cntl_poolp->buf_allocated ||
2230 	    !dma_rcr_cntl_poolp->buf_allocated ||
2231 	    !dma_mbox_cntl_poolp->buf_allocated) {
2232 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2233 		    "<== hxge_unmap_rxdma: NULL buf pointers"));
2234 		return;
2235 	}
2236 
2237 	rx_rbr_rings = hxgep->rx_rbr_rings;
2238 	rx_rcr_rings = hxgep->rx_rcr_rings;
2239 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
2240 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2241 		    "<== hxge_unmap_rxdma: NULL pointers"));
2242 		return;
2243 	}
2244 
2245 	ndmas = rx_rbr_rings->ndmas;
2246 	if (!ndmas) {
2247 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2248 		    "<== hxge_unmap_rxdma: no channel"));
2249 		return;
2250 	}
2251 
2252 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2253 	    "==> hxge_unmap_rxdma (ndmas %d)", ndmas));
2254 
2255 	rbr_rings = rx_rbr_rings->rbr_rings;
2256 	rcr_rings = rx_rcr_rings->rcr_rings;
2257 	rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
2258 	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
2259 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
2260 
2261 	for (i = 0; i < ndmas; i++) {
2262 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2263 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2264 		    "==> hxge_unmap_rxdma (ndmas %d) channel %d",
2265 		    ndmas, channel));
2266 		(void) hxge_unmap_rxdma_channel(hxgep, channel,
2267 		    (p_rx_rbr_ring_t)rbr_rings[i],
2268 		    (p_rx_rcr_ring_t)rcr_rings[i],
2269 		    (p_rx_mbox_t)rx_mbox_p[i]);
2270 	}
2271 
2272 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
2273 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
2274 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
2275 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
2276 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2277 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
2278 
2279 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma"));
2280 }
2281 
2282 hxge_status_t
2283 hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
2284     p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
2285     uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p,
2286     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
2287     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
2288 {
2289 	int status = HXGE_OK;
2290 
2291 	/*
2292 	 * Set up and prepare buffer blocks, descriptors and mailbox.
2293 	 */
2294 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2295 	    "==> hxge_map_rxdma_channel (channel %d)", channel));
2296 
2297 	/*
2298 	 * Receive buffer blocks
2299 	 */
2300 	status = hxge_map_rxdma_channel_buf_ring(hxgep, channel,
2301 	    dma_buf_p, rbr_p, num_chunks);
2302 	if (status != HXGE_OK) {
2303 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2304 		    "==> hxge_map_rxdma_channel (channel %d): "
2305 		    "map buffer failed 0x%x", channel, status));
2306 		goto hxge_map_rxdma_channel_exit;
2307 	}
2308 
2309 	/*
2310 	 * Receive block ring, completion ring and mailbox.
2311 	 */
2312 	status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel,
2313 	    dma_rbr_cntl_p, dma_rcr_cntl_p, dma_mbox_cntl_p,
2314 	    rbr_p, rcr_p, rx_mbox_p);
2315 	if (status != HXGE_OK) {
2316 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2317 		    "==> hxge_map_rxdma_channel (channel %d): "
2318 		    "map config failed 0x%x", channel, status));
2319 		goto hxge_map_rxdma_channel_fail2;
2320 	}
2321 	goto hxge_map_rxdma_channel_exit;
2322 
2323 hxge_map_rxdma_channel_fail3:
2324 	/* Free rbr, rcr */
2325 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2326 	    "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)",
2327 	    status, channel));
2328 	hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p);
2329 
2330 hxge_map_rxdma_channel_fail2:
2331 	/* Free buffer blocks */
2332 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2333 	    "==> hxge_map_rxdma_channel: free rx buffers"
2334 	    "(hxgep 0x%x status 0x%x channel %d)",
2335 	    hxgep, status, channel));
2336 	hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p);
2337 
2338 	status = HXGE_ERROR;
2339 
2340 hxge_map_rxdma_channel_exit:
2341 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2342 	    "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)",
2343 	    hxgep, status, channel));
2344 
2345 	return (status);
2346 }
2347 
2348 /*ARGSUSED*/
2349 static void
2350 hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
2351     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
2352 {
2353 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2354 	    "==> hxge_unmap_rxdma_channel (channel %d)", channel));
2355 
2356 	/*
2357 	 * unmap receive block ring, completion ring and mailbox.
2358 	 */
2359 	(void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p);
2360 
2361 	/* unmap buffer blocks */
2362 	(void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p);
2363 
2364 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel"));
2365 }
2366 
2367 /*ARGSUSED*/
2368 static hxge_status_t
2369 hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
2370     p_hxge_dma_common_t *dma_rbr_cntl_p, p_hxge_dma_common_t *dma_rcr_cntl_p,
2371     p_hxge_dma_common_t *dma_mbox_cntl_p, p_rx_rbr_ring_t *rbr_p,
2372     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
2373 {
2374 	p_rx_rbr_ring_t 	rbrp;
2375 	p_rx_rcr_ring_t 	rcrp;
2376 	p_rx_mbox_t 		mboxp;
2377 	p_hxge_dma_common_t 	cntl_dmap;
2378 	p_hxge_dma_common_t 	dmap;
2379 	p_rx_msg_t 		*rx_msg_ring;
2380 	p_rx_msg_t 		rx_msg_p;
2381 	rdc_rbr_cfg_a_t		*rcfga_p;
2382 	rdc_rbr_cfg_b_t		*rcfgb_p;
2383 	rdc_rcr_cfg_a_t		*cfga_p;
2384 	rdc_rcr_cfg_b_t		*cfgb_p;
2385 	rdc_rx_cfg1_t		*cfig1_p;
2386 	rdc_rx_cfg2_t		*cfig2_p;
2387 	rdc_rbr_kick_t		*kick_p;
2388 	uint32_t		dmaaddrp;
2389 	uint32_t		*rbr_vaddrp;
2390 	uint32_t		bkaddr;
2391 	hxge_status_t		status = HXGE_OK;
2392 	int			i;
2393 	uint32_t 		hxge_port_rcr_size;
2394 
2395 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2396 	    "==> hxge_map_rxdma_channel_cfg_ring"));
2397 
2398 	cntl_dmap = *dma_rbr_cntl_p;
2399 
2400 	/*
2401 	 * Map in the receive block ring
2402 	 */
2403 	rbrp = *rbr_p;
2404 	dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc;
2405 	hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
2406 
2407 	/*
2408 	 * Zero out buffer block ring descriptors.
2409 	 */
2410 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
2411 
2412 	rcfga_p = &(rbrp->rbr_cfga);
2413 	rcfgb_p = &(rbrp->rbr_cfgb);
2414 	kick_p = &(rbrp->rbr_kick);
2415 	rcfga_p->value = 0;
2416 	rcfgb_p->value = 0;
2417 	kick_p->value = 0;
2418 	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
2419 	rcfga_p->value = (rbrp->rbr_addr &
2420 	    (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK));
2421 	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
2422 
2423 	/* XXXX: how to choose packet buffer sizes */
2424 	rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0;
2425 	rcfgb_p->bits.vld0 = 1;
2426 	rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1;
2427 	rcfgb_p->bits.vld1 = 1;
2428 	rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2;
2429 	rcfgb_p->bits.vld2 = 1;
2430 	rcfgb_p->bits.bksize = hxgep->rx_bksize_code;
2431 
2432 	/*
2433 	 * For each buffer block, enter receive block address to the ring.
2434 	 */
2435 	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
2436 	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
2437 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2438 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
2439 	    "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
2440 
2441 	rx_msg_ring = rbrp->rx_msg_ring;
2442 	for (i = 0; i < rbrp->tnblocks; i++) {
2443 		rx_msg_p = rx_msg_ring[i];
2444 		rx_msg_p->hxgep = hxgep;
2445 		rx_msg_p->rx_rbr_p = rbrp;
2446 		bkaddr = (uint32_t)
2447 		    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
2448 		    RBR_BKADDR_SHIFT));
2449 		rx_msg_p->free = B_FALSE;
2450 		rx_msg_p->max_usage_cnt = 0xbaddcafe;
2451 
2452 		*rbr_vaddrp++ = bkaddr;
2453 	}
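	/*
	 * Each descriptor written above is the block's 64-bit DMA address
	 * shifted right by RBR_BKADDR_SHIFT, stored as a 32-bit entry; the
	 * kick below advertises how many of these descriptors are valid.
	 */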
2454 
2455 	kick_p->bits.bkadd = rbrp->rbb_max;
2456 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
2457 
2458 	rbrp->rbr_rd_index = 0;
2459 
2460 	rbrp->rbr_consumed = 0;
2461 	rbrp->rbr_use_bcopy = B_TRUE;
2462 	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
2463 
2464 	/*
2465 	 * Do bcopy on packets greater than bcopy size once the lo threshold is
2466 	 * reached. This lo threshold should be less than the hi threshold.
2467 	 *
2468 	 * Do bcopy on every packet once the hi threshold is reached.
2469 	 */
2470 	if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) {
2471 		/* default it to use hi */
2472 		hxge_rx_threshold_lo = hxge_rx_threshold_hi;
2473 	}
2474 	if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) {
2475 		hxge_rx_buf_size_type = HXGE_RBR_TYPE2;
2476 	}
2477 	rbrp->rbr_bufsize_type = hxge_rx_buf_size_type;
2478 
2479 	switch (hxge_rx_threshold_hi) {
2480 	default:
2481 	case HXGE_RX_COPY_NONE:
2482 		/* Do not do bcopy at all */
2483 		rbrp->rbr_use_bcopy = B_FALSE;
2484 		rbrp->rbr_threshold_hi = rbrp->rbb_max;
2485 		break;
2486 
2487 	case HXGE_RX_COPY_1:
2488 	case HXGE_RX_COPY_2:
2489 	case HXGE_RX_COPY_3:
2490 	case HXGE_RX_COPY_4:
2491 	case HXGE_RX_COPY_5:
2492 	case HXGE_RX_COPY_6:
2493 	case HXGE_RX_COPY_7:
2494 		rbrp->rbr_threshold_hi =
2495 		    rbrp->rbb_max * (hxge_rx_threshold_hi) /
2496 		    HXGE_RX_BCOPY_SCALE;
2497 		break;
2498 
2499 	case HXGE_RX_COPY_ALL:
2500 		rbrp->rbr_threshold_hi = 0;
2501 		break;
2502 	}
2503 
2504 	switch (hxge_rx_threshold_lo) {
2505 	default:
2506 	case HXGE_RX_COPY_NONE:
2507 		/* Do not do bcopy at all */
2508 		if (rbrp->rbr_use_bcopy) {
2509 			rbrp->rbr_use_bcopy = B_FALSE;
2510 		}
2511 		rbrp->rbr_threshold_lo = rbrp->rbb_max;
2512 		break;
2513 
2514 	case HXGE_RX_COPY_1:
2515 	case HXGE_RX_COPY_2:
2516 	case HXGE_RX_COPY_3:
2517 	case HXGE_RX_COPY_4:
2518 	case HXGE_RX_COPY_5:
2519 	case HXGE_RX_COPY_6:
2520 	case HXGE_RX_COPY_7:
2521 		rbrp->rbr_threshold_lo =
2522 		    rbrp->rbb_max * (hxge_rx_threshold_lo) /
2523 		    HXGE_RX_BCOPY_SCALE;
2524 		break;
2525 
2526 	case HXGE_RX_COPY_ALL:
2527 		rbrp->rbr_threshold_lo = 0;
2528 		break;
2529 	}
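	/*
	 * Illustrative example (assuming HXGE_RX_COPY_6 has the value 6 and
	 * HXGE_RX_BCOPY_SCALE is 8): with rbb_max of 2048 and a hi
	 * threshold of HXGE_RX_COPY_6, rbr_threshold_hi becomes
	 * 2048 * 6 / 8 = 1536 consumed buffers, beyond which every packet
	 * is copied instead of loaned up.
	 */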
2530 
2531 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
2532 	    "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d "
2533 	    "rbrp->rbr_bufsize_type %d rbr_threshold_hi %d "
2534 	    "rbr_threshold_lo %d",
2535 	    dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type,
2536 	    rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo));
2537 
2538 	/* Map in the receive completion ring */
2539 	rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
2540 	rcrp->rdc = dma_channel;
2541 	rcrp->hxgep = hxgep;
2542 
2543 	hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
2544 	rcrp->comp_size = hxge_port_rcr_size;
2545 	rcrp->comp_wrap_mask = hxge_port_rcr_size - 1;
2546 
2547 	rcrp->max_receive_pkts = hxge_max_rx_pkts;
2548 
2549 	cntl_dmap = *dma_rcr_cntl_p;
2550 
2551 	dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
2552 	hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
2553 	    sizeof (rcr_entry_t));
2554 	rcrp->comp_rd_index = 0;
2555 	rcrp->comp_wt_index = 0;
2556 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
2557 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
2558 #if defined(__i386)
2559 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
2560 	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
2561 #else
2562 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
2563 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
2564 #endif
2565 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
2566 	    (hxge_port_rcr_size - 1);
2567 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
2568 	    (hxge_port_rcr_size - 1);
2569 
2570 	rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc);
2571 	rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3;
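	/*
	 * Cache the RCR descriptor I/O address as a qword index: mask to
	 * the low 19 bits and divide by the 8-byte entry size. This is
	 * presumably compared against the hardware tail pointer to locate
	 * the current completion entry.
	 */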
2572 
2573 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2574 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
2575 	    "rbr_vaddrp $%p rcr_desc_rd_head_p $%p "
2576 	    "rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p "
2577 	    "rcr_desc_rd_last_pp $%p ",
2578 	    dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p,
2579 	    rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p,
2580 	    rcrp->rcr_desc_last_pp));
2581 
2582 	/*
2583 	 * Zero out buffer block ring descriptors.
2584 	 */
2585 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
2586 	rcrp->intr_timeout = hxgep->intr_timeout;
2587 	rcrp->intr_threshold = hxgep->intr_threshold;
2588 	rcrp->full_hdr_flag = B_FALSE;
2589 	rcrp->sw_priv_hdr_len = 0;
2590 
2591 	cfga_p = &(rcrp->rcr_cfga);
2592 	cfgb_p = &(rcrp->rcr_cfgb);
2593 	cfga_p->value = 0;
2594 	cfgb_p->value = 0;
2595 	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
2596 
2597 	cfga_p->value = (rcrp->rcr_addr &
2598 	    (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK));
2599 
2600 	cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF);
2601 
2602 	/*
2603 	 * Timeout should be set based on the system clock divider. The
2604 	 * following timeout value of 1 assumes that the granularity (1000) is
2605 	 * 3 microseconds running at 300MHz.
2606 	 */
2607 	cfgb_p->bits.pthres = rcrp->intr_threshold;
2608 	cfgb_p->bits.timeout = rcrp->intr_timeout;
2609 	cfgb_p->bits.entout = 1;
2610 
2611 	/* Map in the mailbox */
2612 	cntl_dmap = *dma_mbox_cntl_p;
2613 	mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
2614 	dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox;
2615 	hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
2616 	cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1;
2617 	cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2;
2618 	cfig1_p->value = cfig2_p->value = 0;
2619 
2620 	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
2621 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2622 	    "==> hxge_map_rxdma_channel_cfg_ring: "
2623 	    "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
2624 	    dma_channel, cfig1_p->value, cfig2_p->value,
2625 	    mboxp->mbox_addr));
2626 
2627 	dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff);
2628 	cfig1_p->bits.mbaddr_h = dmaaddrp;
2629 
2630 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
2631 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
2632 	    RXDMA_CFIG2_MBADDR_L_MASK);
2633 
2634 	cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
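	/*
	 * The 64-bit mailbox DMA address is split across two registers:
	 * cfig1.mbaddr_h holds the bits above 32 (12 bits kept here) and
	 * cfig2.mbaddr_l holds the masked low bits shifted down by
	 * RXDMA_CFIG2_MBADDR_L_SHIFT, the low-order bits presumably being
	 * implied by the mailbox alignment.
	 */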
2635 
2636 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2637 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d dmaaddrp $%p "
2638 	    "cfg1 0x%016llx cfig2 0x%016llx",
2639 	    dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value));
2640 
2641 	cfig2_p->bits.full_hdr = rcrp->full_hdr_flag;
2642 	cfig2_p->bits.offset = rcrp->sw_priv_hdr_len;
2643 
2644 	rbrp->rx_rcr_p = rcrp;
2645 	rcrp->rx_rbr_p = rbrp;
2646 	*rcr_p = rcrp;
2647 	*rx_mbox_p = mboxp;
2648 
2649 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2650 	    "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
2651 	return (status);
2652 }
2653 
2654 /*ARGSUSED*/
2655 static void
2656 hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
2657     p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
2658 {
2659 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2660 	    "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc));
2661 
2662 	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
2663 	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
2664 
2665 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2666 	    "<== hxge_unmap_rxdma_channel_cfg_ring"));
2667 }
2668 
2669 static hxge_status_t
2670 hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
2671     p_hxge_dma_common_t *dma_buf_p,
2672     p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
2673 {
2674 	p_rx_rbr_ring_t		rbrp;
2675 	p_hxge_dma_common_t	dma_bufp, tmp_bufp;
2676 	p_rx_msg_t		*rx_msg_ring;
2677 	p_rx_msg_t		rx_msg_p;
2678 	p_mblk_t		mblk_p;
2679 
2680 	rxring_info_t *ring_info;
2681 	hxge_status_t status = HXGE_OK;
2682 	int i, j, index;
2683 	uint32_t size, bsize, nblocks, nmsgs;
2684 
2685 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2686 	    "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel));
2687 
2688 	dma_bufp = tmp_bufp = *dma_buf_p;
2689 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2690 	    " hxge_map_rxdma_channel_buf_ring: channel %d to map %d "
2691 	    "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp));
2692 
2693 	nmsgs = 0;
2694 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2695 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2696 		    "==> hxge_map_rxdma_channel_buf_ring: channel %d "
2697 		    "bufp 0x%016llx nblocks %d nmsgs %d",
2698 		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2699 		nmsgs += tmp_bufp->nblocks;
2700 	}
2701 	if (!nmsgs) {
2702 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2703 		    "<== hxge_map_rxdma_channel_buf_ring: channel %d "
2704 		    "no msg blocks", channel));
2705 		status = HXGE_ERROR;
2706 		goto hxge_map_rxdma_channel_buf_ring_exit;
2707 	}
2708 	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP);
2709 
2710 	size = nmsgs * sizeof (p_rx_msg_t);
2711 	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2712 	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
2713 	    KM_SLEEP);
2714 
2715 	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
2716 	    (void *) hxgep->interrupt_cookie);
2717 	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
2718 	    (void *) hxgep->interrupt_cookie);
2719 
2720 	rbrp->rdc = channel;
2721 	rbrp->num_blocks = num_chunks;
2722 	rbrp->tnblocks = nmsgs;
2723 	rbrp->rbb_max = nmsgs;
2724 	rbrp->rbr_max_size = nmsgs;
2725 	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
2726 
2727 	/*
2728 	 * Buffer sizes suggested by the NIU architect: 256B, 1KB and 2KB.
2729 	 */
2730 
2731 	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
2732 	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
2733 	rbrp->hpi_pkt_buf_size0 = SIZE_256B;
2734 
2735 	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
2736 	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
2737 	rbrp->hpi_pkt_buf_size1 = SIZE_1KB;
2738 
2739 	rbrp->block_size = hxgep->rx_default_block_size;
2740 
2741 	if (!hxgep->param_arr[param_accept_jumbo].value) {
2742 		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
2743 		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
2744 		rbrp->hpi_pkt_buf_size2 = SIZE_2KB;
2745 	} else {
2746 		rbrp->hpi_pkt_buf_size2 = SIZE_4KB;
2747 		rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
2748 		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
2749 	}
2750 
2751 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2752 	    "==> hxge_map_rxdma_channel_buf_ring: channel %d "
2753 	    "actual rbr max %d rbb_max %d nmsgs %d "
2754 	    "rbrp->block_size %d default_block_size %d "
2755 	    "(config hxge_rbr_size %d hxge_rbr_spare_size %d)",
2756 	    channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
2757 	    rbrp->block_size, hxgep->rx_default_block_size,
2758 	    hxge_rbr_size, hxge_rbr_spare_size));
2759 
2760 	/*
2761 	 * Map in buffers from the buffer pool.
2762 	 * Note that num_blocks is the number of chunks. On SPARC there is
2763 	 * typically only one chunk; on x86 there may be many.
2764 	 * Loop over the chunks.
2765 	 */
2766 	index = 0;
2767 	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
2768 		bsize = dma_bufp->block_size;
2769 		nblocks = dma_bufp->nblocks;
2770 #if defined(__i386)
2771 		ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
2772 #else
2773 		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
2774 #endif
2775 		ring_info->buffer[i].buf_index = i;
2776 		ring_info->buffer[i].buf_size = dma_bufp->alength;
2777 		ring_info->buffer[i].start_index = index;
2778 #if defined(__i386)
2779 		ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
2780 #else
2781 		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
2782 #endif
2783 
2784 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2785 		    " hxge_map_rxdma_channel_buf_ring: map channel %d "
2786 		    "chunk %d nblocks %d chunk_size %x block_size 0x%x "
2787 		    "dma_bufp $%p dvma_addr $%p", channel, i,
2788 		    dma_bufp->nblocks,
2789 		    ring_info->buffer[i].buf_size, bsize, dma_bufp,
2790 		    ring_info->buffer[i].dvma_addr));
2791 
2792 		/* loop over blocks within a chunk */
2793 		for (j = 0; j < nblocks; j++) {
2794 			if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO,
2795 			    dma_bufp)) == NULL) {
2796 				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2797 				    "allocb failed (index %d i %d j %d)",
2798 				    index, i, j));
2799 				goto hxge_map_rxdma_channel_buf_ring_fail1;
2800 			}
2801 			rx_msg_ring[index] = rx_msg_p;
2802 			rx_msg_p->block_index = index;
2803 			rx_msg_p->shifted_addr = (uint32_t)
2804 			    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
2805 			    RBR_BKADDR_SHIFT));
2806 			/*
2807 			 * Too much output
2808 			 * HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2809 			 *	"index %d j %d rx_msg_p $%p mblk %p",
2810 			 *	index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
2811 			 */
2812 			mblk_p = rx_msg_p->rx_mblk_p;
2813 			mblk_p->b_wptr = mblk_p->b_rptr + bsize;
2814 
2815 			rbrp->rbr_ref_cnt++;
2816 			index++;
2817 			rx_msg_p->buf_dma.dma_channel = channel;
2818 		}
2819 	}
2820 	if (i < rbrp->num_blocks) {
2821 		goto hxge_map_rxdma_channel_buf_ring_fail1;
2822 	}
2823 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2824 	    "hxge_map_rxdma_channel_buf_ring: done buf init "
2825 	    "channel %d msg block entries %d", channel, index));
2826 	ring_info->block_size_mask = bsize - 1;
2827 	rbrp->rx_msg_ring = rx_msg_ring;
2828 	rbrp->dma_bufp = dma_buf_p;
2829 	rbrp->ring_info = ring_info;
2830 
2831 	status = hxge_rxbuf_index_info_init(hxgep, rbrp);
2832 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: "
2833 	    "channel %d done buf info init", channel));
2834 
2835 	/*
2836 	 * Finally, permit hxge_freeb() to call hxge_post_page().
2837 	 */
2838 	rbrp->rbr_state = RBR_POSTING;
2839 
2840 	*rbr_p = rbrp;
2841 
2842 	goto hxge_map_rxdma_channel_buf_ring_exit;
2843 
2844 hxge_map_rxdma_channel_buf_ring_fail1:
2845 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2846 	    " hxge_map_rxdma_channel_buf_ring: failed channel %d (status 0x%x)",
2847 	    channel, status));
2848 
2849 	index--;
2850 	for (; index >= 0; index--) {
2851 		rx_msg_p = rx_msg_ring[index];
2852 		if (rx_msg_p != NULL) {
2853 			hxge_freeb(rx_msg_p);
2854 			rx_msg_ring[index] = NULL;
2855 		}
2856 	}
2857 
2858 hxge_map_rxdma_channel_buf_ring_fail:
2859 	MUTEX_DESTROY(&rbrp->post_lock);
2860 	MUTEX_DESTROY(&rbrp->lock);
2861 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
2862 	KMEM_FREE(rx_msg_ring, size);
2863 	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
2864 
2865 	status = HXGE_ERROR;
2866 
2867 hxge_map_rxdma_channel_buf_ring_exit:
2868 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2869 	    "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status));
2870 
2871 	return (status);
2872 }
2873 
2874 /*ARGSUSED*/
2875 static void
2876 hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
2877     p_rx_rbr_ring_t rbr_p)
2878 {
2879 	p_rx_msg_t	*rx_msg_ring;
2880 	p_rx_msg_t	rx_msg_p;
2881 	rxring_info_t	*ring_info;
2882 	int		i;
2883 	uint32_t	size;
2884 
2885 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2886 	    "==> hxge_unmap_rxdma_channel_buf_ring"));
2887 	if (rbr_p == NULL) {
2888 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2889 		    "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
2890 		return;
2891 	}
2892 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2893 	    "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc));
2894 
2895 	rx_msg_ring = rbr_p->rx_msg_ring;
2896 	ring_info = rbr_p->ring_info;
2897 
2898 	if (rx_msg_ring == NULL || ring_info == NULL) {
2899 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2900 		    "<== hxge_unmap_rxdma_channel_buf_ring: "
2901 		    "rx_msg_ring $%p ring_info $%p", rx_msg_ring, ring_info));
2902 		return;
2903 	}
2904 
2905 	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
2906 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2907 	    " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
2908 	    "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks,
2909 	    rbr_p->tnblocks, rbr_p->rbr_max_size, size));
2910 
2911 	for (i = 0; i < rbr_p->tnblocks; i++) {
2912 		rx_msg_p = rx_msg_ring[i];
2913 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2914 		    " hxge_unmap_rxdma_channel_buf_ring: "
2915 		    "rx_msg_p $%p", rx_msg_p));
2916 		if (rx_msg_p != NULL) {
2917 			hxge_freeb(rx_msg_p);
2918 			rx_msg_ring[i] = NULL;
2919 		}
2920 	}
2921 
2922 	/*
2923 	 * We may no longer use the mutex <post_lock>. By setting
2924 	 * <rbr_state> to anything but POSTING, we prevent
2925 	 * hxge_post_page() from accessing a dead mutex.
2926 	 */
2927 	rbr_p->rbr_state = RBR_UNMAPPING;
2928 	MUTEX_DESTROY(&rbr_p->post_lock);
2929 
2930 	MUTEX_DESTROY(&rbr_p->lock);
2931 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
2932 	KMEM_FREE(rx_msg_ring, size);
2933 
2934 	if (rbr_p->rbr_ref_cnt == 0) {
2935 		/* This is the normal state of affairs. */
2936 		KMEM_FREE(rbr_p, sizeof (*rbr_p));
2937 	} else {
2938 		/*
2939 		 * Some of our buffers are still being used.
2940 		 * Therefore, tell hxge_freeb() this ring is
2941 		 * unmapped, so it may free <rbr_p> for us.
2942 		 */
2943 		rbr_p->rbr_state = RBR_UNMAPPED;
2944 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2945 		    "unmap_rxdma_buf_ring: %d %s outstanding.",
2946 		    rbr_p->rbr_ref_cnt,
2947 		    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
2948 	}
2949 
2950 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2951 	    "<== hxge_unmap_rxdma_channel_buf_ring"));
2952 }
2953 
2954 static hxge_status_t
2955 hxge_rxdma_hw_start_common(p_hxge_t hxgep)
2956 {
2957 	hxge_status_t status = HXGE_OK;
2958 
2959 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common"));
2960 
2961 	/*
2962 	 * Load the sharable parameters by writing to the function zero control
2963 	 * registers. These FZC registers should be initialized only once for
2964 	 * the entire chip.
2965 	 */
2966 	(void) hxge_init_fzc_rx_common(hxgep);
2967 
2968 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_start_common"));
2969 
2970 	return (status);
2971 }
2972 
2973 static hxge_status_t
2974 hxge_rxdma_hw_start(p_hxge_t hxgep)
2975 {
2976 	int			i, ndmas;
2977 	uint16_t		channel;
2978 	p_rx_rbr_rings_t	rx_rbr_rings;
2979 	p_rx_rbr_ring_t		*rbr_rings;
2980 	p_rx_rcr_rings_t	rx_rcr_rings;
2981 	p_rx_rcr_ring_t		*rcr_rings;
2982 	p_rx_mbox_areas_t	rx_mbox_areas_p;
2983 	p_rx_mbox_t		*rx_mbox_p;
2984 	hxge_status_t		status = HXGE_OK;
2985 
2986 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start"));
2987 
2988 	rx_rbr_rings = hxgep->rx_rbr_rings;
2989 	rx_rcr_rings = hxgep->rx_rcr_rings;
2990 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
2991 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2992 		    "<== hxge_rxdma_hw_start: NULL ring pointers"));
2993 		return (HXGE_ERROR);
2994 	}
2995 
2996 	ndmas = rx_rbr_rings->ndmas;
2997 	if (ndmas == 0) {
2998 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2999 		    "<== hxge_rxdma_hw_start: no dma channel allocated"));
3000 		return (HXGE_ERROR);
3001 	}
3002 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3003 	    "==> hxge_rxdma_hw_start (ndmas %d)", ndmas));
3004 
3005 	/*
3006 	 * Scrub the RDC Rx DMA Prefetch Buffer Command.
3007 	 */
3008 	for (i = 0; i < 128; i++) {
3009 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i);
3010 	}
3011 
3012 	/*
3013 	 * Scrub Rx DMA Shadow Tail Command.
3014 	 */
3015 	for (i = 0; i < 64; i++) {
3016 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i);
3017 	}
3018 
3019 	/*
3020 	 * Scrub Rx DMA Control Fifo Command.
3021 	 */
3022 	for (i = 0; i < 512; i++) {
3023 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i);
3024 	}
3025 
3026 	/*
3027 	 * Scrub Rx DMA Data Fifo Command.
3028 	 */
3029 	for (i = 0; i < 1536; i++) {
3030 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i);
3031 	}
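	/*
	 * The loops above walk every entry of the prefetch, shadow-tail,
	 * control-FIFO and data-FIFO command RAMs (the loop bounds appear
	 * to match the depths of those RAMs) so any stale state is
	 * scrubbed before the channels are started.
	 */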
3032 
3033 	/*
3034 	 * Reset the FIFO Error Stat.
3035 	 */
3036 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF);
3037 
3038 	/* Set the error mask to receive interrupts */
3039 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
3040 
3041 	rbr_rings = rx_rbr_rings->rbr_rings;
3042 	rcr_rings = rx_rcr_rings->rcr_rings;
3043 	rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
3044 	if (rx_mbox_areas_p) {
3045 		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
3046 	}
3047 
3048 	for (i = 0; i < ndmas; i++) {
3049 		channel = rbr_rings[i]->rdc;
3050 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3051 		    "==> hxge_rxdma_hw_start (ndmas %d) channel %d",
3052 		    ndmas, channel));
3053 		status = hxge_rxdma_start_channel(hxgep, channel,
3054 		    (p_rx_rbr_ring_t)rbr_rings[i],
3055 		    (p_rx_rcr_ring_t)rcr_rings[i],
3056 		    (p_rx_mbox_t)rx_mbox_p[i]);
3057 		if (status != HXGE_OK) {
3058 			goto hxge_rxdma_hw_start_fail1;
3059 		}
3060 	}
3061 
3062 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: "
3063 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
3064 	    rx_rbr_rings, rx_rcr_rings));
3065 	goto hxge_rxdma_hw_start_exit;
3066 
3067 hxge_rxdma_hw_start_fail1:
3068 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3069 	    "==> hxge_rxdma_hw_start: disable "
3070 	    "(status 0x%x channel %d i %d)", status, channel, i));
3071 	for (; i >= 0; i--) {
3072 		channel = rbr_rings[i]->rdc;
3073 		(void) hxge_rxdma_stop_channel(hxgep, channel);
3074 	}
3075 
3076 hxge_rxdma_hw_start_exit:
3077 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3078 	    "<== hxge_rxdma_hw_start: (status 0x%x)", status));
3079 	return (status);
3080 }
3081 
3082 static void
3083 hxge_rxdma_hw_stop(p_hxge_t hxgep)
3084 {
3085 	int			i, ndmas;
3086 	uint16_t		channel;
3087 	p_rx_rbr_rings_t	rx_rbr_rings;
3088 	p_rx_rbr_ring_t		*rbr_rings;
3089 	p_rx_rcr_rings_t	rx_rcr_rings;
3090 
3091 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop"));
3092 
3093 	rx_rbr_rings = hxgep->rx_rbr_rings;
3094 	rx_rcr_rings = hxgep->rx_rcr_rings;
3095 
3096 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
3097 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
3098 		    "<== hxge_rxdma_hw_stop: NULL ring pointers"));
3099 		return;
3100 	}
3101 
3102 	ndmas = rx_rbr_rings->ndmas;
3103 	if (!ndmas) {
3104 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
3105 		    "<== hxge_rxdma_hw_stop: no dma channel allocated"));
3106 		return;
3107 	}
3108 
3109 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3110 	    "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas));
3111 
3112 	rbr_rings = rx_rbr_rings->rbr_rings;
3113 	for (i = 0; i < ndmas; i++) {
3114 		channel = rbr_rings[i]->rdc;
3115 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3116 		    "==> hxge_rxdma_hw_stop (ndmas %d) channel %d",
3117 		    ndmas, channel));
3118 		(void) hxge_rxdma_stop_channel(hxgep, channel);
3119 	}
3120 
3121 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: "
3122 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
3123 	    rx_rbr_rings, rx_rcr_rings));
3124 
3125 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop"));
3126 }
3127 
3128 static hxge_status_t
3129 hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
3130     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
3131 {
3132 	hpi_handle_t		handle;
3133 	hpi_status_t		rs = HPI_SUCCESS;
3134 	rdc_stat_t		cs;
3135 	rdc_int_mask_t		ent_mask;
3136 	hxge_status_t		status = HXGE_OK;
3137 
3138 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel"));
3139 
3140 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3141 
3142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: "
3143 	    "hpi handle addr $%p acc $%p",
3144 	    hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
3145 
3146 	/* Reset RXDMA channel */
3147 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3148 	if (rs != HPI_SUCCESS) {
3149 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3150 		    "==> hxge_rxdma_start_channel: "
3151 		    "reset rxdma failed (0x%08x channel %d)",
3152 		    status, channel));
3153 		return (HXGE_ERROR | rs);
3154 	}
3155 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3156 	    "==> hxge_rxdma_start_channel: reset done: channel %d", channel));
3157 
3158 	/*
3159 	 * Initialize the RXDMA channel specific FZC control configurations.
3160 	 * These FZC registers are pertaining to each RX channel (logical
3161 	 * pages).
3162 	 */
3163 	status = hxge_init_fzc_rxdma_channel(hxgep,
3164 	    channel, rbr_p, rcr_p, mbox_p);
3165 	if (status != HXGE_OK) {
3166 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3167 		    "==> hxge_rxdma_start_channel: "
3168 		    "init fzc rxdma failed (0x%08x channel %d)",
3169 		    status, channel));
3170 		return (status);
3171 	}
3172 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3173 	    "==> hxge_rxdma_start_channel: fzc done"));
3174 
3175 	/*
3176 	 * Zero out the shadow and prefetch RAM.
3177 	 */
3178 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3179 	    "==> hxge_rxdma_start_channel: ram done"));
3180 
3181 	/* Set up the interrupt event masks. */
3182 	ent_mask.value = 0;
3183 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3184 	if (rs != HPI_SUCCESS) {
3185 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3186 		    "==> hxge_rxdma_start_channel: "
3187 		    "init rxdma event masks failed (0x%08x channel %d)",
3188 		    status, channel));
3189 		return (HXGE_ERROR | rs);
3190 	}
3191 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3192 	    "event done: channel %d (mask 0x%016llx)",
3193 	    channel, ent_mask.value));
3194 
3195 	/*
3196 	 * Load the RXDMA descriptors, buffers and mailbox, initialize the
3197 	 * receive DMA channel and enable it.
3198 	 */
3199 	status = hxge_enable_rxdma_channel(hxgep,
3200 	    channel, rbr_p, rcr_p, mbox_p);
3201 	if (status != HXGE_OK) {
3202 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3203 		    " hxge_rxdma_start_channel: "
3204 		    " init enable rxdma failed (0x%08x channel %d)",
3205 		    status, channel));
3206 		return (status);
3207 	}
3208 
3209 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3210 	    "enable done - channel %d", channel));
3211 
3212 	/*
3213 	 * Initialize the receive DMA control and status register.
3214 	 * Note that rdc_stat HAS to be set after the RBR and RCR rings are set.
3215 	 */
3216 	cs.value = 0;
3217 	cs.bits.mex = 1;
3218 	cs.bits.rcr_thres = 1;
3219 	cs.bits.rcr_to = 1;
3220 	cs.bits.rbr_empty = 1;
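	/*
	 * Interpretation of the armed bits (see the rdc_stat_t definition):
	 * mex presumably arms mailbox updates, while rcr_thres, rcr_to and
	 * rbr_empty arm the packet-threshold, timeout and ring-empty
	 * events, so the channel interrupts once traffic flows.
	 */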
3221 	status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
3222 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3223 	    "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
3224 	if (status != HXGE_OK) {
3225 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3226 		    "==> hxge_rxdma_start_channel: "
3227 		    "init rxdma control register failed (0x%08x channel %d",
3228 		    status, channel));
3229 		return (status);
3230 	}
3231 
3232 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3233 	    "control done - channel %d cs 0x%016llx", channel, cs.value));
3234 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3235 	    "==> hxge_rxdma_start_channel: enable done"));
3236 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel"));
3237 
3238 	return (HXGE_OK);
3239 }
3240 
3241 static hxge_status_t
3242 hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel)
3243 {
3244 	hpi_handle_t		handle;
3245 	hpi_status_t		rs = HPI_SUCCESS;
3246 	rdc_stat_t		cs;
3247 	rdc_int_mask_t		ent_mask;
3248 	hxge_status_t		status = HXGE_OK;
3249 
3250 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel"));
3251 
3252 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3253 
3254 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: "
3255 	    "hpi handle addr $%p acc $%p",
3256 	    hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
3257 
3258 	/* Reset RXDMA channel */
3259 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3260 	if (rs != HPI_SUCCESS) {
3261 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3262 		    " hxge_rxdma_stop_channel: "
3263 		    " reset rxdma failed (0x%08x channel %d)",
3264 		    rs, channel));
3265 		return (HXGE_ERROR | rs);
3266 	}
3267 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3268 	    "==> hxge_rxdma_stop_channel: reset done"));
3269 
3270 	/* Set up the interrupt event masks. */
3271 	ent_mask.value = RDC_INT_MASK_ALL;
3272 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3273 	if (rs != HPI_SUCCESS) {
3274 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3275 		    "==> hxge_rxdma_stop_channel: "
3276 		    "set rxdma event masks failed (0x%08x channel %d)",
3277 		    rs, channel));
3278 		return (HXGE_ERROR | rs);
3279 	}
3280 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3281 	    "==> hxge_rxdma_stop_channel: event done"));
3282 
3283 	/* Initialize the receive DMA control and status register */
3284 	cs.value = 0;
3285 	status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
3286 
3287 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control "
3288 	    " to default (all 0s) 0x%08x", cs.value));
3289 
3290 	if (status != HXGE_OK) {
3291 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3292 		    " hxge_rxdma_stop_channel: init rxdma"
3293 		    " control register failed (0x%08x channel %d",
3294 		    status, channel));
3295 		return (status);
3296 	}
3297 
3298 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3299 	    "==> hxge_rxdma_stop_channel: control done"));
3300 
3301 	/* disable dma channel */
3302 	status = hxge_disable_rxdma_channel(hxgep, channel);
3303 
3304 	if (status != HXGE_OK) {
3305 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3306 		    " hxge_rxdma_stop_channel: "
3307 		    " init enable rxdma failed (0x%08x channel %d)",
3308 		    status, channel));
3309 		return (status);
3310 	}
3311 
3312 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3313 	    "==> hxge_rxdma_stop_channel: disable done"));
3314 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel"));
3315 
3316 	return (HXGE_OK);
3317 }
3318 
3319 hxge_status_t
3320 hxge_rxdma_handle_sys_errors(p_hxge_t hxgep)
3321 {
3322 	hpi_handle_t		handle;
3323 	p_hxge_rdc_sys_stats_t	statsp;
3324 	rdc_fifo_err_stat_t	stat;
3325 	hxge_status_t		status = HXGE_OK;
3326 
3327 	handle = hxgep->hpi_handle;
3328 	statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats;
3329 
3330 	/* Clear the int_dbg register in case it is an injected err */
3331 	HXGE_REG_WR64(handle, RDC_FIFO_ERR_INT_DBG, 0x0);
3332 
3333 	/* Get the error status and clear the register */
3334 	HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value);
3335 	HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value);
3336 
3337 	if (stat.bits.rx_ctrl_fifo_sec) {
3338 		statsp->ctrl_fifo_sec++;
3339 		if (statsp->ctrl_fifo_sec == 1)
3340 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3341 			    "==> hxge_rxdma_handle_sys_errors: "
3342 			    "rx_ctrl_fifo_sec"));
3343 	}
3344 
3345 	if (stat.bits.rx_ctrl_fifo_ded) {
3346 		/* Global fatal error encountered */
3347 		statsp->ctrl_fifo_ded++;
3348 		HXGE_FM_REPORT_ERROR(hxgep, NULL,
3349 		    HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED);
3350 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3351 		    "==> hxge_rxdma_handle_sys_errors: "
3352 		    "fatal error: rx_ctrl_fifo_ded error"));
3353 	}
3354 
3355 	if (stat.bits.rx_data_fifo_sec) {
3356 		statsp->data_fifo_sec++;
3357 		if (statsp->data_fifo_sec == 1)
3358 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3359 			    "==> hxge_rxdma_handle_sys_errors: "
3360 			    "rx_data_fifo_sec"));
3361 	}
3362 
3363 	if (stat.bits.rx_data_fifo_ded) {
3364 		/* Global fatal error encountered */
3365 		statsp->data_fifo_ded++;
3366 		HXGE_FM_REPORT_ERROR(hxgep, NULL,
3367 		    HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED);
3368 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3369 		    "==> hxge_rxdma_handle_sys_errors: "
3370 		    "fatal error: rx_data_fifo_ded error"));
3371 	}
3372 
3373 	if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) {
3374 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3375 		    " hxge_rxdma_handle_sys_errors: fatal error\n"));
3376 		status = hxge_rx_port_fatal_err_recover(hxgep);
3377 		if (status == HXGE_OK) {
3378 			FM_SERVICE_RESTORED(hxgep);
3379 		}
3380 	}
3381 
3382 	return (HXGE_OK);
3383 }
3384 
3385 static hxge_status_t
3386 hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel)
3387 {
3388 	hpi_handle_t		handle;
3389 	hpi_status_t 		rs = HPI_SUCCESS;
3390 	hxge_status_t 		status = HXGE_OK;
3391 	p_rx_rbr_ring_t		rbrp;
3392 	p_rx_rcr_ring_t		rcrp;
3393 	p_rx_mbox_t		mboxp;
3394 	rdc_int_mask_t		ent_mask;
3395 	p_hxge_dma_common_t	dmap;
3396 	int			ring_idx;
3397 	p_rx_msg_t		rx_msg_p;
3398 	int			i;
3399 	uint32_t		hxge_port_rcr_size;
3400 	uint64_t		tmp;
3401 
3402 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover"));
3403 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3404 	    "Recovering from RxDMAChannel#%d error...", channel));
3405 
3406 	/*
3407 	 * Stop the DMA channel and wait for the stop-done indication. If the
3408 	 * stop-done bit is not set, report an error.
3409 	 */
3410 
3411 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3412 
3413 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop..."));
3414 
3415 	ring_idx = hxge_rxdma_get_ring_index(hxgep, channel);
3416 	rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[ring_idx];
3417 	rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[ring_idx];
3418 
3419 	MUTEX_ENTER(&rcrp->lock);
3420 	MUTEX_ENTER(&rbrp->lock);
3421 	MUTEX_ENTER(&rbrp->post_lock);
3422 
3423 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel..."));
3424 
3425 	rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
3426 	if (rs != HPI_SUCCESS) {
3427 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3428 		    "hpi_rxdma_cfg_rdc_disable failed"));
3429 		goto fail;
3430 	}
3431 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt..."));
3432 
3433 	/* Disable interrupt */
3434 	ent_mask.value = RDC_INT_MASK_ALL;
3435 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3436 	if (rs != HPI_SUCCESS) {
3437 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3438 		    "Set rxdma event masks failed (channel %d)", channel));
3439 	}
3440 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset..."));
3441 
3442 	/* Reset RXDMA channel */
3443 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3444 	if (rs != HPI_SUCCESS) {
3445 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3446 		    "Reset rxdma failed (channel %d)", channel));
3447 		goto fail;
3448 	}
3449 	hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
3450 	mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];
3451 
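	/*
	 * Re-initialize the software view of the RBR and RCR rings so it
	 * matches the freshly reset hardware state.
	 */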
3452 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
3453 	rbrp->rbr_rd_index = 0;
3454 
3455 	rcrp->comp_rd_index = 0;
3456 	rcrp->comp_wt_index = 0;
3457 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
3458 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
3459 #if defined(__i386)
3460 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3461 	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3462 #else
3463 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3464 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3465 #endif
3466 
3467 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
3468 	    (hxge_port_rcr_size - 1);
3469 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
3470 	    (hxge_port_rcr_size - 1);
3471 
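	/*
	 * Record the initial RCR tail as a descriptor index: keep the low
	 * 19 bits of the ring's DMA address and convert from bytes to
	 * 8-byte completion entries.
	 */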
3472 	rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc);
3473 	rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3;
3474 
3475 	dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
3476 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
3477 
3478 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n",
3479 	    rbrp->rbr_max_size));
3480 
3481 	for (i = 0; i < rbrp->rbr_max_size; i++) {
3482 		/* Reset all the buffers */
3483 		rx_msg_p = rbrp->rx_msg_ring[i];
3484 		rx_msg_p->ref_cnt = 1;
3485 		rx_msg_p->free = B_TRUE;
3486 		rx_msg_p->cur_usage_cnt = 0;
3487 		rx_msg_p->max_usage_cnt = 0;
3488 		rx_msg_p->pkt_buf_size = 0;
3489 	}
3490 
3491 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start..."));
3492 
3493 	status = hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp);
3494 	if (status != HXGE_OK) {
3495 		goto fail;
3496 	}
3497 
3498 	/*
3499 	 * The DMA channel may disable itself automatically.  As a work-around,
3500 	 * read back RDC_RX_CFG1 and then explicitly re-enable the channel.
3501 	 */
3502 	HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp);
3503 	rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
3504 	if (rs != HPI_SUCCESS) {
3505 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3506 		    "hpi_rxdma_cfg_rdc_enable (channel %d)", channel));
3507 	}
3508 
3509 	MUTEX_EXIT(&rbrp->post_lock);
3510 	MUTEX_EXIT(&rbrp->lock);
3511 	MUTEX_EXIT(&rcrp->lock);
3512 
3513 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3514 	    "Recovery Successful, RxDMAChannel#%d Restored", channel));
3515 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover"));
3516 
3517 	return (HXGE_OK);
3518 
3519 fail:
3520 	MUTEX_EXIT(&rbrp->post_lock);
3521 	MUTEX_EXIT(&rbrp->lock);
3522 	MUTEX_EXIT(&rcrp->lock);
3523 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
3524 
3525 	return (HXGE_ERROR | rs);
3526 }
3527 
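/*
 * hxge_rx_port_fatal_err_recover:
 *
 *	Recover the whole Rx port after a fatal RDC FIFO error: reset the
 *	RDC block through the PEU, disable the RxMAC, restore the common
 *	RDC settings, run fatal error recovery on every RxDMA channel,
 *	then reset, re-initialize and re-enable the RxMAC and restore the
 *	RDC FIFO error interrupt mask.
 */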
3528 static hxge_status_t
3529 hxge_rx_port_fatal_err_recover(p_hxge_t hxgep)
3530 {
3531 	hxge_status_t		status = HXGE_OK;
3532 	p_hxge_dma_common_t	*dma_buf_p;
3533 	uint16_t		channel;
3534 	int			ndmas;
3535 	int			i;
3536 	block_reset_t		reset_reg;
3537 
3538 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover"));
3539 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ..."));
3540 
3541 	/* Reset RDC block from PEU for this fatal error */
3542 	reset_reg.value = 0;
3543 	reset_reg.bits.rdc_rst = 1;
3544 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
3545 
3546 	/* Disable RxMAC */
3547 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n"));
3548 	if (hxge_rx_vmac_disable(hxgep) != HXGE_OK)
3549 		goto fail;
3550 
3551 	HXGE_DELAY(1000);
3552 
3553 	/* Restore any common settings after PEU reset */
3554 	if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK)
3555 		goto fail;
3556 
3557 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Recover all RxDMA channels..."));
3558 
3559 	ndmas = hxgep->rx_buf_pool_p->ndmas;
3560 	dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p;
3561 
3562 	for (i = 0; i < ndmas; i++) {
3563 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
3564 		if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) {
3565 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3566 			    "Could not recover channel %d", channel));
3567 		}
3568 	}
3569 
3570 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC..."));
3571 
3572 	/* Reset RxMAC */
3573 	if (hxge_rx_vmac_reset(hxgep) != HXGE_OK) {
3574 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3575 		    "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
3576 		goto fail;
3577 	}
3578 
3579 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC..."));
3580 
3581 	/* Re-Initialize RxMAC */
3582 	if ((status = hxge_rx_vmac_init(hxgep)) != HXGE_OK) {
3583 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3584 		    "hxge_rx_port_fatal_err_recover: Failed to init RxMAC"));
3585 		goto fail;
3586 	}
3587 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC..."));
3588 
3589 	/* Re-enable RxMAC */
3590 	if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) {
3591 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3592 		    "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC"));
3593 		goto fail;
3594 	}
3595 
3596 	/* Reset the error mask since PEU reset cleared it */
3597 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
3598 
3599 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3600 	    "Recovery Successful, RxPort Restored"));
3601 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover"));
3602 
3603 	return (HXGE_OK);
3604 fail:
3605 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
3606 	return (status == HXGE_OK ? HXGE_ERROR : status);
3607 }
3608