xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_fzc.c (revision 602ca9ea)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include	<nxge_impl.h>
29 #include	<npi_mac.h>
30 #include	<npi_rxdma.h>
31 #include	<nxge_hio.h>
32 
33 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
34 static int	nxge_herr2kerr(uint64_t);
35 #endif
36 
37 static nxge_status_t nxge_init_fzc_rdc_pages(p_nxge_t,
38     uint16_t, dma_log_page_t *, dma_log_page_t *);
39 
40 static nxge_status_t nxge_init_fzc_tdc_pages(p_nxge_t,
41     uint16_t, dma_log_page_t *, dma_log_page_t *);
42 
43 /*
44  * The following interfaces are controlled by the
45  * function control registers. Some global registers
46  * are to be initialized by only one of the 2/4 functions.
47  * Use the test and set register.
48  */
49 /*ARGSUSED*/
50 nxge_status_t
51 nxge_test_and_set(p_nxge_t nxgep, uint8_t tas)
52 {
53 	npi_handle_t		handle;
54 	npi_status_t		rs = NPI_SUCCESS;
55 
56 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
57 	if ((rs = npi_dev_func_sr_sr_get_set_clear(handle, tas))
58 			!= NPI_SUCCESS) {
59 		return (NXGE_ERROR | rs);
60 	}
61 
62 	return (NXGE_OK);
63 }
64 
65 nxge_status_t
66 nxge_set_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t mpc)
67 {
68 	npi_handle_t		handle;
69 	npi_status_t		rs = NPI_SUCCESS;
70 
71 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_set_fzc_multi_part_ctl"));
72 
73 	/*
74 	 * In multi-partitioning, the partition manager
75 	 * who owns function zero should set this multi-partition
76 	 * control bit.
77 	 */
78 	if (nxgep->use_partition && nxgep->function_num) {
79 		return (NXGE_ERROR);
80 	}
81 
82 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
83 	if ((rs = npi_fzc_mpc_set(handle, mpc)) != NPI_SUCCESS) {
84 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
85 			"<== nxge_set_fzc_multi_part_ctl"));
86 		return (NXGE_ERROR | rs);
87 	}
88 
89 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_set_fzc_multi_part_ctl"));
90 
91 	return (NXGE_OK);
92 }
93 
94 nxge_status_t
95 nxge_get_fzc_multi_part_ctl(p_nxge_t nxgep, boolean_t *mpc_p)
96 {
97 	npi_handle_t		handle;
98 	npi_status_t		rs = NPI_SUCCESS;
99 
100 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_get_fzc_multi_part_ctl"));
101 
102 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
103 	if ((rs = npi_fzc_mpc_get(handle, mpc_p)) != NPI_SUCCESS) {
104 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
105 			"<== nxge_set_fzc_multi_part_ctl"));
106 		return (NXGE_ERROR | rs);
107 	}
108 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_get_fzc_multi_part_ctl"));
109 
110 	return (NXGE_OK);
111 }
112 
113 /*
114  * System interrupt registers that are under function zero
115  * management.
116  */
117 nxge_status_t
118 nxge_fzc_intr_init(p_nxge_t nxgep)
119 {
120 	nxge_status_t	status = NXGE_OK;
121 
122 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_init"));
123 
124 	/* Configure the initial timer resolution */
125 	if ((status = nxge_fzc_intr_tmres_set(nxgep)) != NXGE_OK) {
126 		return (status);
127 	}
128 
129 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
130 		/*
131 		 * Set up the logical device group's logical devices that
132 		 * the group owns.
133 		 */
134 		if ((status = nxge_fzc_intr_ldg_num_set(nxgep)) != NXGE_OK)
135 			goto fzc_intr_init_exit;
136 
137 		/* Configure the system interrupt data */
138 		if ((status = nxge_fzc_intr_sid_set(nxgep)) != NXGE_OK)
139 			goto fzc_intr_init_exit;
140 	}
141 
142 fzc_intr_init_exit:
143 
144 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_init"));
145 
146 	return (status);
147 }
148 
/*
 * nxge_fzc_intr_ldg_num_set
 *
 *	For every logical device (LDV) in every logical device group
 *	(LDG) owned by this instance, program the hardware with the LDG
 *	number that device has been assigned to.
 *
 * Returns:
 *	NXGE_OK on success; NXGE_ERROR when the LDG/LDV tables are not
 *	set up; NXGE_ERROR | <NPI status> when an NPI call fails.
 */
nxge_status_t
nxge_fzc_intr_ldg_num_set(p_nxge_t nxgep)
{
	p_nxge_ldg_t	ldgp;
	p_nxge_ldv_t	ldvp;
	npi_handle_t	handle;
	int		i, j;
	npi_status_t	rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_ldg_num_set"));

	if (nxgep->ldgvp == NULL) {
		return (NXGE_ERROR);
	}

	ldgp = nxgep->ldgvp->ldgp;
	ldvp = nxgep->ldgvp->ldvp;
	if (ldgp == NULL || ldvp == NULL) {
		return (NXGE_ERROR);
	}

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/*
	 * NOTE: <ldvp> advances continuously across the outer loop —
	 * the flat ldgvp->ldvp array is laid out group after group, so
	 * the inner loop must NOT reset it between groups.
	 */
	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
			"==> nxge_fzc_intr_ldg_num_set "
			"<== nxge_f(Neptune): # ldv %d "
			"in group %d", ldgp->nldvs, ldgp->ldg));

		for (j = 0; j < ldgp->nldvs; j++, ldvp++) {
			rs = npi_fzc_ldg_num_set(handle, ldvp->ldv,
				ldvp->ldg_assigned);
			if (rs != NPI_SUCCESS) {
				NXGE_DEBUG_MSG((nxgep, INT_CTL,
					"<== nxge_fzc_intr_ldg_num_set failed "
					" rs 0x%x ldv %d ldg %d",
					rs, ldvp->ldv, ldvp->ldg_assigned));
				return (NXGE_ERROR | rs);
			}
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
				"<== nxge_fzc_intr_ldg_num_set OK "
				" ldv %d ldg %d",
				ldvp->ldv, ldvp->ldg_assigned));
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_ldg_num_set"));

	return (NXGE_OK);
}
199 
200 nxge_status_t
201 nxge_fzc_intr_tmres_set(p_nxge_t nxgep)
202 {
203 	npi_handle_t	handle;
204 	npi_status_t	rs = NPI_SUCCESS;
205 
206 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_tmrese_set"));
207 	if (nxgep->ldgvp == NULL) {
208 		return (NXGE_ERROR);
209 	}
210 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
211 	if ((rs = npi_fzc_ldg_timer_res_set(handle, nxgep->ldgvp->tmres))) {
212 		return (NXGE_ERROR | rs);
213 	}
214 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_tmrese_set"));
215 
216 	return (NXGE_OK);
217 }
218 
219 nxge_status_t
220 nxge_fzc_intr_sid_set(p_nxge_t nxgep)
221 {
222 	npi_handle_t	handle;
223 	p_nxge_ldg_t	ldgp;
224 	fzc_sid_t	sid;
225 	int		i;
226 	npi_status_t	rs = NPI_SUCCESS;
227 
228 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_fzc_intr_sid_set"));
229 	if (nxgep->ldgvp == NULL) {
230 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
231 			"<== nxge_fzc_intr_sid_set: no ldg"));
232 		return (NXGE_ERROR);
233 	}
234 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
235 	ldgp = nxgep->ldgvp->ldgp;
236 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
237 		"==> nxge_fzc_intr_sid_set: #int %d", nxgep->ldgvp->ldg_intrs));
238 	for (i = 0; i < nxgep->ldgvp->ldg_intrs; i++, ldgp++) {
239 		sid.ldg = ldgp->ldg;
240 		sid.niu = B_FALSE;
241 		sid.func = ldgp->func;
242 		sid.vector = ldgp->vector;
243 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
244 			"==> nxge_fzc_intr_sid_set(%d): func %d group %d "
245 			"vector %d",
246 			i, sid.func, sid.ldg, sid.vector));
247 		rs = npi_fzc_sid_set(handle, sid);
248 		if (rs != NPI_SUCCESS) {
249 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
250 				"<== nxge_fzc_intr_sid_set:failed 0x%x",
251 				rs));
252 			return (NXGE_ERROR | rs);
253 		}
254 	}
255 
256 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_fzc_intr_sid_set"));
257 
258 	return (NXGE_OK);
259 
260 }
261 
262 /*
263  * nxge_init_fzc_rdc
264  *
265  *	Initialize all of a RDC's FZC_DMC registers.
266  *	This is executed by the service domain, on behalf of a
267  *	guest domain, who cannot access these registers.
268  *
269  * Arguments:
270  * 	nxgep
271  * 	channel		The channel to initialize.
272  *
273  * NPI_NXGE function calls:
274  *	nxge_init_fzc_rdc_pages()
275  *
276  * Context:
277  *	Service Domain
278  */
279 /*ARGSUSED*/
280 nxge_status_t
281 nxge_init_fzc_rdc(p_nxge_t nxgep, uint16_t channel)
282 {
283 	nxge_status_t	status = NXGE_OK;
284 
285 	dma_log_page_t	page1, page2;
286 	npi_handle_t	handle;
287 	rdc_red_para_t	red;
288 
289 	/*
290 	 * Initialize the RxDMA channel-specific FZC control
291 	 * registers.
292 	 */
293 
294 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_tdc"));
295 
296 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
297 
298 	/* Reset RXDMA channel */
299 	status = npi_rxdma_cfg_rdc_reset(handle, channel);
300 	if (status != NPI_SUCCESS) {
301 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
302 		    "==> nxge_init_fzc_rdc: npi_rxdma_cfg_rdc_reset(%d) "
303 		    "returned 0x%08x", channel, status));
304 		return (NXGE_ERROR | status);
305 	}
306 
307 	/*
308 	 * These values have been copied from
309 	 * nxge_txdma.c:nxge_map_txdma_channel_cfg_ring().
310 	 */
311 	page1.page_num = 0;
312 	page1.valid = 1;
313 	page1.func_num = nxgep->function_num;
314 	page1.mask = 0;
315 	page1.value = 0;
316 	page1.reloc = 0;
317 
318 	page2.page_num = 1;
319 	page2.valid = 1;
320 	page2.func_num = nxgep->function_num;
321 	page2.mask = 0;
322 	page2.value = 0;
323 	page2.reloc = 0;
324 
325 	if (nxgep->niu_type == N2_NIU) {
326 #if !defined(NIU_HV_WORKAROUND)
327 		status = NXGE_OK;
328 #else
329 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
330 		    "==> nxge_init_fzc_rxdma_channel: N2_NIU - NEED to "
331 		    "set up logical pages"));
332 		/* Initialize the RXDMA logical pages */
333 		status = nxge_init_fzc_rdc_pages(nxgep, channel,
334 		    &page1, &page2);
335 		if (status != NXGE_OK) {
336 			return (status);
337 		}
338 #endif
339 	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
340 		/* Initialize the RXDMA logical pages */
341 		status = nxge_init_fzc_rdc_pages(nxgep, channel,
342 		    &page1, &page2);
343 		if (status != NXGE_OK) {
344 			return (status);
345 		}
346 	} else {
347 		return (NXGE_ERROR);
348 	}
349 
350 	/*
351 	 * Configure RED parameters
352 	 */
353 	red.value = 0;
354 	red.bits.ldw.win = RXDMA_RED_WINDOW_DEFAULT;
355 	red.bits.ldw.thre =
356 	    (nxgep->nxge_port_rcr_size - RXDMA_RED_LESS_ENTRIES);
357 	red.bits.ldw.win_syn = RXDMA_RED_WINDOW_DEFAULT;
358 	red.bits.ldw.thre_sync =
359 	    (nxgep->nxge_port_rcr_size - RXDMA_RED_LESS_ENTRIES);
360 
361 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
362 		"==> nxge_init_fzc_rxdma_channel_red(thre_sync %d(%x))",
363 		red.bits.ldw.thre_sync,
364 		red.bits.ldw.thre_sync));
365 
366 	status |= npi_rxdma_cfg_wred_param(handle, channel, &red);
367 
368 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_init_fzc_rdc"));
369 
370 	return (status);
371 }
372 
373 /*
374  * nxge_init_fzc_rxdma_channel
375  *
376  *	Initialize all per-channel FZC_DMC registers.
377  *
378  * Arguments:
379  * 	nxgep
380  * 	channel		The channel to start
381  *
382  * NPI_NXGE function calls:
383  *	nxge_init_hv_fzc_rxdma_channel_pages()
384  *	nxge_init_fzc_rxdma_channel_pages()
385  *	nxge_init_fzc_rxdma_channel_red()
386  *
387  * Context:
388  *	Service Domain
389  */
/*ARGSUSED*/
nxge_status_t
nxge_init_fzc_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
	rx_rbr_ring_t		*rbr_ring;
	rx_rcr_ring_t		*rcr_ring;

	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_init_fzc_rxdma_channel"));

	/* Ring state for this channel, set up earlier by the driver. */
	rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
	rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];

	if (nxgep->niu_type == N2_NIU) {
#ifndef	NIU_HV_WORKAROUND
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
		/*
		 * N2/NIU on sun4v: the logical pages must be configured
		 * through hypervisor calls rather than direct PIO.
		 */
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_init_fzc_rxdma_channel: N2_NIU - call HV "
		    "set up logical pages"));
		/* Initialize the RXDMA logical pages */
		status = nxge_init_hv_fzc_rxdma_channel_pages(nxgep, channel,
		    rbr_ring);
		if (status != NXGE_OK) {
			return (status);
		}
#endif
		status = NXGE_OK;
#else
		/* NIU_HV_WORKAROUND: program the pages directly instead. */
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_init_fzc_rxdma_channel: N2_NIU - NEED to "
		    "set up logical pages"));
		/* Initialize the RXDMA logical pages */
		status = nxge_init_fzc_rxdma_channel_pages(nxgep, channel,
		    rbr_ring);
		if (status != NXGE_OK) {
			return (status);
		}
#endif
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		/* Neptune: program the logical pages via PIO. */
		/* Initialize the RXDMA logical pages */
		status = nxge_init_fzc_rxdma_channel_pages(nxgep,
		    channel, rbr_ring);
		if (status != NXGE_OK) {
			return (status);
		}
	} else {
		/* Unrecognized NIU type. */
		return (NXGE_ERROR);
	}

	/* Configure RED parameters */
	status = nxge_init_fzc_rxdma_channel_red(nxgep, channel, rcr_ring);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_init_fzc_rxdma_channel"));
	return (status);
}
446 
447 /*
448  * nxge_init_fzc_rdc_pages
449  *
450  *	Configure a TDC's logical pages.
451  *
452  *	This function is executed by the service domain, on behalf of
453  *	a guest domain, to whom this RDC has been loaned.
454  *
455  * Arguments:
456  * 	nxgep
457  * 	channel		The channel to initialize.
458  * 	page0		Logical page 0 definition.
459  * 	page1		Logical page 1 definition.
460  *
461  * Notes:
462  *	I think that this function can be called from any
463  *	domain, but I need to check.
464  *
465  * NPI/NXGE function calls:
466  *	hv_niu_tx_logical_page_conf()
467  *	hv_niu_tx_logical_page_info()
468  *
469  * Context:
470  *	Any domain
471  */
472 nxge_status_t
473 nxge_init_fzc_rdc_pages(
474 	p_nxge_t nxgep,
475 	uint16_t channel,
476 	dma_log_page_t *page0,
477 	dma_log_page_t *page1)
478 {
479 	npi_handle_t handle;
480 	npi_status_t rs;
481 
482 	uint64_t page_handle;
483 
484 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
485 		"==> nxge_init_fzc_txdma_channel_pages"));
486 
487 #ifndef	NIU_HV_WORKAROUND
488 	if (nxgep->niu_type == N2_NIU) {
489 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
490 			"<== nxge_init_fzc_rdc_pages: "
491 			"N2_NIU: no need to set rxdma logical pages"));
492 		return (NXGE_OK);
493 	}
494 #else
495 	if (nxgep->niu_type == N2_NIU) {
496 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
497 			"<== nxge_init_fzc_rdc_pages: "
498 			"N2_NIU: NEED to set rxdma logical pages"));
499 	}
500 #endif
501 
502 	/*
503 	 * Initialize logical page 1.
504 	 */
505 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
506 	if ((rs = npi_rxdma_cfg_logical_page(handle, channel, page0))
507 	    != NPI_SUCCESS)
508 		return (NXGE_ERROR | rs);
509 
510 	/*
511 	 * Initialize logical page 2.
512 	 */
513 	if ((rs = npi_rxdma_cfg_logical_page(handle, channel, page1))
514 	    != NPI_SUCCESS)
515 		return (NXGE_ERROR | rs);
516 
517 	/*
518 	 * Initialize the page handle.
519 	 * (In the current driver, this is always set to 0.)
520 	 */
521 	page_handle = 0;
522 	rs = npi_rxdma_cfg_logical_page_handle(handle, channel, page_handle);
523 	if (rs == NPI_SUCCESS) {
524 		return (NXGE_OK);
525 	} else {
526 		return (NXGE_ERROR | rs);
527 	}
528 }
529 
530 /*ARGSUSED*/
531 nxge_status_t
532 nxge_init_fzc_rxdma_channel_pages(p_nxge_t nxgep,
533 		uint16_t channel, p_rx_rbr_ring_t rbrp)
534 {
535 	npi_handle_t		handle;
536 	dma_log_page_t		cfg;
537 	npi_status_t		rs = NPI_SUCCESS;
538 
539 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
540 		"==> nxge_init_fzc_rxdma_channel_pages"));
541 
542 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
543 	/*
544 	 * Initialize logical page 1.
545 	 */
546 	cfg.func_num = nxgep->function_num;
547 	cfg.page_num = 0;
548 	cfg.valid = rbrp->page_valid.bits.ldw.page0;
549 	cfg.value = rbrp->page_value_1.value;
550 	cfg.mask = rbrp->page_mask_1.value;
551 	cfg.reloc = rbrp->page_reloc_1.value;
552 	rs = npi_rxdma_cfg_logical_page(handle, channel,
553 			(p_dma_log_page_t)&cfg);
554 	if (rs != NPI_SUCCESS) {
555 		return (NXGE_ERROR | rs);
556 	}
557 
558 	/*
559 	 * Initialize logical page 2.
560 	 */
561 	cfg.page_num = 1;
562 	cfg.valid = rbrp->page_valid.bits.ldw.page1;
563 	cfg.value = rbrp->page_value_2.value;
564 	cfg.mask = rbrp->page_mask_2.value;
565 	cfg.reloc = rbrp->page_reloc_2.value;
566 
567 	rs = npi_rxdma_cfg_logical_page(handle, channel, &cfg);
568 	if (rs != NPI_SUCCESS) {
569 		return (NXGE_ERROR | rs);
570 	}
571 
572 	/* Initialize the page handle */
573 	rs = npi_rxdma_cfg_logical_page_handle(handle, channel,
574 			rbrp->page_hdl.bits.ldw.handle);
575 
576 	if (rs != NPI_SUCCESS) {
577 		return (NXGE_ERROR | rs);
578 	}
579 
580 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
581 		"<== nxge_init_fzc_rxdma_channel_pages"));
582 
583 	return (NXGE_OK);
584 }
585 
586 /*ARGSUSED*/
587 nxge_status_t
588 nxge_init_fzc_rxdma_channel_red(p_nxge_t nxgep,
589 	uint16_t channel, p_rx_rcr_ring_t rcr_p)
590 {
591 	npi_handle_t		handle;
592 	rdc_red_para_t		red;
593 	npi_status_t		rs = NPI_SUCCESS;
594 
595 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_channel_red"));
596 
597 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
598 	red.value = 0;
599 	red.bits.ldw.win = RXDMA_RED_WINDOW_DEFAULT;
600 	red.bits.ldw.thre = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
601 	red.bits.ldw.win_syn = RXDMA_RED_WINDOW_DEFAULT;
602 	red.bits.ldw.thre_sync = (rcr_p->comp_size - RXDMA_RED_LESS_ENTRIES);
603 
604 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
605 		"==> nxge_init_fzc_rxdma_channel_red(thre_sync %d(%x))",
606 		red.bits.ldw.thre_sync,
607 		red.bits.ldw.thre_sync));
608 
609 	rs = npi_rxdma_cfg_wred_param(handle, channel, &red);
610 	if (rs != NPI_SUCCESS) {
611 		return (NXGE_ERROR | rs);
612 	}
613 
614 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
615 		"<== nxge_init_fzc_rxdma_channel_red"));
616 
617 	return (NXGE_OK);
618 }
619 
620 /*
621  * nxge_init_fzc_tdc
622  *
623  *	Initialize all of a TDC's FZC_DMC registers.
624  *	This is executed by the service domain, on behalf of a
625  *	guest domain, who cannot access these registers.
626  *
627  * Arguments:
628  * 	nxgep
629  * 	channel		The channel to initialize.
630  *
631  * NPI_NXGE function calls:
632  *	nxge_init_fzc_tdc_pages()
633  *	npi_txc_dma_max_burst_set()
634  *
635  * Registers accessed:
636  *	TXC_DMA_MAX_BURST
637  *
638  * Context:
639  *	Service Domain
640  */
641 /*ARGSUSED*/
642 nxge_status_t
643 nxge_init_fzc_tdc(p_nxge_t nxgep, uint16_t channel)
644 {
645 	nxge_status_t	status = NXGE_OK;
646 
647 	dma_log_page_t	page1, page2;
648 	npi_handle_t	handle;
649 
650 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_tdc"));
651 
652 	/*
653 	 * These values have been copied from
654 	 * nxge_txdma.c:nxge_map_txdma_channel_cfg_ring().
655 	 */
656 	page1.page_num = 0;
657 	page1.valid = 1;
658 	page1.func_num = nxgep->function_num;
659 	page1.mask = 0;
660 	page1.value = 0;
661 	page1.reloc = 0;
662 
663 	page1.page_num = 1;
664 	page1.valid = 1;
665 	page1.func_num = nxgep->function_num;
666 	page1.mask = 0;
667 	page1.value = 0;
668 	page1.reloc = 0;
669 
670 #ifdef	NIU_HV_WORKAROUND
671 	if (nxgep->niu_type == N2_NIU) {
672 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
673 		    "==> nxge_init_fzc_txdma_channel "
674 		    "N2_NIU: NEED to set up txdma logical pages"));
675 		/* Initialize the TXDMA logical pages */
676 		(void) nxge_init_fzc_tdc_pages(nxgep, channel,
677 		    &page1, &page2);
678 	}
679 #endif
680 	if (nxgep->niu_type != N2_NIU) {
681 		if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
682 			/* Initialize the TXDMA logical pages */
683 			(void) nxge_init_fzc_tdc_pages(nxgep, channel,
684 			    &page1, &page2);
685 		} else
686 			return (NXGE_ERROR);
687 	}
688 
689 	/*
690 	 * Configure the TXC DMA Max Burst value.
691 	 *
692 	 * PRM.13.5
693 	 *
694 	 * TXC DMA Max Burst. TXC_DMA_MAX (FZC_TXC + 0000016)
695 	 * 19:0		dma_max_burst		RW
696 	 * Max burst value associated with DMA. Used by DRR engine
697 	 * for computing when DMA has gone into deficit.
698 	 */
699 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
700 	(void) npi_txc_dma_max_burst_set(
701 		handle, channel, TXC_DMA_MAX_BURST_DEFAULT);
702 
703 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_init_fzc_tdc"));
704 
705 	return (status);
706 }
707 
/*
 * nxge_init_fzc_txdma_channel
 *
 *	Initialize the per-channel FZC_DMC state for a TDC: the TXDMA
 *	logical pages and the transmit DRR weight (TXC max burst).
 *
 * Arguments:
 *	nxgep
 *	channel		The channel to initialize.
 *	tx_ring_p	The transmit ring (source of the page settings).
 *	mbox_p		Unused in this function.
 *
 * Context:
 *	Service Domain
 */
/*ARGSUSED*/
nxge_status_t
nxge_init_fzc_txdma_channel(p_nxge_t nxgep, uint16_t channel,
	p_tx_ring_t tx_ring_p, p_tx_mbox_t mbox_p)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_init_fzc_txdma_channel"));

	if (nxgep->niu_type == N2_NIU) {
#ifndef	NIU_HV_WORKAROUND
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
		/*
		 * N2/NIU on sun4v: logical pages are configured through
		 * hypervisor calls rather than direct PIO.
		 */
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		    "==> nxge_init_fzc_txdma_channel "
		    "N2_NIU: call HV to set up txdma logical pages"));
		status = nxge_init_hv_fzc_txdma_channel_pages(nxgep, channel,
		    tx_ring_p);
		if (status != NXGE_OK) {
			return (status);
		}
#endif
		status = NXGE_OK;
#else
		/* NIU_HV_WORKAROUND: program the pages directly instead. */
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		    "==> nxge_init_fzc_txdma_channel "
		    "N2_NIU: NEED to set up txdma logical pages"));
		/* Initialize the TXDMA logical pages */
		(void) nxge_init_fzc_txdma_channel_pages(nxgep, channel,
		    tx_ring_p);
#endif
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		/* Neptune: program the logical pages via PIO. */
		/* Initialize the TXDMA logical pages */
		(void) nxge_init_fzc_txdma_channel_pages(nxgep,
		    channel, tx_ring_p);
	} else {
		/* Unrecognized NIU type. */
		return (NXGE_ERROR);
	}

	/*
	 * Configure Transmit DRR Weight parameters
	 * (It actually programs the TXC max burst register).
	 */
	(void) nxge_init_fzc_txdma_channel_drr(nxgep, channel, tx_ring_p);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"<== nxge_init_fzc_txdma_channel"));
	return (status);
}
757 
758 nxge_status_t
759 nxge_init_fzc_common(p_nxge_t nxgep)
760 {
761 	nxge_status_t	status = NXGE_OK;
762 
763 	(void) nxge_init_fzc_rx_common(nxgep);
764 
765 	return (status);
766 }
767 
/*
 * nxge_init_fzc_rx_common
 *
 *	Initialize the global (not per-channel) RXDMA FZC registers:
 *	the RXDMA clock divider, the WRED random seed, and every RDC
 *	table bound to this function.
 *
 * Returns:
 *	NXGE_OK, NXGE_ERROR, or NXGE_ERROR | <NPI status>.
 */
nxge_status_t
nxge_init_fzc_rx_common(p_nxge_t nxgep)
{
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	nxge_status_t	status = NXGE_OK;
	clock_t		lbolt;
	int		table;

	nxge_hw_pt_cfg_t *hardware;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rx_common"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	if (!handle.regp) {
		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			"==> nxge_init_fzc_rx_common null ptr"));
		return (NXGE_ERROR);
	}

	/*
	 * Configure the rxdma clock divider
	 * This is the granularity counter based on
	 * the hardware system clock (i.e. 300 Mhz) and
	 * it is running around 3 nanoseconds.
	 * So, set the clock divider counter to 1000 to get
	 * microsecond granularity.
	 * For example, for a 3 microsecond timeout, the timeout
	 * will be set to 1.
	 */
	rs = npi_rxdma_cfg_clock_div_set(handle, RXDMA_CK_DIV_DEFAULT);
	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

#if defined(__i386)
	/* 32-bit x86 kernels: run the RX/TX DMA engines in 32-bit mode. */
	rs = npi_rxdma_cfg_32bitmode_enable(handle);
	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);
	rs = npi_txdma_mode32_set(handle, B_TRUE);
	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);
#endif

	/*
	 * Enable WRED and program an initial value.
	 * Use time to set the initial random number.
	 */
	(void) drv_getparm(LBOLT, &lbolt);
	rs = npi_rxdma_cfg_red_rand_init(handle, (uint16_t)lbolt);
	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	/*
	 * Initialize every RDC table bound to this function: grpids[]
	 * holds (function_num + 256) for tables this instance owns.
	 * NOTE(review): only the status of the LAST matching table is
	 * retained — earlier failures are overwritten; confirm intended.
	 */
	hardware = &nxgep->pt_config.hw_config;
	for (table = 0; table < NXGE_MAX_RDC_GRPS; table++) {
		/* Does this table belong to <nxgep>? */
		if (hardware->grpids[table] == (nxgep->function_num + 256))
			status = nxge_init_fzc_rdc_tbl(nxgep, table);
	}

	/* Ethernet Timeout Counter (?) */

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"<== nxge_init_fzc_rx_common:status 0x%08x", status));

	return (status);
}
833 
834 nxge_status_t
835 nxge_init_fzc_rdc_tbl(p_nxge_t nxge, int rdc_tbl)
836 {
837 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
838 	nx_rdc_tbl_t	*table;
839 	nxge_rdc_grp_t	*group;
840 	npi_handle_t	handle;
841 
842 	npi_status_t	rs = NPI_SUCCESS;
843 	nxge_status_t	status = NXGE_OK;
844 
845 	NXGE_DEBUG_MSG((nxge, DMA_CTL, "==> nxge_init_fzc_rdc_tbl(%d)", table));
846 
847 	group = &nxge->pt_config.rdc_grps[rdc_tbl];
848 
849 	/* This RDC table must have been previously bound to <nxge>. */
850 	MUTEX_ENTER(&nhd->lock);
851 	table = &nhd->rdc_tbl[rdc_tbl];
852 	if (table->nxge != (uintptr_t)nxge) {
853 		MUTEX_EXIT(&nhd->lock);
854 		NXGE_ERROR_MSG((nxge, DMA_CTL,
855 		    "nxge_init_fzc_rdc_tbl(%d): not owner", table));
856 		return (NXGE_ERROR);
857 	} else {
858 		table->map = group->map;
859 	}
860 	MUTEX_EXIT(&nhd->lock);
861 
862 	handle = NXGE_DEV_NPI_HANDLE(nxge);
863 
864 	rs = npi_rxdma_rdc_table_config(handle, rdc_tbl,
865 	    group->map, group->max_rdcs);
866 
867 	if (rs != NPI_SUCCESS) {
868 		status = NXGE_ERROR | rs;
869 	}
870 
871 	NXGE_DEBUG_MSG((nxge, DMA_CTL, "<== nxge_init_fzc_rdc_tbl(%d)", table));
872 	return (status);
873 }
874 
/*
 * rdc_tbl_bind
 *
 *	Bind a free RDC table to <nxge>.  If <rdc_tbl> is a valid table
 *	index, only that specific table is tried; any negative or
 *	out-of-range value means "take the first unbound table".
 *	Acquires nhd->lock internally (callers must not hold it).
 *
 * Returns:
 *	The index of the table bound, or -EBUSY if none was available.
 */
static
int
rdc_tbl_bind(p_nxge_t nxge, int rdc_tbl)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nx_rdc_tbl_t *table;
	int i;

	NXGE_DEBUG_MSG((nxge, DMA_CTL, "==> nxge_fzc_rdc_tbl_bind"));

	MUTEX_ENTER(&nhd->lock);
	/* is the caller asking for a particular table? */
	if (rdc_tbl >= 0 && rdc_tbl < NXGE_MAX_RDC_GROUPS) {
		table = &nhd->rdc_tbl[rdc_tbl];
		/* A zero owner field means the table is unbound. */
		if (table->nxge == 0) {
			table->nxge = (uintptr_t)nxge; /* It is now bound. */
			NXGE_DEBUG_MSG((nxge, DMA_CTL,
			    "<== nxge_fzc_rdc_tbl_bind(%d)", rdc_tbl));
			MUTEX_EXIT(&nhd->lock);
			return (rdc_tbl);
		}
		/* The requested table is busy: falls through to -EBUSY. */
	} else {	/* The caller will take any old RDC table. */
		for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
			nx_rdc_tbl_t *table = &nhd->rdc_tbl[i];
			if (table->nxge == 0) {
				table->nxge = (uintptr_t)nxge;
				/* It is now bound. */
				MUTEX_EXIT(&nhd->lock);
				NXGE_DEBUG_MSG((nxge, DMA_CTL,
				    "<== nxge_fzc_rdc_tbl_bind: %d", i));
				return (i);
			}
		}
	}
	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_fzc_rdc_tbl_bind"));

	return (-EBUSY);	/* RDC tables are bound. */
}
915 
916 int
917 nxge_fzc_rdc_tbl_bind(
918 	nxge_t *nxge,
919 	int grp_index,
920 	int acceptNoSubstitutes)
921 {
922 	nxge_hw_pt_cfg_t *hardware;
923 	int index;
924 
925 	hardware = &nxge->pt_config.hw_config;
926 
927 	if ((index = rdc_tbl_bind(nxge, grp_index)) < 0) {
928 		if (acceptNoSubstitutes)
929 			return (index);
930 		index = rdc_tbl_bind(nxge, grp_index);
931 		if (index < 0) {
932 			NXGE_ERROR_MSG((nxge, OBP_CTL,
933 			    "nxge_fzc_rdc_tbl_init: "
934 			    "there are no free RDC tables!"));
935 			return (index);
936 		}
937 	}
938 
939 	hardware->grpids[index] = nxge->function_num + 256;
940 
941 	return (index);
942 }
943 
/*
 * nxge_fzc_rdc_tbl_unbind
 *
 *	Release an RDC table previously bound by nxge_fzc_rdc_tbl_bind().
 *
 * Returns:
 *	0 on success; EINVAL if <nxge> does not own the table.
 *
 * NOTE(review): unlike rdc_tbl_bind(), this reads and clears
 * nhd->rdc_tbl[] WITHOUT taking nhd->lock — confirm that callers
 * serialize unbind against bind.
 */
int
nxge_fzc_rdc_tbl_unbind(p_nxge_t nxge, int rdc_tbl)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nx_rdc_tbl_t *table;

	NXGE_DEBUG_MSG((nxge, DMA_CTL, "==> nxge_fzc_rdc_tbl_unbind(%d)",
	    rdc_tbl));

	table = &nhd->rdc_tbl[rdc_tbl];
	if (table->nxge != (uintptr_t)nxge) {
		NXGE_ERROR_MSG((nxge, DMA_CTL,
		    "nxge_fzc_rdc_tbl_unbind(%d): func%d not owner",
		    nxge->function_num, rdc_tbl));
		return (EINVAL);
	} else {
		/* Clearing the entry (nxge == 0) marks it unbound. */
		bzero(table, sizeof (*table));
	}

	NXGE_DEBUG_MSG((nxge, DMA_CTL, "<== nxge_fzc_rdc_tbl_unbind(%d)",
	    rdc_tbl));

	return (0);
}
968 
/*
 * nxge_init_fzc_rxdma_port
 *
 *	Initialize the per-port RXDMA FZC state: the port scheduler DRR
 *	weight (1G port modes only), the port's default RDC, and the
 *	MAC host info table entries mapping MAC addresses to RDC tables.
 *
 * Returns:
 *	NXGE_OK, or NXGE_ERROR | <NPI status> on failure.
 */
nxge_status_t
nxge_init_fzc_rxdma_port(p_nxge_t nxgep)
{
	npi_handle_t		handle;
	p_nxge_dma_pt_cfg_t	p_all_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	hostinfo_t 		hostinfo;
	int			i;
	npi_status_t		rs = NPI_SUCCESS;
	p_nxge_class_pt_cfg_t 	p_class_cfgp;
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_init_fzc_rxdma_port"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Initialize the port scheduler DRR weight.
	 * npi_rxdma_cfg_port_ddr_weight();
	 */

	/* Only the 1G port modes get an explicit DRR weight here. */
	if ((nxgep->mac.portmode == PORT_1G_COPPER) ||
	    (nxgep->mac.portmode == PORT_1G_FIBER) ||
	    (nxgep->mac.portmode == PORT_1G_SERDES)) {
		rs = npi_rxdma_cfg_port_ddr_weight(handle,
		    nxgep->function_num, NXGE_RX_DRR_WT_1G);
		if (rs != NPI_SUCCESS) {
			return (NXGE_ERROR | rs);
		}
	}

	/* Program the default RDC of a port */
	rs = npi_rxdma_cfg_default_port_rdc(handle, nxgep->function_num,
	    p_cfgp->def_rdc);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/*
	 * Configure the MAC host info table with RDC tables.
	 * Unconfigured entries (flag not set) fall back to the default
	 * MAC RDC group id and default preference.
	 */
	hostinfo.value = 0;
	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
	for (i = 0; i < p_cfgp->max_macs; i++) {
		hostinfo.bits.w0.rdc_tbl_num = p_cfgp->def_mac_rxdma_grpid;
		hostinfo.bits.w0.mac_pref = p_cfgp->mac_pref;
		if (p_class_cfgp->mac_host_info[i].flag) {
			hostinfo.bits.w0.rdc_tbl_num =
				p_class_cfgp->mac_host_info[i].rdctbl;
			hostinfo.bits.w0.mac_pref =
				p_class_cfgp->mac_host_info[i].mpr_npr;
		}

		rs = npi_mac_hostinfo_entry(handle, OP_SET,
				nxgep->function_num, i, &hostinfo);
		if (rs != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"<== nxge_init_fzc_rxdma_port rs 0x%08x", rs));

	return (NXGE_OK);

}
1033 
1034 nxge_status_t
1035 nxge_fzc_dmc_def_port_rdc(p_nxge_t nxgep, uint8_t port, uint16_t rdc)
1036 {
1037 	npi_status_t rs = NPI_SUCCESS;
1038 	rs = npi_rxdma_cfg_default_port_rdc(nxgep->npi_reg_handle,
1039 				    port, rdc);
1040 	if (rs & NPI_FAILURE)
1041 		return (NXGE_ERROR | rs);
1042 	return (NXGE_OK);
1043 }
1044 
1045 /*
1046  * nxge_init_fzc_tdc_pages
1047  *
1048  *	Configure a TDC's logical pages.
1049  *
1050  *	This function is executed by the service domain, on behalf of
1051  *	a guest domain, to whom this TDC has been loaned.
1052  *
1053  * Arguments:
1054  * 	nxgep
1055  * 	channel		The channel to initialize.
1056  * 	page0		Logical page 0 definition.
1057  * 	page1		Logical page 1 definition.
1058  *
1059  * Notes:
1060  *	I think that this function can be called from any
1061  *	domain, but I need to check.
1062  *
1063  * NPI/NXGE function calls:
1064  *	hv_niu_tx_logical_page_conf()
1065  *	hv_niu_tx_logical_page_info()
1066  *
1067  * Context:
1068  *	Any domain
1069  */
1070 nxge_status_t
1071 nxge_init_fzc_tdc_pages(
1072 	p_nxge_t nxgep,
1073 	uint16_t channel,
1074 	dma_log_page_t *page0,
1075 	dma_log_page_t *page1)
1076 {
1077 	npi_handle_t handle;
1078 	npi_status_t rs;
1079 
1080 	log_page_hdl_t page_handle;
1081 
1082 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1083 		"==> nxge_init_fzc_txdma_channel_pages"));
1084 
1085 #ifndef	NIU_HV_WORKAROUND
1086 	if (nxgep->niu_type == N2_NIU) {
1087 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1088 			"<== nxge_init_fzc_tdc_pages: "
1089 			"N2_NIU: no need to set txdma logical pages"));
1090 		return (NXGE_OK);
1091 	}
1092 #else
1093 	if (nxgep->niu_type == N2_NIU) {
1094 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1095 			"<== nxge_init_fzc_tdc_pages: "
1096 			"N2_NIU: NEED to set txdma logical pages"));
1097 	}
1098 #endif
1099 
1100 	/*
1101 	 * Initialize logical page 1.
1102 	 */
1103 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1104 	if ((rs = npi_txdma_log_page_set(handle, channel, page0))
1105 	    != NPI_SUCCESS)
1106 		return (NXGE_ERROR | rs);
1107 
1108 	/*
1109 	 * Initialize logical page 2.
1110 	 */
1111 	if ((rs = npi_txdma_log_page_set(handle, channel, page1))
1112 	    != NPI_SUCCESS)
1113 		return (NXGE_ERROR | rs);
1114 
1115 	/*
1116 	 * Initialize the page handle.
1117 	 * (In the current driver, this is always set to 0.)
1118 	 */
1119 	page_handle.value = 0;
1120 	rs = npi_txdma_log_page_handle_set(handle, channel, &page_handle);
1121 	if (rs == NPI_SUCCESS) {
1122 		return (NXGE_OK);
1123 	} else {
1124 		return (NXGE_ERROR | rs);
1125 	}
1126 }
1127 
1128 nxge_status_t
1129 nxge_init_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
1130 	p_tx_ring_t tx_ring_p)
1131 {
1132 	npi_handle_t		handle;
1133 	dma_log_page_t		cfg;
1134 	npi_status_t		rs = NPI_SUCCESS;
1135 
1136 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1137 		"==> nxge_init_fzc_txdma_channel_pages"));
1138 
1139 #ifndef	NIU_HV_WORKAROUND
1140 	if (nxgep->niu_type == N2_NIU) {
1141 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1142 			"<== nxge_init_fzc_txdma_channel_pages: "
1143 			"N2_NIU: no need to set txdma logical pages"));
1144 		return (NXGE_OK);
1145 	}
1146 #else
1147 	if (nxgep->niu_type == N2_NIU) {
1148 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1149 			"<== nxge_init_fzc_txdma_channel_pages: "
1150 			"N2_NIU: NEED to set txdma logical pages"));
1151 	}
1152 #endif
1153 
1154 	/*
1155 	 * Initialize logical page 1.
1156 	 */
1157 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1158 	cfg.func_num = nxgep->function_num;
1159 	cfg.page_num = 0;
1160 	cfg.valid = tx_ring_p->page_valid.bits.ldw.page0;
1161 	cfg.value = tx_ring_p->page_value_1.value;
1162 	cfg.mask = tx_ring_p->page_mask_1.value;
1163 	cfg.reloc = tx_ring_p->page_reloc_1.value;
1164 
1165 	rs = npi_txdma_log_page_set(handle, channel,
1166 		(p_dma_log_page_t)&cfg);
1167 	if (rs != NPI_SUCCESS) {
1168 		return (NXGE_ERROR | rs);
1169 	}
1170 
1171 	/*
1172 	 * Initialize logical page 2.
1173 	 */
1174 	cfg.page_num = 1;
1175 	cfg.valid = tx_ring_p->page_valid.bits.ldw.page1;
1176 	cfg.value = tx_ring_p->page_value_2.value;
1177 	cfg.mask = tx_ring_p->page_mask_2.value;
1178 	cfg.reloc = tx_ring_p->page_reloc_2.value;
1179 
1180 	rs = npi_txdma_log_page_set(handle, channel, &cfg);
1181 	if (rs != NPI_SUCCESS) {
1182 		return (NXGE_ERROR | rs);
1183 	}
1184 
1185 	/* Initialize the page handle */
1186 	rs = npi_txdma_log_page_handle_set(handle, channel,
1187 			&tx_ring_p->page_hdl);
1188 
1189 	if (rs == NPI_SUCCESS) {
1190 		return (NXGE_OK);
1191 	} else {
1192 		return (NXGE_ERROR | rs);
1193 	}
1194 }
1195 
1196 
1197 nxge_status_t
1198 nxge_init_fzc_txdma_channel_drr(p_nxge_t nxgep, uint16_t channel,
1199 	p_tx_ring_t tx_ring_p)
1200 {
1201 	npi_status_t	rs = NPI_SUCCESS;
1202 	npi_handle_t	handle;
1203 
1204 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1205 	rs = npi_txc_dma_max_burst_set(handle, channel,
1206 			tx_ring_p->max_burst.value);
1207 	if (rs == NPI_SUCCESS) {
1208 		return (NXGE_OK);
1209 	} else {
1210 		return (NXGE_ERROR | rs);
1211 	}
1212 }
1213 
1214 nxge_status_t
1215 nxge_fzc_sys_err_mask_set(p_nxge_t nxgep, uint64_t mask)
1216 {
1217 	npi_status_t	rs = NPI_SUCCESS;
1218 	npi_handle_t	handle;
1219 
1220 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1221 	rs = npi_fzc_sys_err_mask_set(handle, mask);
1222 	if (rs == NPI_SUCCESS) {
1223 		return (NXGE_OK);
1224 	} else {
1225 		return (NXGE_ERROR | rs);
1226 	}
1227 }
1228 
1229 /*
1230  * nxge_init_hv_fzc_txdma_channel_pages
1231  *
1232  *	Configure a TDC's logical pages.
1233  *
1234  * Arguments:
1235  * 	nxgep
1236  * 	channel		The channel to initialize.
1237  * 	tx_ring_p	The transmit ring.
1238  *
1239  * Notes:
1240  *	I think that this function can be called from any
1241  *	domain, but I need to check.
1242  *
1243  * NPI/NXGE function calls:
1244  *	hv_niu_tx_logical_page_conf()
1245  *	hv_niu_tx_logical_page_info()
1246  *
1247  * Context:
1248  *	Any domain
1249  */
1250 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
1251 nxge_status_t
1252 nxge_init_hv_fzc_txdma_channel_pages(p_nxge_t nxgep, uint16_t channel,
1253 	p_tx_ring_t tx_ring_p)
1254 {
1255 	int			err;
1256 	uint64_t		hverr;
1257 #ifdef	DEBUG
1258 	uint64_t		ra, size;
1259 #endif
1260 
1261 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1262 		"==> nxge_init_hv_fzc_txdma_channel_pages"));
1263 
1264 	if (tx_ring_p->hv_set) {
1265 		return (NXGE_OK);
1266 	}
1267 
1268 	/*
1269 	 * Initialize logical page 1 for data buffers.
1270 	 */
1271 	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
1272 			(uint64_t)0,
1273 			tx_ring_p->hv_tx_buf_base_ioaddr_pp,
1274 			tx_ring_p->hv_tx_buf_ioaddr_size);
1275 
1276 	err = (nxge_status_t)nxge_herr2kerr(hverr);
1277 	if (err != 0) {
1278 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1279 			"<== nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1280 			"error status 0x%x "
1281 			"(page 0 data buf) hverr 0x%llx "
1282 			"ioaddr_pp $%p "
1283 			"size 0x%llx ",
1284 			channel,
1285 			err,
1286 			hverr,
1287 			tx_ring_p->hv_tx_buf_base_ioaddr_pp,
1288 			tx_ring_p->hv_tx_buf_ioaddr_size));
1289 		return (NXGE_ERROR | err);
1290 	}
1291 
1292 #ifdef	DEBUG
1293 	ra = size = 0;
1294 	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
1295 			(uint64_t)0,
1296 			&ra,
1297 			&size);
1298 
1299 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1300 		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1301 		"ok status 0x%x "
1302 		"(page 0 data buf) hverr 0x%llx "
1303 		"set ioaddr_pp $%p "
1304 		"set size 0x%llx "
1305 		"get ra ioaddr_pp $%p "
1306 		"get size 0x%llx ",
1307 		channel,
1308 		err,
1309 		hverr,
1310 		tx_ring_p->hv_tx_buf_base_ioaddr_pp,
1311 		tx_ring_p->hv_tx_buf_ioaddr_size,
1312 		ra,
1313 		size));
1314 #endif
1315 
1316 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1317 		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1318 		"(page 0 data buf) hverr 0x%llx "
1319 		"ioaddr_pp $%p "
1320 		"size 0x%llx ",
1321 		channel,
1322 		hverr,
1323 		tx_ring_p->hv_tx_buf_base_ioaddr_pp,
1324 		tx_ring_p->hv_tx_buf_ioaddr_size));
1325 
1326 	/*
1327 	 * Initialize logical page 2 for control buffers.
1328 	 */
1329 	hverr = hv_niu_tx_logical_page_conf((uint64_t)channel,
1330 			(uint64_t)1,
1331 			tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
1332 			tx_ring_p->hv_tx_cntl_ioaddr_size);
1333 
1334 	err = (nxge_status_t)nxge_herr2kerr(hverr);
1335 
1336 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1337 		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d"
1338 		"ok status 0x%x "
1339 		"(page 1 cntl buf) hverr 0x%llx "
1340 		"ioaddr_pp $%p "
1341 		"size 0x%llx ",
1342 		channel,
1343 		err,
1344 		hverr,
1345 		tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
1346 		tx_ring_p->hv_tx_cntl_ioaddr_size));
1347 
1348 	if (err != 0) {
1349 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1350 			"<== nxge_init_hv_fzc_txdma_channel_pages: channel %d"
1351 			"error status 0x%x "
1352 			"(page 1 cntl buf) hverr 0x%llx "
1353 			"ioaddr_pp $%p "
1354 			"size 0x%llx ",
1355 			channel,
1356 			err,
1357 			hverr,
1358 			tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
1359 			tx_ring_p->hv_tx_cntl_ioaddr_size));
1360 		return (NXGE_ERROR | err);
1361 	}
1362 
1363 #ifdef	DEBUG
1364 	ra = size = 0;
1365 	hverr = hv_niu_tx_logical_page_info((uint64_t)channel,
1366 			(uint64_t)1,
1367 			&ra,
1368 			&size);
1369 
1370 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1371 		"==> nxge_init_hv_fzc_txdma_channel_pages: channel %d "
1372 		"(page 1 cntl buf) hverr 0x%llx "
1373 		"set ioaddr_pp $%p "
1374 		"set size 0x%llx "
1375 		"get ra ioaddr_pp $%p "
1376 		"get size 0x%llx ",
1377 		channel,
1378 		hverr,
1379 		tx_ring_p->hv_tx_cntl_base_ioaddr_pp,
1380 		tx_ring_p->hv_tx_cntl_ioaddr_size,
1381 		ra,
1382 		size));
1383 #endif
1384 
1385 	tx_ring_p->hv_set = B_TRUE;
1386 
1387 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1388 		"<== nxge_init_hv_fzc_txdma_channel_pages"));
1389 
1390 	return (NXGE_OK);
1391 }
1392 
1393 /*ARGSUSED*/
1394 nxge_status_t
1395 nxge_init_hv_fzc_rxdma_channel_pages(p_nxge_t nxgep,
1396 		uint16_t channel, p_rx_rbr_ring_t rbrp)
1397 {
1398 	int			err;
1399 	uint64_t		hverr;
1400 #ifdef	DEBUG
1401 	uint64_t		ra, size;
1402 #endif
1403 
1404 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1405 		"==> nxge_init_hv_fzc_rxdma_channel_pages"));
1406 
1407 	if (rbrp->hv_set) {
1408 		return (NXGE_OK);
1409 	}
1410 
1411 	/* Initialize data buffers for page 0 */
1412 	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
1413 			(uint64_t)0,
1414 			rbrp->hv_rx_buf_base_ioaddr_pp,
1415 			rbrp->hv_rx_buf_ioaddr_size);
1416 	err = (nxge_status_t)nxge_herr2kerr(hverr);
1417 	if (err != 0) {
1418 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1419 			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d"
1420 			"error status 0x%x "
1421 			"(page 0 data buf) hverr 0x%llx "
1422 			"ioaddr_pp $%p "
1423 			"size 0x%llx ",
1424 			channel,
1425 			err,
1426 			hverr,
1427 			rbrp->hv_rx_buf_base_ioaddr_pp,
1428 			rbrp->hv_rx_buf_ioaddr_size));
1429 
1430 		return (NXGE_ERROR | err);
1431 	}
1432 
1433 #ifdef	DEBUG
1434 	ra = size = 0;
1435 	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
1436 			(uint64_t)0,
1437 			&ra,
1438 			&size);
1439 
1440 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1441 		"==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
1442 		"ok status 0x%x "
1443 		"(page 0 data buf) hverr 0x%llx "
1444 		"set databuf ioaddr_pp $%p "
1445 		"set databuf size 0x%llx "
1446 		"get databuf ra ioaddr_pp %p "
1447 		"get databuf size 0x%llx",
1448 		channel,
1449 		err,
1450 		hverr,
1451 		rbrp->hv_rx_buf_base_ioaddr_pp,
1452 		rbrp->hv_rx_buf_ioaddr_size,
1453 		ra,
1454 		size));
1455 #endif
1456 
1457 	/* Initialize control buffers for logical page 1.  */
1458 	hverr = hv_niu_rx_logical_page_conf((uint64_t)channel,
1459 			(uint64_t)1,
1460 			rbrp->hv_rx_cntl_base_ioaddr_pp,
1461 			rbrp->hv_rx_cntl_ioaddr_size);
1462 
1463 	err = (nxge_status_t)nxge_herr2kerr(hverr);
1464 	if (err != 0) {
1465 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1466 			"<== nxge_init_hv_fzc_rxdma_channel_pages: channel %d"
1467 			"error status 0x%x "
1468 			"(page 1 cntl buf) hverr 0x%llx "
1469 			"ioaddr_pp $%p "
1470 			"size 0x%llx ",
1471 			channel,
1472 			err,
1473 			hverr,
1474 			rbrp->hv_rx_buf_base_ioaddr_pp,
1475 			rbrp->hv_rx_buf_ioaddr_size));
1476 
1477 		return (NXGE_ERROR | err);
1478 	}
1479 
1480 #ifdef	DEBUG
1481 	ra = size = 0;
1482 	(void) hv_niu_rx_logical_page_info((uint64_t)channel,
1483 			(uint64_t)1,
1484 			&ra,
1485 			&size);
1486 
1487 
1488 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1489 		"==> nxge_init_hv_fzc_rxdma_channel_pages: channel %d "
1490 		"error status 0x%x "
1491 		"(page 1 cntl buf) hverr 0x%llx "
1492 		"set cntl ioaddr_pp $%p "
1493 		"set cntl size 0x%llx "
1494 		"get cntl ioaddr_pp $%p "
1495 		"get cntl size 0x%llx ",
1496 		channel,
1497 		err,
1498 		hverr,
1499 		rbrp->hv_rx_cntl_base_ioaddr_pp,
1500 		rbrp->hv_rx_cntl_ioaddr_size,
1501 		ra,
1502 		size));
1503 #endif
1504 
1505 	rbrp->hv_set = B_FALSE;
1506 
1507 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
1508 		"<== nxge_init_hv_fzc_rxdma_channel_pages"));
1509 
1510 	return (NXGE_OK);
1511 }
1512 
1513 /*
1514  * Map hypervisor error code to errno. Only
1515  * H_ENORADDR, H_EBADALIGN and H_EINVAL are meaningful
1516  * for niu driver. Any other error codes are mapped to EINVAL.
1517  */
1518 static int
1519 nxge_herr2kerr(uint64_t hv_errcode)
1520 {
1521 	int	s_errcode;
1522 
1523 	switch (hv_errcode) {
1524 	case H_ENORADDR:
1525 	case H_EBADALIGN:
1526 		s_errcode = EFAULT;
1527 		break;
1528 	case H_EOK:
1529 		s_errcode = 0;
1530 		break;
1531 	default:
1532 		s_errcode = EINVAL;
1533 		break;
1534 	}
1535 	return (s_errcode);
1536 }
1537 
1538 #endif	/* sun4v and NIU_LP_WORKAROUND */
1539