/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>
#include <npi_tx_rd64.h>
#include <npi_tx_wr64.h>
#include <sys/llc1.h>

uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t nxge_tx_minfree = 64;
uint32_t nxge_tx_intr_thres = 0;
uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t nxge_tx_tiny_pack = 1;
uint32_t nxge_tx_use_bcopy = 1;

extern uint32_t nxge_tx_ring_size;
extern uint32_t nxge_bcopy_thresh;
extern uint32_t nxge_dvma_thresh;
extern uint32_t nxge_dma_stream_thresh;
extern dma_method_t nxge_force_dma;
extern uint32_t nxge_cksum_offload;

/* Device register access attributes for PIO. */
extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
/* Device descriptor access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
/* Device buffer access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t nxge_desc_dma_attr;
extern ddi_dma_attr_t nxge_tx_dma_attr;

extern void nxge_tx_ring_task(void *arg);

static nxge_status_t nxge_map_txdma(p_nxge_t, int);

static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);

static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_tx_ring_t *,
    uint32_t, p_nxge_dma_common_t *,
    p_tx_mbox_t *);
static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);

static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);

static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_tx_ring_t,
    p_tx_mbox_t *);
static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
    p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
    p_tx_ring_t, p_tx_mbox_t);
static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);

static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
    p_nxge_ldv_t, tx_cs_t);
static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
    uint16_t, p_tx_ring_t);

static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
    p_tx_ring_t ring_p, uint16_t channel);

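/*
 * nxge_init_txdma_channels
 *
 *    Initialize all of the TDCs owned by this instance: for every
 *    logical group in the TX set, add each transmit DMA channel in
 *    that group's map. On failure, remove any channels already added.
 *
 * Arguments:
 *    nxgep
 *
 * NPI/NXGE function calls:
 *    nxge_grp_dc_add()
 *    nxge_grp_dc_remove()
 *
 * Context:
 *    Any domain
 */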
nxge_status_t
nxge_init_txdma_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->tx_set;
    int i, tdc, count;
    nxge_grp_t *group;
    dc_map_t map;
    int dev_gindex;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));

    for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if ((1 << i) & set->lg.map) {
            group = set->group[i];
            dev_gindex =
                nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
            map = nxgep->pt_config.tdc_grps[dev_gindex].map;
            for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
                if ((1 << tdc) & map) {
                    if ((nxge_grp_dc_add(nxgep,
                        group, VP_BOUND_TX, tdc)))
                        goto init_txdma_channels_exit;
                }
            }
        }
        if (++count == set->lg.count)
            break;
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
    return (NXGE_OK);

init_txdma_channels_exit:
    for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if ((1 << i) & set->lg.map) {
            group = set->group[i];
            dev_gindex =
                nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
            map = nxgep->pt_config.tdc_grps[dev_gindex].map;
            for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
                if ((1 << tdc) & map) {
                    nxge_grp_dc_remove(nxgep,
                        VP_BOUND_TX, tdc);
                }
            }
        }
        if (++count == set->lg.count)
            break;
    }

    return (NXGE_ERROR);

}

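/*
 * nxge_init_txdma_channel
 *
 *    Initialize a single TDC: map its descriptor ring and mailbox,
 *    start the channel, and create its kstats if not already present.
 *
 * Arguments:
 *    nxge
 *    channel        The channel to initialize.
 *
 * NPI/NXGE function calls:
 *    nxge_map_txdma()
 *    nxge_txdma_hw_start()
 *    npi_txdma_dump_tdc_regs()
 *
 * Context:
 *    Any domain
 */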
nxge_status_t
nxge_init_txdma_channel(
    p_nxge_t nxge,
    int channel)
{
    nxge_status_t status;

    NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));

    status = nxge_map_txdma(nxge, channel);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "<== nxge_init_txdma_channel: status 0x%x", status));
        (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
        return (status);
    }

    status = nxge_txdma_hw_start(nxge, channel);
    if (status != NXGE_OK) {
        (void) nxge_unmap_txdma_channel(nxge, channel);
        (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
        return (status);
    }

    if (!nxge->statsp->tdc_ksp[channel])
        nxge_setup_tdc_kstats(nxge, channel);

    NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));

    return (status);
}

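/*
 * nxge_uninit_txdma_channels
 *
 *    Remove every TDC in this instance's owned map.
 *
 * Arguments:
 *    nxgep
 *
 * NPI/NXGE function calls:
 *    nxge_grp_dc_remove()
 *
 * Context:
 *    Any domain
 */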
void
nxge_uninit_txdma_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->tx_set;
    int tdc;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "nxge_uninit_txdma_channels: no channels"));
        return;
    }

    for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
        if ((1 << tdc) & set->owned.map) {
            nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
        }
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
}

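/*
 * nxge_uninit_txdma_channel
 *
 *    Tear down a single TDC: delete its kstats, stop the channel,
 *    and unmap its descriptor ring and mailbox.
 *
 * Arguments:
 *    nxgep
 *    channel        The channel to tear down.
 *
 * NPI/NXGE function calls:
 *    nxge_txdma_stop_channel()
 *    nxge_unmap_txdma_channel()
 *
 * Context:
 *    Any domain
 */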
void
nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
{
    NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));

    if (nxgep->statsp->tdc_ksp[channel]) {
        kstat_delete(nxgep->statsp->tdc_ksp[channel]);
        nxgep->statsp->tdc_ksp[channel] = 0;
    }

    if (nxge_txdma_stop_channel(nxgep, channel) != NXGE_OK)
        goto nxge_uninit_txdma_channel_exit;

    nxge_unmap_txdma_channel(nxgep, channel);

nxge_uninit_txdma_channel_exit:
    NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_uninit_txdma_channel"));
}

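/*
 * nxge_setup_dma_common
 *
 *    Carve a block of <entries> * <size> bytes out of the source DMA
 *    area and describe it in the destination DMA descriptor: the
 *    destination inherits the source's current address and cookie, and
 *    the source is advanced past the carved block.
 *
 * Arguments:
 *    dest_p        The DMA descriptor to fill in.
 *    src_p         The DMA area to carve the block from.
 *    entries       Number of blocks.
 *    size          Size of each block, in bytes.
 *
 * Context:
 *    Any domain
 */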
void
nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
    uint32_t entries, uint32_t size)
{
    size_t tsize;
    *dest_p = *src_p;
    tsize = size * entries;
    dest_p->alength = tsize;
    dest_p->nblocks = entries;
    dest_p->block_size = size;
    dest_p->offset += tsize;

    src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
    src_p->alength -= tsize;
    src_p->dma_cookie.dmac_laddress += tsize;
    src_p->dma_cookie.dmac_size -= tsize;
}

/*
 * nxge_reset_txdma_channel
 *
 *    Reset a TDC.
 *
 * Arguments:
 *    nxgep
 *    channel        The channel to reset.
 *    reg_data       The current TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *    npi_txdma_channel_reset()
 *    npi_txdma_channel_control()
 *
 * Registers accessed:
 *    TX_CS          DMC+0x40028 Transmit Control And Status
 *    TX_RING_KICK   DMC+0x40018 Transmit Ring Kick
 *
 * Context:
 *    Any domain
 */
nxge_status_t
nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
{
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
        rs = npi_txdma_channel_reset(handle, channel);
    } else {
        rs = npi_txdma_channel_control(handle, TXDMA_RESET,
            channel);
    }

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    /*
     * Reset the tail (kick) register to 0.
     * (Hardware will not reset it. A Tx overflow fatal
     * error occurs if the tail is not set to 0 after reset.)
     */
    TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);

    NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
    return (status);
}

/*
 * nxge_init_txdma_channel_event_mask
 *
 *    Enable interrupts for a set of events.
 *
 * Arguments:
 *    nxgep
 *    channel        The channel to map.
 *    mask_p         The events to enable.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *    npi_txdma_event_mask()
 *
 * Registers accessed:
 *    TX_ENT_MSK     DMC+0x40020 Transmit Event Mask
 *
 * Context:
 *    Any domain
 */
nxge_status_t
nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_tx_dma_ent_msk_t mask_p)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
        "==> nxge_init_txdma_channel_event_mask"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    return (status);
}

/*
 * nxge_init_txdma_channel_cntl_stat
 *
 *    Initialize the control and status (TX_CS) register of a TDC
 *    with the given value.
 *
 * Arguments:
 *    nxgep
 *    channel        The channel to initialize.
 *    reg_data       The value to write to TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *    npi_txdma_control_status()
 *
 * Registers accessed:
 *    TX_CS          DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *    Any domain
 */
nxge_status_t
nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    uint64_t reg_data)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
        "==> nxge_init_txdma_channel_cntl_stat"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_txdma_control_status(handle, OP_SET, channel,
        (p_tx_cs_t)&reg_data);

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    return (status);
}

/*
 * nxge_enable_txdma_channel
 *
 *    Enable a TDC.
 *
 * Arguments:
 *    nxgep
 *    channel        The channel to enable.
 *    tx_desc_p      The channel's transmit descriptor ring.
 *    mbox_p         The channel's mailbox.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *    npi_txdma_ring_config()
 *    npi_txdma_mbox_config()
 *    npi_txdma_channel_init_enable()
 *
 * Registers accessed:
 *    TX_RNG_CFIG    DMC+0x40000 Transmit Ring Configuration
 *    TXDMA_MBH      DMC+0x40030 TXDMA Mailbox High
 *    TXDMA_MBL      DMC+0x40038 TXDMA Mailbox Low
 *    TX_CS          DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *    Any domain
 */
nxge_status_t
nxge_enable_txdma_channel(p_nxge_t nxgep,
    uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    /*
     * Use configuration data composed at init time.
     * Write to hardware the transmit ring configurations.
     */
    rs = npi_txdma_ring_config(handle, OP_SET, channel,
        (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    if (isLDOMguest(nxgep)) {
        /* Add interrupt handler for this channel. */
        if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
            return (NXGE_ERROR);
    }

    /* Write to hardware the mailbox */
    rs = npi_txdma_mbox_config(handle, OP_SET, channel,
        (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    /* Start the DMA engine. */
    rs = npi_txdma_channel_init_enable(handle, channel);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));

    return (status);
}

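/*
 * nxge_fill_tx_hdr
 *
 *    Build the Neptune transmit packet header for an outgoing message:
 *    record the total transfer length and pad count, parse the Ethernet,
 *    LLC/VLAN and IP/IPv6 headers to fill in the L3/L4 offsets, and set
 *    up TCP/UDP checksum offload (or compute a software UDP checksum
 *    when hardware checksum offload is disabled).
 *
 * Arguments:
 *    mp             The original data packet (no transmit header).
 *    fill_len       If true, only fill in the total transfer length.
 *    l4_cksum       Whether L4 checksum offload was requested.
 *    pkt_len        Total packet length.
 *    npads          Number of pad bytes.
 *    pkthdrp        The transmit packet header to fill in.
 *    start_offset   L4 checksum start offset.
 *    stuff_offset   L4 checksum stuff offset.
 *
 * Context:
 *    Any domain
 */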
void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
    boolean_t l4_cksum, int pkt_len, uint8_t npads,
    p_tx_pkt_hdr_all_t pkthdrp,
    t_uscalar_t start_offset,
    t_uscalar_t stuff_offset)
{
    p_tx_pkt_header_t hdrp;
    p_mblk_t nmp;
    uint64_t tmp;
    size_t mblk_len;
    size_t iph_len;
    size_t hdrs_size;
    uint8_t hdrs_buf[sizeof (struct ether_header) +
        64 + sizeof (uint32_t)];
    uint8_t *cursor;
    uint8_t *ip_buf;
    uint16_t eth_type;
    uint8_t ipproto;
    boolean_t is_vlan = B_FALSE;
    size_t eth_hdr_size;

    NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

    /*
     * Caller should zero out the headers first.
     */
    hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

    if (fill_len) {
        NXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> nxge_fill_tx_hdr: pkt_len %d "
            "npads %d", pkt_len, npads));
        tmp = (uint64_t)pkt_len;
        hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
        goto fill_tx_header_done;
    }

    hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);

    /*
     * mp is the original data packet (does not include the
     * Neptune transmit header).
     */
    nmp = mp;
    NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
        "mp $%p b_rptr $%p len %d",
        mp, nmp->b_rptr, MBLKL(nmp)));
    /* copy ether_header from mblk to hdrs_buf */
    cursor = &hdrs_buf[0];
    tmp = sizeof (struct ether_vlan_header);
    while ((nmp != NULL) && (tmp > 0)) {
        size_t buflen;
        mblk_len = MBLKL(nmp);
        buflen = min((size_t)tmp, mblk_len);
        bcopy(nmp->b_rptr, cursor, buflen);
        cursor += buflen;
        tmp -= buflen;
        nmp = nmp->b_cont;
    }

    nmp = mp;
    mblk_len = MBLKL(nmp);
    ip_buf = NULL;
    eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
    NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
        "ether type 0x%x (value 0x%llx)", eth_type, hdrp->value));

    if (eth_type < ETHERMTU) {
        tmp = 1ull;
        hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
            "value 0x%llx", hdrp->value));
        if (*(hdrs_buf + sizeof (struct ether_header))
            == LLC_SNAP_SAP) {
            eth_type = ntohs(*((uint16_t *)(hdrs_buf +
                sizeof (struct ether_header) + 6)));
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
                eth_type));
        } else {
            goto fill_tx_header_done;
        }
    } else if (eth_type == VLAN_ETHERTYPE) {
        tmp = 1ull;
        hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

        eth_type = ntohs(((struct ether_vlan_header *)
            hdrs_buf)->ether_type);
        is_vlan = B_TRUE;
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
            "value 0x%llx", hdrp->value));
    }

    if (!is_vlan) {
        eth_hdr_size = sizeof (struct ether_header);
    } else {
        eth_hdr_size = sizeof (struct ether_vlan_header);
    }

    switch (eth_type) {
    case ETHERTYPE_IP:
        if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
            ip_buf = nmp->b_rptr + eth_hdr_size;
            mblk_len -= eth_hdr_size;
            iph_len = ((*ip_buf) & 0x0f);
            if (mblk_len > (iph_len + sizeof (uint32_t))) {
                ip_buf = nmp->b_rptr;
                ip_buf += eth_hdr_size;
            } else {
                ip_buf = NULL;
            }

        }
        if (ip_buf == NULL) {
            hdrs_size = 0;
            ((p_ether_header_t)hdrs_buf)->ether_type = 0;
            while ((nmp) && (hdrs_size <
                sizeof (hdrs_buf))) {
                mblk_len = (size_t)nmp->b_wptr -
                    (size_t)nmp->b_rptr;
                if (mblk_len >=
                    (sizeof (hdrs_buf) - hdrs_size))
                    mblk_len = sizeof (hdrs_buf) -
                        hdrs_size;
                bcopy(nmp->b_rptr,
                    &hdrs_buf[hdrs_size], mblk_len);
                hdrs_size += mblk_len;
                nmp = nmp->b_cont;
            }
            ip_buf = hdrs_buf;
            ip_buf += eth_hdr_size;
            iph_len = ((*ip_buf) & 0x0f);
        }

        ipproto = ip_buf[9];

        tmp = (uint64_t)iph_len;
        hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
        tmp = (uint64_t)(eth_hdr_size >> 1);
        hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
            " iph_len %d l3start %d eth_hdr_size %d proto 0x%x "
            "tmp 0x%x",
            iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
            ipproto, tmp));
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
            "value 0x%llx", hdrp->value));

        break;

    case ETHERTYPE_IPV6:
        hdrs_size = 0;
        ((p_ether_header_t)hdrs_buf)->ether_type = 0;
        while ((nmp) && (hdrs_size <
            sizeof (hdrs_buf))) {
            mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
            if (mblk_len >=
                (sizeof (hdrs_buf) - hdrs_size))
                mblk_len = sizeof (hdrs_buf) -
                    hdrs_size;
            bcopy(nmp->b_rptr,
                &hdrs_buf[hdrs_size], mblk_len);
            hdrs_size += mblk_len;
            nmp = nmp->b_cont;
        }
        ip_buf = hdrs_buf;
        ip_buf += eth_hdr_size;

        tmp = 1ull;
        hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

        tmp = (eth_hdr_size >> 1);
        hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

        /* byte 6 is the next header protocol */
        ipproto = ip_buf[6];

        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
            " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
            iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
            ipproto));
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
            "value 0x%llx", hdrp->value));

        break;

    default:
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
        goto fill_tx_header_done;
    }

    switch (ipproto) {
    case IPPROTO_TCP:
        NXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
        if (l4_cksum) {
            hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
            hdrp->value |=
                (((uint64_t)(start_offset >> 1)) <<
                TX_PKT_HEADER_L4START_SHIFT);
            hdrp->value |=
                (((uint64_t)(stuff_offset >> 1)) <<
                TX_PKT_HEADER_L4STUFF_SHIFT);

            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
                "value 0x%llx", hdrp->value));
        }

        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
            "value 0x%llx", hdrp->value));
        break;

    case IPPROTO_UDP:
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
        if (l4_cksum) {
            if (!nxge_cksum_offload) {
                uint16_t *up;
                uint16_t cksum;
                t_uscalar_t stuff_len;

                /*
                 * The checksum field has the
                 * partial checksum.
                 * IP_CSUM() macro calls ip_cksum() which
                 * can add in the partial checksum.
                 */
                cksum = IP_CSUM(mp, start_offset, 0);
                stuff_len = stuff_offset;
                nmp = mp;
                mblk_len = MBLKL(nmp);
                while ((nmp != NULL) &&
                    (mblk_len < stuff_len)) {
                    stuff_len -= mblk_len;
                    nmp = nmp->b_cont;
                    if (nmp)
                        mblk_len = MBLKL(nmp);
                }
                ASSERT(nmp);
                up = (uint16_t *)(nmp->b_rptr + stuff_len);

                *up = cksum;
                hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
                NXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
                    "use sw cksum "
                    "write to $%p cksum 0x%x content up 0x%x",
                    stuff_len,
                    up,
                    cksum,
                    *up));
            } else {
                /* Hardware will compute the full checksum */
                hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
                hdrp->value |=
                    (((uint64_t)(start_offset >> 1)) <<
                    TX_PKT_HEADER_L4START_SHIFT);
                hdrp->value |=
                    (((uint64_t)(stuff_offset >> 1)) <<
                    TX_PKT_HEADER_L4STUFF_SHIFT);

                NXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
                    " use partial checksum "
                    "cksum 0x%x "
                    "value 0x%llx",
                    stuff_offset,
                    IP_CSUM(mp, start_offset, 0),
                    hdrp->value));
            }
        }

        NXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> nxge_tx_pkt_hdr_init: UDP "
            "value 0x%llx", hdrp->value));
        break;

    default:
        goto fill_tx_header_done;
    }

fill_tx_header_done:
    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "==> nxge_fill_tx_hdr: pkt_len %d "
        "npads %d value 0x%llx", pkt_len, npads, hdrp->value));

    NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}

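/*
 * nxge_tx_pkt_header_reserve
 *
 *    Allocate a new message block, reserve TX_PKT_HEADER_SIZE bytes of
 *    header room at its end, and link the original message behind it.
 *    Returns the new block, or NULL if the allocation fails.
 *
 * Context:
 *    Any domain
 */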
/*ARGSUSED*/
p_mblk_t
nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
    p_mblk_t newmp = NULL;

    if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
        NXGE_DEBUG_MSG((NULL, TX_CTL,
            "<== nxge_tx_pkt_header_reserve: allocb failed"));
        return (NULL);
    }

    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "==> nxge_tx_pkt_header_reserve: get new mp"));
    DB_TYPE(newmp) = M_DATA;
    newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
    linkb(newmp, mp);
    newmp->b_rptr -= TX_PKT_HEADER_SIZE;

    NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_header_reserve: "
        "b_rptr $%p b_wptr $%p",
        newmp->b_rptr, newmp->b_wptr));

    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "<== nxge_tx_pkt_header_reserve: use new mp"));

    return (newmp);
}

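/*
 * nxge_tx_pkt_nmblocks
 *
 *    Estimate the number of transmit descriptors (gather pointers) a
 *    message will need and return the total transfer length through
 *    tot_xfer_len_p. Fragments larger than the 4K hardware transfer
 *    limit are split with dupb(), and the message is pulled up with
 *    msgpullup() when it would exceed the gather pointer limit.
 *    Returns 0 on allocation failure.
 *
 * Context:
 *    Any domain
 */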
int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
    uint_t nmblks;
    ssize_t len;
    uint_t pkt_len;
    p_mblk_t nmp, bmp, tmp;
    uint8_t *b_wptr;

    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
        "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

    nmp = mp;
    bmp = mp;
    nmblks = 0;
    pkt_len = 0;
    *tot_xfer_len_p = 0;

    while (nmp) {
        len = MBLKL(nmp);
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
            "len %d pkt_len %d nmblks %d tot_xfer_len %d",
            len, pkt_len, nmblks,
            *tot_xfer_len_p));

        if (len <= 0) {
            bmp = nmp;
            nmp = nmp->b_cont;
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_nmblocks: "
                "len (0) pkt_len %d nmblks %d",
                pkt_len, nmblks));
            continue;
        }

        *tot_xfer_len_p += len;
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
            "len %d pkt_len %d nmblks %d tot_xfer_len %d",
            len, pkt_len, nmblks,
            *tot_xfer_len_p));

        if (len < nxge_bcopy_thresh) {
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_nmblocks: "
                "len %d (< thresh) pkt_len %d nmblks %d",
                len, pkt_len, nmblks));
            if (pkt_len == 0)
                nmblks++;
            pkt_len += len;
            if (pkt_len >= nxge_bcopy_thresh) {
                pkt_len = 0;
                len = 0;
                nmp = bmp;
            }
        } else {
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_nmblocks: "
                "len %d (> thresh) pkt_len %d nmblks %d",
                len, pkt_len, nmblks));
            pkt_len = 0;
            nmblks++;
            /*
             * Hardware limits the transfer length to 4K.
             * If len is more than 4K, we need to break
             * it up to at most 2 more blocks.
             */
            if (len > TX_MAX_TRANSFER_LENGTH) {
                uint32_t nsegs;

                nsegs = 1;
                NXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> nxge_tx_pkt_nmblocks: "
                    "len %d pkt_len %d nmblks %d nsegs %d",
                    len, pkt_len, nmblks, nsegs));
                if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
                    ++nsegs;
                }
                do {
                    b_wptr = nmp->b_rptr +
                        TX_MAX_TRANSFER_LENGTH;
                    nmp->b_wptr = b_wptr;
                    if ((tmp = dupb(nmp)) == NULL) {
                        return (0);
                    }
                    tmp->b_rptr = b_wptr;
                    tmp->b_wptr = nmp->b_wptr;
                    tmp->b_cont = nmp->b_cont;
                    nmp->b_cont = tmp;
                    nmblks++;
                    if (--nsegs) {
                        nmp = tmp;
                    }
                } while (nsegs);
                nmp = tmp;
            }
        }

        /*
         * Hardware limits the transmit gather pointers to 15.
         */
        if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
            TX_MAX_GATHER_POINTERS) {
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_nmblocks: pull msg - "
                "len %d pkt_len %d nmblks %d",
                len, pkt_len, nmblks));
            /* Pull all message blocks from b_cont */
            if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
                return (0);
            }
            freemsg(nmp->b_cont);
            nmp->b_cont = tmp;
            pkt_len = 0;
        }
        bmp = nmp;
        nmp = nmp->b_cont;
    }

    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
        "nmblks %d len %d tot_xfer_len %d",
        mp->b_rptr, mp->b_wptr, nmblks,
        MBLKL(mp), *tot_xfer_len_p));

    return (nmblks);
}

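/*
 * nxge_txdma_reclaim
 *
 *    Reclaim transmit descriptors that the hardware has finished with:
 *    read the hardware head pointer and wrap bit (TX_RING_HDL), walk the
 *    ring from the software read index to the head, free or unbind each
 *    message, and update the ring statistics. Returns B_TRUE if the ring
 *    has room for nmblks more descriptors (less the TX_FULL_MARK
 *    reserve), B_FALSE otherwise.
 *
 * Arguments:
 *    nxgep
 *    tx_ring_p      The transmit ring to reclaim from.
 *    nmblks         The number of message blocks we wish to send.
 *
 * Registers accessed:
 *    TX_RING_HDL    Transmit Ring Head (hardware head index and wrap bit)
 *
 * Context:
 *    Any domain
 */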
boolean_t
nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
    boolean_t status = B_TRUE;
    p_nxge_dma_common_t tx_desc_dma_p;
    nxge_dma_common_t desc_area;
    p_tx_desc_t tx_desc_ring_vp;
    p_tx_desc_t tx_desc_p;
    p_tx_desc_t tx_desc_pp;
    tx_desc_t r_tx_desc;
    p_tx_msg_t tx_msg_ring;
    p_tx_msg_t tx_msg_p;
    npi_handle_t handle;
    tx_ring_hdl_t tx_head;
    uint32_t pkt_len;
    uint_t tx_rd_index;
    uint16_t head_index, tail_index;
    uint8_t tdc;
    boolean_t head_wrap, tail_wrap;
    p_nxge_tx_ring_stats_t tdc_stats;
    int rc;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));

    status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
        (nmblks != 0));
    NXGE_DEBUG_MSG((nxgep, TX_CTL,
        "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
        tx_ring_p->descs_pending, nxge_reclaim_pending,
        nmblks));
    if (!status) {
        tx_desc_dma_p = &tx_ring_p->tdc_desc;
        desc_area = tx_ring_p->tdc_desc;
        handle = NXGE_DEV_NPI_HANDLE(nxgep);
        tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
        tx_desc_ring_vp =
            (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
        tx_rd_index = tx_ring_p->rd_index;
        tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
        tx_msg_ring = tx_ring_p->tx_msg_ring;
        tx_msg_p = &tx_msg_ring[tx_rd_index];
        tdc = tx_ring_p->tdc;
        tdc_stats = tx_ring_p->tdc_stats;
        if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
            tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
        }

        tail_index = tx_ring_p->wr_index;
        tail_wrap = tx_ring_p->wr_index_wrap;

        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
            "tail_index %d tail_wrap %d "
            "tx_desc_p $%p ($%p) ",
            tdc, tx_rd_index, tail_index, tail_wrap,
            tx_desc_p, (*(uint64_t *)tx_desc_p)));
        /*
         * Read the hardware maintained transmit head
         * and wrap around bit.
         */
        TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
        head_index = tx_head.bits.ldw.head;
        head_wrap = tx_head.bits.ldw.wrap;
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "==> nxge_txdma_reclaim: "
            "tx_rd_index %d tail %d tail_wrap %d "
            "head %d wrap %d",
            tx_rd_index, tail_index, tail_wrap,
            head_index, head_wrap));

        if (head_index == tail_index) {
            if (TXDMA_RING_EMPTY(head_index, head_wrap,
                tail_index, tail_wrap) &&
                (head_index == tx_rd_index)) {
                NXGE_DEBUG_MSG((nxgep, TX_CTL,
                    "==> nxge_txdma_reclaim: EMPTY"));
                return (B_TRUE);
            }

            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: Checking "
                "if ring full"));
            if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
                tail_wrap)) {
                NXGE_DEBUG_MSG((nxgep, TX_CTL,
                    "==> nxge_txdma_reclaim: full"));
                return (B_FALSE);
            }
        }

        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "==> nxge_txdma_reclaim: tx_rd_index and head_index"));

        tx_desc_pp = &r_tx_desc;
        while ((tx_rd_index != head_index) &&
            (tx_ring_p->descs_pending != 0)) {

            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: Checking if pending"));

            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: "
                "descs_pending %d ",
                tx_ring_p->descs_pending));

            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: "
                "(tx_rd_index %d head_index %d "
                "(tx_desc_p $%p)",
                tx_rd_index, head_index,
                tx_desc_p));

            tx_desc_pp->value = tx_desc_p->value;
            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: "
                "(tx_rd_index %d head_index %d "
                "tx_desc_p $%p (desc value 0x%llx) ",
                tx_rd_index, head_index,
                tx_desc_pp, (*(uint64_t *)tx_desc_pp)));

            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: dump desc:"));

            pkt_len = tx_desc_pp->bits.hdw.tr_len;
            tdc_stats->obytes += (pkt_len - TX_PKT_HEADER_SIZE);
            tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: pkt_len %d "
                "tdc channel %d opackets %d",
                pkt_len,
                tdc,
                tdc_stats->opackets));

            if (tx_msg_p->flags.dma_type == USE_DVMA) {
                NXGE_DEBUG_MSG((nxgep, TX_CTL,
                    "tx_desc_p = $%p "
                    "tx_desc_pp = $%p "
                    "index = %d",
                    tx_desc_p,
                    tx_desc_pp,
                    tx_ring_p->rd_index));
                (void) dvma_unload(tx_msg_p->dvma_handle,
                    0, -1);
                tx_msg_p->dvma_handle = NULL;
                if (tx_ring_p->dvma_wr_index ==
                    tx_ring_p->dvma_wrap_mask) {
                    tx_ring_p->dvma_wr_index = 0;
                } else {
                    tx_ring_p->dvma_wr_index++;
                }
                tx_ring_p->dvma_pending--;
            } else if (tx_msg_p->flags.dma_type ==
                USE_DMA) {
                NXGE_DEBUG_MSG((nxgep, TX_CTL,
                    "==> nxge_txdma_reclaim: "
                    "USE DMA"));
                if (rc = ddi_dma_unbind_handle
                    (tx_msg_p->dma_handle)) {
                    cmn_err(CE_WARN, "!nxge_reclaim: "
                        "ddi_dma_unbind_handle "
                        "failed. status %d", rc);
                }
            }
            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: count packets"));
            /*
             * count a chained packet only once.
             */
            if (tx_msg_p->tx_message != NULL) {
                freemsg(tx_msg_p->tx_message);
                tx_msg_p->tx_message = NULL;
            }

            tx_msg_p->flags.dma_type = USE_NONE;
            tx_rd_index = tx_ring_p->rd_index;
            tx_rd_index = (tx_rd_index + 1) &
                tx_ring_p->tx_wrap_mask;
            tx_ring_p->rd_index = tx_rd_index;
            tx_ring_p->descs_pending--;
            tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
            tx_msg_p = &tx_msg_ring[tx_rd_index];
        }

        status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
            (int)tx_ring_p->descs_pending - TX_FULL_MARK));
        if (status) {
            (void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing,
                1, 0);
        }
    } else {
        status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
            (int)tx_ring_p->descs_pending - TX_FULL_MARK));
    }

    NXGE_DEBUG_MSG((nxgep, TX_CTL,
        "<== nxge_txdma_reclaim status = 0x%08x", status));

    return (status);
}

/*
 * nxge_tx_intr
 *
 *    Process a TDC interrupt
 *
 * Arguments:
 *    arg1    A Logical Device state Vector (LSV) data structure.
 *    arg2    nxge_t *
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *    npi_txdma_control_status()
 *    npi_intr_ldg_mgmt_set()
 *
 *    nxge_tx_err_evnts()
 *    nxge_txdma_reclaim()
 *
 * Registers accessed:
 *    TX_CS          DMC+0x40028 Transmit Control And Status
 *    PIO_LDSV
 *
 * Context:
 *    Any domain
 */
uint_t
1057 (tx_msg_p->dma_handle)) {
1058 cmn_err(CE_WARN, "!nxge_reclaim: "
1059 "ddi_dma_unbind_handle "
1060 "failed. status %d", rc);
1061 }
1062 }
1063 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1064 "==> nxge_txdma_reclaim: count packets"));
1065 /*
1066 * count a chained packet only once.
1067 */
1068 if (tx_msg_p->tx_message != NULL) {
1069 freemsg(tx_msg_p->tx_message);
1070 tx_msg_p->tx_message = NULL;
1071 }
1072
1073 tx_msg_p->flags.dma_type = USE_NONE;
1074 tx_rd_index = tx_ring_p->rd_index;
1075 tx_rd_index = (tx_rd_index + 1) &
1076 tx_ring_p->tx_wrap_mask;
1077 tx_ring_p->rd_index = tx_rd_index;
1078 tx_ring_p->descs_pending--;
1079 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
1080 tx_msg_p = &tx_msg_ring[tx_rd_index];
1081 }
1082
1083 status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
1084 (int)tx_ring_p->descs_pending - TX_FULL_MARK));
1085 if (status) {
1086 (void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing,
1087 1, 0);
1088 }
1089 } else {
1090 status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
1091 (int)tx_ring_p->descs_pending - TX_FULL_MARK));
1092 }
1093
1094 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1095 "<== nxge_txdma_reclaim status = 0x%08x", status));
1096
1097 return (status);
1098}
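/*
 * Editor's sketch: a simplified model of the wrap-bit test that
 * TXDMA_RING_EMPTY/TXDMA_RING_FULL perform in nxge_txdma_reclaim().
 * Assumption: with equal head and tail indices, matching wrap bits mean
 * the ring is empty and differing wrap bits mean it is full.  The
 * function names below are hypothetical, not driver macros.
 */
static boolean_t
example_ring_empty(uint16_t head, boolean_t head_wrap,
    uint16_t tail, boolean_t tail_wrap)
{
	return ((head == tail) && (head_wrap == tail_wrap));
}

static boolean_t
example_ring_full(uint16_t head, boolean_t head_wrap,
    uint16_t tail, boolean_t tail_wrap)
{
	return ((head == tail) && (head_wrap != tail_wrap));
}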
1099
1100/*
1101 * nxge_tx_intr
1102 *
1103 * Process a TDC interrupt
1104 *
1105 * Arguments:
1106 * arg1 A Logical Device state Vector (LSV) data structure.
1107 * arg2 nxge_t *
1108 *
1109 * Notes:
1110 *
1111 * NPI/NXGE function calls:
1112 * npi_txdma_control_status()
1113 * npi_intr_ldg_mgmt_set()
1114 *
1115 * nxge_tx_err_evnts()
1116 * nxge_txdma_reclaim()
1117 *
1118 * Registers accessed:
1119 * TX_CS DMC+0x40028 Transmit Control And Status
1120 * PIO_LDSV
1121 *
1122 * Context:
1123 * Any domain
1124 */
1125uint_t
1126nxge_tx_intr(void *arg1, void *arg2)
1126nxge_tx_intr(char *arg1, char *arg2)
1127{
1128 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
1129 p_nxge_t nxgep = (p_nxge_t)arg2;
1130 p_nxge_ldg_t ldgp;
1131 uint8_t channel;
1132 uint32_t vindex;
1133 npi_handle_t handle;
1134 tx_cs_t cs;
1135 p_tx_ring_t *tx_rings;
1136 p_tx_ring_t tx_ring_p;
1137 npi_status_t rs = NPI_SUCCESS;
1138 uint_t serviced = DDI_INTR_UNCLAIMED;
1139 nxge_status_t status = NXGE_OK;
1140
1141 if (ldvp == NULL) {
1142 NXGE_DEBUG_MSG((NULL, INT_CTL,
1143 "<== nxge_tx_intr: nxgep $%p ldvp $%p",
1144 nxgep, ldvp));
1145 return (DDI_INTR_UNCLAIMED);
1146 }
1147
1148 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1149 nxgep = ldvp->nxgep;
1150 }
1151 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1152 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
1153 nxgep, ldvp));
1154
1155 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1156 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1157 NXGE_DEBUG_MSG((nxgep, INT_CTL,
 1158 		    "<== nxge_tx_intr: interface not started or initialized"));
1159 return (DDI_INTR_CLAIMED);
1160 }
1161
1162 /*
1163 * This interrupt handler is for a specific
1164 * transmit dma channel.
1165 */
1166 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1167 /* Get the control and status for this channel. */
1168 channel = ldvp->channel;
1169 ldgp = ldvp->ldgp;
1170 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1171 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
1172 "channel %d",
1173 nxgep, ldvp, channel));
1174
1175 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
1176 vindex = ldvp->vdma_index;
1177 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1178 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
1179 channel, vindex, rs));
1180 if (!rs && cs.bits.ldw.mk) {
1181 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1182 "==> nxge_tx_intr:channel %d ring index %d "
1183 "status 0x%08x (mk bit set)",
1184 channel, vindex, rs));
1185 tx_rings = nxgep->tx_rings->rings;
1186 tx_ring_p = tx_rings[vindex];
1187 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1188 "==> nxge_tx_intr:channel %d ring index %d "
1189 "status 0x%08x (mk bit set, calling reclaim)",
1190 channel, vindex, rs));
1191
1192 nxge_tx_ring_task((void *)tx_ring_p);
1193 }
1194
1195 /*
1196 * Process other transmit control and status.
1197 * Check the ldv state.
1198 */
1199 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
1200 /*
1201 * Rearm this logical group if this is a single device
1202 * group.
1203 */
1204 if (ldgp->nldvs == 1) {
1205 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1206 "==> nxge_tx_intr: rearm"));
1207 if (status == NXGE_OK) {
1208 if (isLDOMguest(nxgep)) {
1209 nxge_hio_ldgimgn(nxgep, ldgp);
1210 } else {
1211 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
1212 B_TRUE, ldgp->ldg_timer);
1213 }
1214 }
1215 }
1216
1217 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
1218 serviced = DDI_INTR_CLAIMED;
1219 return (serviced);
1220}
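/*
 * Editor's sketch: the overall shape of a handler like nxge_tx_intr()
 * under the Solaris DDI convention -- validate the arguments, service the
 * channel, and return DDI_INTR_CLAIMED only when the interrupt was ours.
 * The helper name is hypothetical; the real handler also rearms the
 * logical group, as shown above.
 */
static uint_t
example_tx_handler(void *arg1, void *arg2)
{
	if (arg1 == NULL || arg2 == NULL)
		return (DDI_INTR_UNCLAIMED);	/* not our interrupt */

	/* read TX_CS, reclaim completed descriptors, handle errors ... */

	return (DDI_INTR_CLAIMED);
}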
1221
1222void
1223nxge_txdma_stop(p_nxge_t nxgep) /* Dead */
1224{
1225 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
1226
1227 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1228
1229 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
1230}
1231
1232void
1233nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */
1234{
1235 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
1236
1237 (void) nxge_txdma_stop(nxgep);
1238
1239 (void) nxge_fixup_txdma_rings(nxgep);
1240 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1241 (void) nxge_tx_mac_enable(nxgep);
1242 (void) nxge_txdma_hw_kick(nxgep);
1243
1244 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
1245}
1246
1247npi_status_t
1248nxge_txdma_channel_disable(
1249 nxge_t *nxge,
1250 int channel)
1251{
1252 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge);
1253 npi_status_t rs;
1254 tdmc_intr_dbg_t intr_dbg;
1255
1256 /*
1257 * Stop the dma channel and wait for the stop-done.
1258 * If the stop-done bit is not present, then force
1259 * an error so TXC will stop.
1260 * All channels bound to this port need to be stopped
1261 * and reset after injecting an interrupt error.
1262 */
1263 rs = npi_txdma_channel_disable(handle, channel);
1264 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1265 "==> nxge_txdma_channel_disable(%d) "
1266 "rs 0x%x", channel, rs));
1267 if (rs != NPI_SUCCESS) {
1268 /* Inject any error */
1269 intr_dbg.value = 0;
1270 intr_dbg.bits.ldw.nack_pref = 1;
 1271 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
 1272 		    "==> nxge_txdma_channel_disable: "
 1273 		    "channel %d (stop failed 0x%x) "
 1274 		    "(inject err)", channel, rs));
1275 (void) npi_txdma_inj_int_error_set(
1276 handle, channel, &intr_dbg);
1277 rs = npi_txdma_channel_disable(handle, channel);
 1278 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
 1279 		    "==> nxge_txdma_channel_disable: "
 1280 		    "channel %d (stop again 0x%x) "
 1281 		    "(after inject err)",
 1282 		    channel, rs));
1283 }
1284
1285 return (rs);
1286}
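/*
 * Editor's sketch (hypothetical names): the generic form of the
 * stop/inject/retry pattern used above and in nxge_txdma_stop_inj_err().
 * If the first disable does not see the stop-done bit, an error is
 * injected so the second disable can complete.
 */
static int
example_stop_with_injection(int (*stop)(int), void (*inject)(int), int chan)
{
	int rc = stop(chan);

	if (rc != 0) {		/* stop-done bit never set */
		inject(chan);	/* force an error so the channel halts */
		rc = stop(chan);
	}
	return (rc);
}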
1287
1288/*
1289 * nxge_txdma_hw_mode
1290 *
1291 * Toggle all TDCs on (enable) or off (disable).
1292 *
1293 * Arguments:
1294 * nxgep
 1295 * 	enable	Enable (B_TRUE) or disable (B_FALSE) all owned TDCs.
1296 *
1297 * Notes:
1298 *
1299 * NPI/NXGE function calls:
1300 * npi_txdma_channel_enable(TX_CS)
1301 * npi_txdma_channel_disable(TX_CS)
1302 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1303 *
1304 * Registers accessed:
1305 * TX_CS DMC+0x40028 Transmit Control And Status
1306 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1307 *
1308 * Context:
1309 * Any domain
1310 */
1311nxge_status_t
1312nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1313{
1314 nxge_grp_set_t *set = &nxgep->tx_set;
1315
1316 npi_handle_t handle;
1317 nxge_status_t status;
1318 npi_status_t rs;
1319 int tdc;
1320
1321 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1322 "==> nxge_txdma_hw_mode: enable mode %d", enable));
1323
1324 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1325 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1326 "<== nxge_txdma_mode: not initialized"));
1327 return (NXGE_ERROR);
1328 }
1329
1330 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1331 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1332 "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
1333 return (NXGE_ERROR);
1334 }
1335
1336 /* Enable or disable all of the TDCs owned by us. */
1337 rs = 0;
1337 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1338 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1339 if ((1 << tdc) & set->owned.map) {
1340 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1341 if (ring) {
1342 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1343 "==> nxge_txdma_hw_mode: channel %d", tdc));
1344 if (enable) {
1345 rs = npi_txdma_channel_enable
1346 (handle, tdc);
1347 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1348 "==> nxge_txdma_hw_mode: "
1349 "channel %d (enable) rs 0x%x",
1350 tdc, rs));
1351 } else {
1352 rs = nxge_txdma_channel_disable
1353 (nxgep, tdc);
1354 }
1355 }
1356 }
1357 }
1358
1359 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1360
1361 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1362 "<== nxge_txdma_hw_mode: status 0x%x", status));
1363
1364 return (status);
1365}
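/*
 * Editor's sketch: the owned-channel walk used by nxge_txdma_hw_mode()
 * and the other per-TDC loops in this file.  Bit N of the owned map
 * corresponds to channel N.  The callback and the helper name are
 * hypothetical.
 */
static void
example_for_each_owned_tdc(uint32_t owned_map, int max_tdcs,
    void (*func)(int tdc))
{
	int tdc;

	for (tdc = 0; tdc < max_tdcs; tdc++) {
		if ((1 << tdc) & owned_map)
			func(tdc);
	}
}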
1366
1367void
1368nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1369{
1370 npi_handle_t handle;
1371
1372 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1373 "==> nxge_txdma_enable_channel: channel %d", channel));
1374
1375 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1376 /* enable the transmit dma channels */
1377 (void) npi_txdma_channel_enable(handle, channel);
1378
1379 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
1380}
1381
1382void
1383nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1384{
1385 npi_handle_t handle;
1386
1387 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1388 "==> nxge_txdma_disable_channel: channel %d", channel));
1389
1390 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1391 /* stop the transmit dma channels */
1392 (void) npi_txdma_channel_disable(handle, channel);
1393
1394 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
1395}
1396
1397/*
1398 * nxge_txdma_stop_inj_err
1399 *
1400 * Stop a TDC. If at first we don't succeed, inject an error.
1401 *
1402 * Arguments:
1403 * nxgep
1404 * channel The channel to stop.
1405 *
1406 * Notes:
1407 *
1408 * NPI/NXGE function calls:
1409 * npi_txdma_channel_disable()
1410 * npi_txdma_inj_int_error_set()
1411 * #if defined(NXGE_DEBUG)
1412 * nxge_txdma_regs_dump_channels(nxgep);
1413 * #endif
1414 *
1415 * Registers accessed:
1416 * TX_CS DMC+0x40028 Transmit Control And Status
1417 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1418 *
1419 * Context:
1420 * Any domain
1421 */
1422int
1423nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
1424{
1425 npi_handle_t handle;
1426 tdmc_intr_dbg_t intr_dbg;
1427 int status;
1428 npi_status_t rs = NPI_SUCCESS;
1429
1430 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
1431 /*
1432 * Stop the dma channel waits for the stop done.
1433 * If the stop done bit is not set, then create
1434 * an error.
1435 */
1436 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1437 rs = npi_txdma_channel_disable(handle, channel);
1438 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1439 if (status == NXGE_OK) {
1440 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1441 "<== nxge_txdma_stop_inj_err (channel %d): "
1442 "stopped OK", channel));
1443 return (status);
1444 }
1445
1446 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1447 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1448 "injecting error", channel, rs));
1449 /* Inject any error */
1450 intr_dbg.value = 0;
1451 intr_dbg.bits.ldw.nack_pref = 1;
1452 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1453
1454 /* Stop done bit will be set as a result of error injection */
1455 rs = npi_txdma_channel_disable(handle, channel);
1456 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1457 if (!(rs & NPI_TXDMA_STOP_FAILED)) {
1458 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1459 "<== nxge_txdma_stop_inj_err (channel %d): "
1460 "stopped OK ", channel));
1461 return (status);
1462 }
1463
1464#if defined(NXGE_DEBUG)
1465 nxge_txdma_regs_dump_channels(nxgep);
1466#endif
1467 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
 1468 	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
 1469 	    "(injected error but still not stopped)", channel, rs));
1470
1471 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
1472 return (status);
1473}
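/*
 * Editor's sketch: the status-folding idiom used throughout this file.
 * An NPI return code is either reported as NXGE_OK or OR-ed into
 * NXGE_ERROR so the low bits still identify the NPI failure.  The helper
 * name is hypothetical.
 */
static nxge_status_t
example_npi_to_nxge_status(npi_status_t rs)
{
	return ((rs == NPI_SUCCESS) ? NXGE_OK : (NXGE_ERROR | rs));
}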
1474
1475/*ARGSUSED*/
1476void
1477nxge_fixup_txdma_rings(p_nxge_t nxgep)
1478{
1479 nxge_grp_set_t *set = &nxgep->tx_set;
1480 int tdc;
1481
1482 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
1483
1484 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1485 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1486 "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
1487 return;
1488 }
1489
1490 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1491 if ((1 << tdc) & set->owned.map) {
1492 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1493 if (ring) {
1494 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1495 "==> nxge_fixup_txdma_rings: channel %d",
1496 tdc));
1497 nxge_txdma_fixup_channel(nxgep, ring, tdc);
1498 }
1499 }
1500 }
1501
1502 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
1503}
1504
1505/*ARGSUSED*/
1506void
1507nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1508{
1509 p_tx_ring_t ring_p;
1510
1511 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
1512 ring_p = nxge_txdma_get_ring(nxgep, channel);
1513 if (ring_p == NULL) {
1514 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1515 return;
1516 }
1517
1518 if (ring_p->tdc != channel) {
1519 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1520 "<== nxge_txdma_fix_channel: channel not matched "
 1521 		    "ring tdc %d passed channel %d",
1522 ring_p->tdc, channel));
1523 return;
1524 }
1525
1526 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1527
1528 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1529}
1530
1531/*ARGSUSED*/
1532void
1533nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1534{
1535 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
1536
1537 if (ring_p == NULL) {
1538 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1539 "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1540 return;
1541 }
1542
1543 if (ring_p->tdc != channel) {
1544 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1545 "<== nxge_txdma_fixup_channel: channel not matched "
 1546 		    "ring tdc %d passed channel %d",
1547 ring_p->tdc, channel));
1548 return;
1549 }
1550
1551 MUTEX_ENTER(&ring_p->lock);
1552 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1553 ring_p->rd_index = 0;
1554 ring_p->wr_index = 0;
1555 ring_p->ring_head.value = 0;
1556 ring_p->ring_kick_tail.value = 0;
1557 ring_p->descs_pending = 0;
1558 MUTEX_EXIT(&ring_p->lock);
1559
1560 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
1561}
1562
1563/*ARGSUSED*/
1564void
1565nxge_txdma_hw_kick(p_nxge_t nxgep)
1566{
1567 nxge_grp_set_t *set = &nxgep->tx_set;
1568 int tdc;
1569
1570 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
1571
1572 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1573 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1574 "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
1575 return;
1576 }
1577
1578 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1579 if ((1 << tdc) & set->owned.map) {
1580 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1581 if (ring) {
1582 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1583 "==> nxge_txdma_hw_kick: channel %d", tdc));
1584 nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
1585 }
1586 }
1587 }
1588
1589 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
1590}
1591
1592/*ARGSUSED*/
1593void
1594nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
1595{
1596 p_tx_ring_t ring_p;
1597
1598 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
1599
1600 ring_p = nxge_txdma_get_ring(nxgep, channel);
1601 if (ring_p == NULL) {
1602 NXGE_DEBUG_MSG((nxgep, TX_CTL,
 1603 		    "<== nxge_txdma_kick_channel: NULL ring pointer"));
1604 return;
1605 }
1606
1607 if (ring_p->tdc != channel) {
1608 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1609 "<== nxge_txdma_kick_channel: channel not matched "
 1610 		    "ring tdc %d passed channel %d",
1611 ring_p->tdc, channel));
1612 return;
1613 }
1614
1615 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
1616
1617 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
1618}
1619
1620/*ARGSUSED*/
1621void
1622nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1623{
1624
1625 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
1626
1627 if (ring_p == NULL) {
1628 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1629 "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
1630 return;
1631 }
1632
1633 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
1634}
1635
1636/*
1637 * nxge_check_tx_hang
1638 *
1639 * Check the state of all TDCs belonging to nxgep.
1640 *
1641 * Arguments:
1642 * nxgep
1643 *
1644 * Notes:
1645 * Called by nxge_hw.c:nxge_check_hw_state().
1646 *
1647 * NPI/NXGE function calls:
1648 *
1649 * Registers accessed:
1650 *
1651 * Context:
1652 * Any domain
1653 */
1654/*ARGSUSED*/
1655void
1656nxge_check_tx_hang(p_nxge_t nxgep)
1657{
1658 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
1659
1660 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1661 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1662 goto nxge_check_tx_hang_exit;
1663 }
1664
1665 /*
1666 * Needs inputs from hardware for regs:
1667 * head index had not moved since last timeout.
1668 * packets not transmitted or stuffed registers.
1669 */
1670 if (nxge_txdma_hung(nxgep)) {
1671 nxge_fixup_hung_txdma_rings(nxgep);
1672 }
1673
1674nxge_check_tx_hang_exit:
1675 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
1676}
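/*
 * Editor's sketch: how a periodic check such as nxge_check_tx_hang() is
 * typically driven.  The real scheduling lives in
 * nxge_hw.c:nxge_check_hw_state(); the wrapper name and the one-second
 * interval below are assumptions for illustration.
 */
static void
example_hw_state_check(void *arg)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	nxge_check_tx_hang(nxgep);

	/* rearm the check roughly once a second */
	(void) timeout(example_hw_state_check, nxgep, drv_usectohz(1000000));
}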
1677
1678/*
1679 * nxge_txdma_hung
1680 *
 1681 *	Determine whether any TDC owned by <nxgep> appears to be hung.
1682 *
1683 * Arguments:
1684 * nxgep
 1685 *	(all TDCs owned by <nxgep> are checked; there are no
 1686 *	 per-channel arguments)
1687 *
1688 * Notes:
1689 * Called by nxge_check_tx_hang()
1690 *
1691 * NPI/NXGE function calls:
1692 * nxge_txdma_channel_hung()
1693 *
1694 * Registers accessed:
1695 *
1696 * Context:
1697 * Any domain
1698 */
1699int
1700nxge_txdma_hung(p_nxge_t nxgep)
1701{
1702 nxge_grp_set_t *set = &nxgep->tx_set;
1703 int tdc;
1704 boolean_t shared;
1705
1706 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
1707
1708 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1709 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1710 "<== nxge_txdma_hung: NULL ring pointer(s)"));
1711 return (B_FALSE);
1712 }
1713
1714 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1715 /*
1716 * Grab the shared state of the TDC.
1717 */
1718 if (isLDOMservice(nxgep)) {
1719 nxge_hio_data_t *nhd =
1720 (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
1721
1722 MUTEX_ENTER(&nhd->lock);
1723 shared = nxgep->tdc_is_shared[tdc];
1724 MUTEX_EXIT(&nhd->lock);
1725 } else {
1726 shared = B_FALSE;
1727 }
1728
1729 /*
 1730 		 * Now, continue processing this TDC.
1731 */
1732 if (((1 << tdc) & set->owned.map) && !shared) {
1733 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1734 if (ring) {
1735 if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
1736 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1737 "==> nxge_txdma_hung: TDC %d hung",
1738 tdc));
1739 return (B_TRUE);
1740 }
1741 }
1742 }
1743 }
1744
1745 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
1746
1747 return (B_FALSE);
1748}
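/*
 * Editor's sketch: the ownership test applied in the loop above.  A TDC
 * is examined only when this nxge instance owns it and, on a service
 * domain, has not loaned it to a guest.  The helper name is hypothetical.
 */
static boolean_t
example_should_check_tdc(uint32_t owned_map, int tdc, boolean_t shared)
{
	return ((((1 << tdc) & owned_map) != 0) && !shared);
}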
1749
1750/*
1751 * nxge_txdma_channel_hung
1752 *
 1753 *	Determine whether the given TDC appears to be hung.
1754 *
1755 * Arguments:
1756 * nxgep
1757 * ring <channel>'s ring.
 1758 * 	channel		The channel to check.
1759 *
1760 * Notes:
1761 * Called by nxge_txdma.c:nxge_txdma_hung()
1762 *
1763 * NPI/NXGE function calls:
1764 * npi_txdma_ring_head_get()
1765 *
1766 * Registers accessed:
1767 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1768 *
1769 * Context:
1770 * Any domain
1771 */
1772int
1773nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1774{
1775 uint16_t head_index, tail_index;
1776 boolean_t head_wrap, tail_wrap;
1777 npi_handle_t handle;
1778 tx_ring_hdl_t tx_head;
1779 uint_t tx_rd_index;
1780
1781 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
1782
1783 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1784 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1785 "==> nxge_txdma_channel_hung: channel %d", channel));
1786 MUTEX_ENTER(&tx_ring_p->lock);
1787 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
1788
1789 tail_index = tx_ring_p->wr_index;
1790 tail_wrap = tx_ring_p->wr_index_wrap;
1791 tx_rd_index = tx_ring_p->rd_index;
1792 MUTEX_EXIT(&tx_ring_p->lock);
1793
1794 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1795 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1796 "tail_index %d tail_wrap %d ",
1797 channel, tx_rd_index, tail_index, tail_wrap));
1798 /*
1799 * Read the hardware maintained transmit head
1800 * and wrap around bit.
1801 */
1802 (void) npi_txdma_ring_head_get(handle, channel, &tx_head);
1803 head_index = tx_head.bits.ldw.head;
1804 head_wrap = tx_head.bits.ldw.wrap;
1805 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1806 "==> nxge_txdma_channel_hung: "
1807 "tx_rd_index %d tail %d tail_wrap %d "
1808 "head %d wrap %d",
1809 tx_rd_index, tail_index, tail_wrap,
1810 head_index, head_wrap));
1811
1812 if (TXDMA_RING_EMPTY(head_index, head_wrap,
1813 tail_index, tail_wrap) &&
1814 (head_index == tx_rd_index)) {
1815 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1816 "==> nxge_txdma_channel_hung: EMPTY"));
1817 return (B_FALSE);
1818 }
1819
1820 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1821 "==> nxge_txdma_channel_hung: Checking if ring full"));
1822 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
1823 tail_wrap)) {
1824 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1825 "==> nxge_txdma_channel_hung: full"));
1826 return (B_TRUE);
1827 }
1828
1829 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
1830
1831 return (B_FALSE);
1832}
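/*
 * Editor's sketch: the hang decision made above, restated.  After a
 * reclaim pass, a TDC is reported hung only if the ring is still full;
 * an empty ring with the read index caught up to the head is idle, and
 * anything in between means the hardware is still making progress.
 * Names are hypothetical.
 */
static boolean_t
example_tdc_hung(boolean_t ring_empty, boolean_t rd_index_at_head,
    boolean_t ring_full)
{
	if (ring_empty && rd_index_at_head)
		return (B_FALSE);		/* idle, not hung */
	return (ring_full ? B_TRUE : B_FALSE);	/* full after reclaim: hung */
}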
1833
1834/*
1835 * nxge_fixup_hung_txdma_rings
1836 *
 1837 *	Fix up every hung TDC owned by <nxgep>.
1838 *
1839 * Arguments:
1840 * nxgep
 1841 *	(every owned TDC is reclaimed and, if necessary, disabled;
 1842 *	 there are no per-channel arguments)
1843 *
1844 * Notes:
1845 * Called by nxge_check_tx_hang()
1846 *
1847 * NPI/NXGE function calls:
1848 * npi_txdma_ring_head_get()
1849 *
1850 * Registers accessed:
1851 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1852 *
1853 * Context:
1854 * Any domain
1855 */
1856/*ARGSUSED*/
1857void
1858nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
1859{
1860 nxge_grp_set_t *set = &nxgep->tx_set;
1861 int tdc;
1862
1863 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
1864
1865 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1866 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1867 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
1868 return;
1869 }
1870
1871 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1872 if ((1 << tdc) & set->owned.map) {
1873 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1874 if (ring) {
1875 nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
1876 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1877 "==> nxge_fixup_hung_txdma_rings: TDC %d",
1878 tdc));
1879 }
1880 }
1881 }
1882
1883 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
1884}
1885
1886/*
1887 * nxge_txdma_fixup_hung_channel
1888 *
1889 * 'Fix' a hung TDC.
1890 *
1891 * Arguments:
1892 * nxgep
1893 * channel The channel to fix.
1894 *
1895 * Notes:
1896 * Called by nxge_fixup_hung_txdma_rings()
1897 *
1898 * 1. Reclaim the TDC.
1899 * 2. Disable the TDC.
1900 *
1901 * NPI/NXGE function calls:
1902 * nxge_txdma_reclaim()
1903 * npi_txdma_channel_disable(TX_CS)
1904 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1905 *
1906 * Registers accessed:
1907 * TX_CS DMC+0x40028 Transmit Control And Status
1908 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1909 *
1910 * Context:
1911 * Any domain
1912 */
1913/*ARGSUSED*/
1914void
1915nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
1916{
1917 p_tx_ring_t ring_p;
1918
1919 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
1920 ring_p = nxge_txdma_get_ring(nxgep, channel);
1921 if (ring_p == NULL) {
1922 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1923 "<== nxge_txdma_fix_hung_channel"));
1924 return;
1925 }
1926
1927 if (ring_p->tdc != channel) {
1928 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1929 "<== nxge_txdma_fix_hung_channel: channel not matched "
 1930 		    "ring tdc %d passed channel %d",
1931 ring_p->tdc, channel));
1932 return;
1933 }
1934
1935 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1936
1937 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
1938}
1939
1940/*ARGSUSED*/
1941void
1942nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
1943 uint16_t channel)
1944{
1945 npi_handle_t handle;
1946 tdmc_intr_dbg_t intr_dbg;
1947 int status = NXGE_OK;
1948
1949 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
1950
1951 if (ring_p == NULL) {
1952 NXGE_DEBUG_MSG((nxgep, TX_CTL,
 1953 		    "<== nxge_txdma_fixup_hung_channel: NULL ring pointer"));
1954 return;
1955 }
1956
1957 if (ring_p->tdc != channel) {
1958 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1959 "<== nxge_txdma_fixup_hung_channel: channel "
1960 "not matched "
 1961 		    "ring tdc %d passed channel %d",
1962 ring_p->tdc, channel));
1963 return;
1964 }
1965
1966 /* Reclaim descriptors */
1967 MUTEX_ENTER(&ring_p->lock);
1968 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1969 MUTEX_EXIT(&ring_p->lock);
1970
1971 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1972 /*
1973 * Stop the dma channel waits for the stop done.
1974 * If the stop done bit is not set, then force
1975 * an error.
1976 */
1977 status = npi_txdma_channel_disable(handle, channel);
1978 if (!(status & NPI_TXDMA_STOP_FAILED)) {
1979 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1980 "<== nxge_txdma_fixup_hung_channel: stopped OK "
1981 "ring tdc %d passed channel %d",
1982 ring_p->tdc, channel));
1983 return;
1984 }
1985
1986 /* Inject any error */
1987 intr_dbg.value = 0;
1988 intr_dbg.bits.ldw.nack_pref = 1;
1989 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1990
1991 /* Stop done bit will be set as a result of error injection */
1992 status = npi_txdma_channel_disable(handle, channel);
1993 if (!(status & NPI_TXDMA_STOP_FAILED)) {
1994 NXGE_DEBUG_MSG((nxgep, TX_CTL,
 1995 		    "<== nxge_txdma_fixup_hung_channel: stopped again "
 1996 		    "ring tdc %d passed channel %d",
1997 ring_p->tdc, channel));
1998 return;
1999 }
2000
2001 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2002 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
 2003 	    "ring tdc %d passed channel %d",
2004 ring_p->tdc, channel));
2005
2006 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
2007}
2008
2009/*ARGSUSED*/
2010void
2011nxge_reclaim_rings(p_nxge_t nxgep)
2012{
2013 nxge_grp_set_t *set = &nxgep->tx_set;
2014 int tdc;
2015
2016 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
2017
2018 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2019 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2020 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
2021 return;
2022 }
2023
2024 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2025 if ((1 << tdc) & set->owned.map) {
2026 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2027 if (ring) {
2028 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2029 "==> nxge_reclaim_rings: TDC %d", tdc));
2030 MUTEX_ENTER(&ring->lock);
2031 (void) nxge_txdma_reclaim(nxgep, ring, 0);
2032 MUTEX_EXIT(&ring->lock);
2033 }
2034 }
2035 }
2036
2037 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
2038}
2039
2040void
2041nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
2042{
2043 nxge_grp_set_t *set = &nxgep->tx_set;
2044 npi_handle_t handle;
2045 int tdc;
2046
2047 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
2048
2049 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2050
2051 if (!isLDOMguest(nxgep)) {
2052 (void) npi_txdma_dump_fzc_regs(handle);
2053
2054 /* Dump TXC registers. */
2055 (void) npi_txc_dump_fzc_regs(handle);
2056 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
2057 }
2058
2059 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2060 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2061 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
2062 return;
2063 }
2064
2065 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2066 if ((1 << tdc) & set->owned.map) {
2067 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2068 if (ring) {
2069 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2070 "==> nxge_txdma_regs_dump_channels: "
2071 "TDC %d", tdc));
2072 (void) npi_txdma_dump_tdc_regs(handle, tdc);
2073
2074 /* Dump TXC registers, if able to. */
2075 if (!isLDOMguest(nxgep)) {
2076 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2077 "==> nxge_txdma_regs_dump_channels:"
2078 " FZC TDC %d", tdc));
2079 (void) npi_txc_dump_tdc_fzc_regs
2080 (handle, tdc);
2081 }
2082 nxge_txdma_regs_dump(nxgep, tdc);
2083 }
2084 }
2085 }
2086
2087 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump"));
2088}
2089
2090void
2091nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
2092{
2093 npi_handle_t handle;
2094 tx_ring_hdl_t hdl;
2095 tx_ring_kick_t kick;
2096 tx_cs_t cs;
2097 txc_control_t control;
2098 uint32_t bitmap = 0;
2099 uint32_t burst = 0;
2100 uint32_t bytes = 0;
2101 dma_log_page_t cfg;
2102
2103 printf("\n\tfunc # %d tdc %d ",
2104 nxgep->function_num, channel);
2105 cfg.page_num = 0;
2106 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2107 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2108 printf("\n\tlog page func %d valid page 0 %d",
2109 cfg.func_num, cfg.valid);
2110 cfg.page_num = 1;
2111 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2112 printf("\n\tlog page func %d valid page 1 %d",
2113 cfg.func_num, cfg.valid);
2114
2115 (void) npi_txdma_ring_head_get(handle, channel, &hdl);
2116 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
2117 printf("\n\thead value is 0x%0llx",
2118 (long long)hdl.value);
2119 printf("\n\thead index %d", hdl.bits.ldw.head);
2120 printf("\n\tkick value is 0x%0llx",
2121 (long long)kick.value);
2122 printf("\n\ttail index %d\n", kick.bits.ldw.tail);
2123
2124 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
 2125 	printf("\n\tControl status is 0x%0llx", (long long)cs.value);
2126 printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
2127
2128 (void) npi_txc_control(handle, OP_GET, &control);
2129 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
2130 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
2131 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
2132
2133 printf("\n\tTXC port control 0x%0llx",
2134 (long long)control.value);
2135 printf("\n\tTXC port bitmap 0x%x", bitmap);
2136 printf("\n\tTXC max burst %d", burst);
2137 printf("\n\tTXC bytes xmt %d\n", bytes);
2138
2139 {
2140 ipp_status_t status;
2141
2142 (void) npi_ipp_get_status(handle, nxgep->function_num, &status);
2143#if defined(__i386)
2144 printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value);
2145#else
2146 printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value);
2147#endif
2148 }
2149}
2150
2151/*
2152 * nxge_tdc_hvio_setup
2153 *
2154 * I'm not exactly sure what this code does.
2155 *
2156 * Arguments:
2157 * nxgep
2158 * channel The channel to map.
2159 *
2160 * Notes:
2161 *
2162 * NPI/NXGE function calls:
2163 * na
2164 *
2165 * Context:
2166 * Service domain?
2167 */
2168#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2169static void
2170nxge_tdc_hvio_setup(
2171 nxge_t *nxgep, int channel)
2172{
2173 nxge_dma_common_t *data;
2174 nxge_dma_common_t *control;
2175 tx_ring_t *ring;
2176
2177 ring = nxgep->tx_rings->rings[channel];
2178 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2179
2180 ring->hv_set = B_FALSE;
2181
2182 ring->hv_tx_buf_base_ioaddr_pp =
2183 (uint64_t)data->orig_ioaddr_pp;
2184 ring->hv_tx_buf_ioaddr_size =
2185 (uint64_t)data->orig_alength;
2186
2187 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
2188 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
2189 "orig vatopa base io $%p orig_len 0x%llx (%d)",
2190 ring->hv_tx_buf_base_ioaddr_pp,
2191 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
2192 data->ioaddr_pp, data->orig_vatopa,
2193 data->orig_alength, data->orig_alength));
2194
2195 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2196
2197 ring->hv_tx_cntl_base_ioaddr_pp =
2198 (uint64_t)control->orig_ioaddr_pp;
2199 ring->hv_tx_cntl_ioaddr_size =
2200 (uint64_t)control->orig_alength;
2201
2202 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
2203 "hv cntl base io $%p orig ioaddr_pp ($%p) "
2204 "orig vatopa ($%p) size 0x%llx (%d 0x%x)",
2205 ring->hv_tx_cntl_base_ioaddr_pp,
2206 control->orig_ioaddr_pp, control->orig_vatopa,
2207 ring->hv_tx_cntl_ioaddr_size,
2208 control->orig_alength, control->orig_alength));
2209}
2210#endif
2211
2212static nxge_status_t
2213nxge_map_txdma(p_nxge_t nxgep, int channel)
2214{
2215 nxge_dma_common_t **pData;
2216 nxge_dma_common_t **pControl;
2217 tx_ring_t **pRing, *ring;
2218 tx_mbox_t **mailbox;
2219 uint32_t num_chunks;
2220
2221 nxge_status_t status = NXGE_OK;
2222
2223 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
2224
2225 if (!nxgep->tx_cntl_pool_p->buf_allocated) {
2226 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
2227 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2228 "<== nxge_map_txdma: buf not allocated"));
2229 return (NXGE_ERROR);
2230 }
2231 }
2232
2233 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
2234 return (NXGE_ERROR);
2235
2236 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2237 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2238 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2239 pRing = &nxgep->tx_rings->rings[channel];
2240 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2241
2242 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2243 "tx_rings $%p tx_desc_rings $%p",
2244 nxgep->tx_rings, nxgep->tx_rings->rings));
2245
2246 /*
2247 * Map descriptors from the buffer pools for <channel>.
2248 */
2249
2250 /*
2251 * Set up and prepare buffer blocks, descriptors
2252 * and mailbox.
2253 */
2254 status = nxge_map_txdma_channel(nxgep, channel,
2255 pData, pRing, num_chunks, pControl, mailbox);
2256 if (status != NXGE_OK) {
2257 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
 2258 		    "==> nxge_map_txdma($%p, %d): nxge_map_txdma_channel() "
2259 "returned 0x%x",
2260 nxgep, channel, status));
2261 return (status);
2262 }
2263
2264 ring = *pRing;
2265
2266 ring->index = (uint16_t)channel;
2267 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
2268
2269#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2270 if (isLDOMguest(nxgep)) {
2271 (void) nxge_tdc_lp_conf(nxgep, channel);
2272 } else {
2273 nxge_tdc_hvio_setup(nxgep, channel);
2274 }
2275#endif
2276
2277 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2278 "(status 0x%x channel %d)", status, channel));
2279
2280 return (status);
2281}
2282
2283static nxge_status_t
2284nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
2285 p_nxge_dma_common_t *dma_buf_p,
2286 p_tx_ring_t *tx_desc_p,
2287 uint32_t num_chunks,
2288 p_nxge_dma_common_t *dma_cntl_p,
2289 p_tx_mbox_t *tx_mbox_p)
2290{
2291 int status = NXGE_OK;
2292
2293 /*
2294 * Set up and prepare buffer blocks, descriptors
2295 * and mailbox.
2296 */
2297 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2298 "==> nxge_map_txdma_channel (channel %d)", channel));
2299 /*
2300 * Transmit buffer blocks
2301 */
2302 status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
2303 dma_buf_p, tx_desc_p, num_chunks);
2304 if (status != NXGE_OK) {
2305 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2306 "==> nxge_map_txdma_channel (channel %d): "
2307 "map buffer failed 0x%x", channel, status));
2308 goto nxge_map_txdma_channel_exit;
2309 }
2310
2311 /*
2312 * Transmit block ring, and mailbox.
2313 */
2314 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
2315 tx_mbox_p);
2316
2317 goto nxge_map_txdma_channel_exit;
2318
2319nxge_map_txdma_channel_fail1:
2320 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2321 "==> nxge_map_txdma_channel: unmap buf"
2322 "(status 0x%x channel %d)",
2323 status, channel));
2324 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
2325
2326nxge_map_txdma_channel_exit:
2327 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2328 "<== nxge_map_txdma_channel: "
2329 "(status 0x%x channel %d)",
2330 status, channel));
2331
2332 return (status);
2333}
2334
2335/*ARGSUSED*/
2336static void
2337nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
2338{
2339 tx_ring_t *ring;
2340 tx_mbox_t *mailbox;
2341
2342 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2343 "==> nxge_unmap_txdma_channel (channel %d)", channel));
2344 /*
2345 * unmap tx block ring, and mailbox.
2346 */
2347 ring = nxgep->tx_rings->rings[channel];
2348 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2349
2350 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);
2351
2352 /* unmap buffer blocks */
2353 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);
2354
2355 nxge_free_txb(nxgep, channel);
2356
2357 /*
2358 * Cleanup the reference to the ring now that it does not exist.
2359 */
2360 nxgep->tx_rings->rings[channel] = NULL;
2361
2362 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
2363}
2364
2365/*
2366 * nxge_map_txdma_channel_cfg_ring
2367 *
2368 * Map a TDC into our kernel space.
2369 * This function allocates all of the per-channel data structures.
2370 *
2371 * Arguments:
2372 * nxgep
2373 * dma_channel The channel to map.
2374 * dma_cntl_p
2375 * tx_ring_p dma_channel's transmit ring
2376 * tx_mbox_p dma_channel's mailbox
2377 *
2378 * Notes:
2379 *
2380 * NPI/NXGE function calls:
2381 * nxge_setup_dma_common()
2382 *
2383 * Registers accessed:
2384 * none.
2385 *
2386 * Context:
2387 * Any domain
2388 */
2389/*ARGSUSED*/
2390static void
2391nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
2392 p_nxge_dma_common_t *dma_cntl_p,
2393 p_tx_ring_t tx_ring_p,
2394 p_tx_mbox_t *tx_mbox_p)
2395{
2396 p_tx_mbox_t mboxp;
2397 p_nxge_dma_common_t cntl_dmap;
2398 p_nxge_dma_common_t dmap;
2399 p_tx_rng_cfig_t tx_ring_cfig_p;
2400 p_tx_ring_kick_t tx_ring_kick_p;
2401 p_tx_cs_t tx_cs_p;
2402 p_tx_dma_ent_msk_t tx_evmask_p;
2403 p_txdma_mbh_t mboxh_p;
2404 p_txdma_mbl_t mboxl_p;
2405 uint64_t tx_desc_len;
2406
2407 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2408 "==> nxge_map_txdma_channel_cfg_ring"));
2409
2410 cntl_dmap = *dma_cntl_p;
2411
2412 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
2413 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
2414 sizeof (tx_desc_t));
2415 /*
2416 * Zero out transmit ring descriptors.
2417 */
2418 bzero((caddr_t)dmap->kaddrp, dmap->alength);
2419 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
2420 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
2421 tx_cs_p = &(tx_ring_p->tx_cs);
2422 tx_evmask_p = &(tx_ring_p->tx_evmask);
2423 tx_ring_cfig_p->value = 0;
2424 tx_ring_kick_p->value = 0;
2425 tx_cs_p->value = 0;
2426 tx_evmask_p->value = 0;
2427
2428 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2429 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
2430 dma_channel,
2431 dmap->dma_cookie.dmac_laddress));
2432
2433 tx_ring_cfig_p->value = 0;
2434 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
2435 tx_ring_cfig_p->value =
2436 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
2437 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
2438
2439 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2440 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
2441 dma_channel,
2442 tx_ring_cfig_p->value));
2443
2444 tx_cs_p->bits.ldw.rst = 1;
2445
2446 /* Map in mailbox */
2447 mboxp = (p_tx_mbox_t)
2448 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
2449 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
2450 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
2451 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
2452 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
2453 mboxh_p->value = mboxl_p->value = 0;
2454
2455 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2456 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2457 dmap->dma_cookie.dmac_laddress));
2458
2459 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
2460 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
2461
2462 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
2463 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
2464
2465 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2466 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2467 dmap->dma_cookie.dmac_laddress));
2468 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2469 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
2470 "mbox $%p",
2471 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
2472 tx_ring_p->page_valid.value = 0;
2473 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
2474 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
2475 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
2476 tx_ring_p->page_hdl.value = 0;
2477
2478 tx_ring_p->page_valid.bits.ldw.page0 = 1;
2479 tx_ring_p->page_valid.bits.ldw.page1 = 1;
2480
2481 tx_ring_p->max_burst.value = 0;
2482 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
2483
2484 *tx_mbox_p = mboxp;
2485
2486 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2487 "<== nxge_map_txdma_channel_cfg_ring"));
2488}
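/*
 * Editor's sketch (hypothetical constants): the register packing done
 * above.  The ring configuration register carries the descriptor ring's
 * DMA base address plus its length expressed in units of 8 descriptors;
 * the real mask and shift come from TX_RNG_CFIG_ADDR_MASK and
 * TX_RNG_CFIG_LEN_SHIFT and are not restated here.
 */
static uint64_t
example_pack_ring_cfig(uint64_t desc_dma_addr, uint32_t ring_size,
    uint64_t addr_mask, uint32_t len_shift)
{
	uint64_t len_units = (uint64_t)(ring_size >> 3);	/* 8 descs/unit */

	return ((desc_dma_addr & addr_mask) | (len_units << len_shift));
}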
2489
2490/*ARGSUSED*/
2491static void
2492nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
2493 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2494{
2495 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2496 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
2497 tx_ring_p->tdc));
2498
2499 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
2500
2501 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2502 "<== nxge_unmap_txdma_channel_cfg_ring"));
2503}
2504
2505/*
2506 * nxge_map_txdma_channel_buf_ring
2507 *
2508 *
2509 * Arguments:
2510 * nxgep
2511 * channel The channel to map.
2512 * dma_buf_p
2513 * tx_desc_p channel's descriptor ring
2514 * num_chunks
2515 *
2516 * Notes:
2517 *
2518 * NPI/NXGE function calls:
2519 * nxge_setup_dma_common()
2520 *
2521 * Registers accessed:
2522 * none.
2523 *
2524 * Context:
2525 * Any domain
2526 */
2527static nxge_status_t
2528nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
2529 p_nxge_dma_common_t *dma_buf_p,
2530 p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
2531{
2532 p_nxge_dma_common_t dma_bufp, tmp_bufp;
2533 p_nxge_dma_common_t dmap;
2534 nxge_os_dma_handle_t tx_buf_dma_handle;
2535 p_tx_ring_t tx_ring_p;
1338 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1339 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1340 if ((1 << tdc) & set->owned.map) {
1341 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1342 if (ring) {
1343 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1344 "==> nxge_txdma_hw_mode: channel %d", tdc));
1345 if (enable) {
1346 rs = npi_txdma_channel_enable
1347 (handle, tdc);
1348 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1349 "==> nxge_txdma_hw_mode: "
1350 "channel %d (enable) rs 0x%x",
1351 tdc, rs));
1352 } else {
1353 rs = nxge_txdma_channel_disable
1354 (nxgep, tdc);
1355 }
1356 }
1357 }
1358 }
1359
1360 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1361
1362 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1363 "<== nxge_txdma_hw_mode: status 0x%x", status));
1364
1365 return (status);
1366}
1367
1368void
1369nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1370{
1371 npi_handle_t handle;
1372
1373 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1374 "==> nxge_txdma_enable_channel: channel %d", channel));
1375
1376 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1377 /* enable the transmit dma channels */
1378 (void) npi_txdma_channel_enable(handle, channel);
1379
1380 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
1381}
1382
1383void
1384nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1385{
1386 npi_handle_t handle;
1387
1388 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1389 "==> nxge_txdma_disable_channel: channel %d", channel));
1390
1391 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1392 /* stop the transmit dma channels */
1393 (void) npi_txdma_channel_disable(handle, channel);
1394
1395 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
1396}
1397
1398/*
1399 * nxge_txdma_stop_inj_err
1400 *
1401 * Stop a TDC. If at first we don't succeed, inject an error.
1402 *
1403 * Arguments:
1404 * nxgep
1405 * channel The channel to stop.
1406 *
1407 * Notes:
1408 *
1409 * NPI/NXGE function calls:
1410 * npi_txdma_channel_disable()
1411 * npi_txdma_inj_int_error_set()
1412 * #if defined(NXGE_DEBUG)
1413 * nxge_txdma_regs_dump_channels(nxgep);
1414 * #endif
1415 *
1416 * Registers accessed:
1417 * TX_CS DMC+0x40028 Transmit Control And Status
1418 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1419 *
1420 * Context:
1421 * Any domain
1422 */
1423int
1424nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
1425{
1426 npi_handle_t handle;
1427 tdmc_intr_dbg_t intr_dbg;
1428 int status;
1429 npi_status_t rs = NPI_SUCCESS;
1430
1431 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
1432 /*
1433 * Stop the dma channel waits for the stop done.
1434 * If the stop done bit is not set, then create
1435 * an error.
1436 */
1437 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1438 rs = npi_txdma_channel_disable(handle, channel);
1439 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1440 if (status == NXGE_OK) {
1441 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1442 "<== nxge_txdma_stop_inj_err (channel %d): "
1443 "stopped OK", channel));
1444 return (status);
1445 }
1446
1447 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1448 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1449 "injecting error", channel, rs));
1450 /* Inject any error */
1451 intr_dbg.value = 0;
1452 intr_dbg.bits.ldw.nack_pref = 1;
1453 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1454
1455 /* Stop done bit will be set as a result of error injection */
1456 rs = npi_txdma_channel_disable(handle, channel);
1457 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1458 if (!(rs & NPI_TXDMA_STOP_FAILED)) {
1459 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1460 "<== nxge_txdma_stop_inj_err (channel %d): "
1461 "stopped OK ", channel));
1462 return (status);
1463 }
1464
1465#if defined(NXGE_DEBUG)
1466 nxge_txdma_regs_dump_channels(nxgep);
1467#endif
1468 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1469 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
1470 " (injected error but still not stopped)", channel, rs));
1471
1472 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
1473 return (status);
1474}
1475
1476/*ARGSUSED*/
1477void
1478nxge_fixup_txdma_rings(p_nxge_t nxgep)
1479{
1480 nxge_grp_set_t *set = &nxgep->tx_set;
1481 int tdc;
1482
1483 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
1484
1485 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1486 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1487 "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
1488 return;
1489 }
1490
1491 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1492 if ((1 << tdc) & set->owned.map) {
1493 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1494 if (ring) {
1495 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1496 "==> nxge_fixup_txdma_rings: channel %d",
1497 tdc));
1498 nxge_txdma_fixup_channel(nxgep, ring, tdc);
1499 }
1500 }
1501 }
1502
1503 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
1504}
1505
1506/*ARGSUSED*/
1507void
1508nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1509{
1510 p_tx_ring_t ring_p;
1511
1512 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
1513 ring_p = nxge_txdma_get_ring(nxgep, channel);
1514 if (ring_p == NULL) {
1515 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1516 return;
1517 }
1518
1519 if (ring_p->tdc != channel) {
1520 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1521 "<== nxge_txdma_fix_channel: channel not matched "
1522 "ring tdc %d passed channel",
1523 ring_p->tdc, channel));
1524 return;
1525 }
1526
1527 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1528
1529 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1530}
1531
1532/*ARGSUSED*/
1533void
1534nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1535{
1536 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
1537
1538 if (ring_p == NULL) {
1539 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1540 "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1541 return;
1542 }
1543
1544 if (ring_p->tdc != channel) {
1545 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1546 "<== nxge_txdma_fixup_channel: channel not matched "
1547	    "ring tdc %d passed channel %d",
1548 ring_p->tdc, channel));
1549 return;
1550 }
1551
1552 MUTEX_ENTER(&ring_p->lock);
1553 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1554 ring_p->rd_index = 0;
1555 ring_p->wr_index = 0;
1556 ring_p->ring_head.value = 0;
1557 ring_p->ring_kick_tail.value = 0;
1558 ring_p->descs_pending = 0;
1559 MUTEX_EXIT(&ring_p->lock);
1560
1561 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
1562}
1563
1564/*ARGSUSED*/
1565void
1566nxge_txdma_hw_kick(p_nxge_t nxgep)
1567{
1568 nxge_grp_set_t *set = &nxgep->tx_set;
1569 int tdc;
1570
1571 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
1572
1573 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1574 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1575 "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
1576 return;
1577 }
1578
1579 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1580 if ((1 << tdc) & set->owned.map) {
1581 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1582 if (ring) {
1583 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1584 "==> nxge_txdma_hw_kick: channel %d", tdc));
1585 nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
1586 }
1587 }
1588 }
1589
1590 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
1591}
1592
1593/*ARGSUSED*/
1594void
1595nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
1596{
1597 p_tx_ring_t ring_p;
1598
1599 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
1600
1601 ring_p = nxge_txdma_get_ring(nxgep, channel);
1602 if (ring_p == NULL) {
1603 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1604	    "<== nxge_txdma_kick_channel"));
1605 return;
1606 }
1607
1608 if (ring_p->tdc != channel) {
1609 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1610 "<== nxge_txdma_kick_channel: channel not matched "
1611	    "ring tdc %d passed channel %d",
1612 ring_p->tdc, channel));
1613 return;
1614 }
1615
1616 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
1617
1618 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
1619}
1620
1621/*ARGSUSED*/
1622void
1623nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1624{
1625
1626 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
1627
1628 if (ring_p == NULL) {
1629 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1630 "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
1631 return;
1632 }
1633
1634 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
1635}
1636
1637/*
1638 * nxge_check_tx_hang
1639 *
1640 * Check the state of all TDCs belonging to nxgep.
1641 *
1642 * Arguments:
1643 * nxgep
1644 *
1645 * Notes:
1646 * Called by nxge_hw.c:nxge_check_hw_state().
1647 *
1648 * NPI/NXGE function calls:
1649 *
1650 * Registers accessed:
1651 *
1652 * Context:
1653 * Any domain
1654 */
1655/*ARGSUSED*/
1656void
1657nxge_check_tx_hang(p_nxge_t nxgep)
1658{
1659 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
1660
1661 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1662 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1663 goto nxge_check_tx_hang_exit;
1664 }
1665
1666 /*
1667 * Needs inputs from hardware for regs:
1668 * head index had not moved since last timeout.
1669 * packets not transmitted or stuffed registers.
1670 */
1671 if (nxge_txdma_hung(nxgep)) {
1672 nxge_fixup_hung_txdma_rings(nxgep);
1673 }
1674
1675nxge_check_tx_hang_exit:
1676 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
1677}
1678
1679/*
1680 * nxge_txdma_hung
1681 *
1682 *	Determine whether any TDC owned by <nxgep> appears to be hung.
1683 *
1684 * Arguments:
1685 * nxgep
1688 *
1689 * Notes:
1690 * Called by nxge_check_tx_hang()
1691 *
1692 * NPI/NXGE function calls:
1693 * nxge_txdma_channel_hung()
1694 *
1695 * Registers accessed:
1696 *
1697 * Context:
1698 * Any domain
1699 */
1700int
1701nxge_txdma_hung(p_nxge_t nxgep)
1702{
1703 nxge_grp_set_t *set = &nxgep->tx_set;
1704 int tdc;
1705 boolean_t shared;
1706
1707 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
1708
1709 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1710 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1711 "<== nxge_txdma_hung: NULL ring pointer(s)"));
1712 return (B_FALSE);
1713 }
1714
1715 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1716 /*
1717 * Grab the shared state of the TDC.
1718 */
1719 if (isLDOMservice(nxgep)) {
1720 nxge_hio_data_t *nhd =
1721 (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
1722
1723 MUTEX_ENTER(&nhd->lock);
1724 shared = nxgep->tdc_is_shared[tdc];
1725 MUTEX_EXIT(&nhd->lock);
1726 } else {
1727 shared = B_FALSE;
1728 }
1729
1730 /*
1731	 * Now process the channel if we own it and it is not shared.
1732 */
1733 if (((1 << tdc) & set->owned.map) && !shared) {
1734 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1735 if (ring) {
1736 if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
1737 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1738 "==> nxge_txdma_hung: TDC %d hung",
1739 tdc));
1740 return (B_TRUE);
1741 }
1742 }
1743 }
1744 }
1745
1746 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
1747
1748 return (B_FALSE);
1749}
1750
1751/*
1752 * nxge_txdma_channel_hung
1753 *
1754 *	Determine whether <channel> appears to be hung.
1755 *
1756 * Arguments:
1757 * nxgep
1758 * ring <channel>'s ring.
1759 *	channel		The channel to examine.
1760 *
1761 * Notes:
1762 * Called by nxge_txdma.c:nxge_txdma_hung()
1763 *
1764 * NPI/NXGE function calls:
1765 * npi_txdma_ring_head_get()
1766 *
1767 * Registers accessed:
1768 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1769 *
1770 * Context:
1771 * Any domain
1772 */
1773int
1774nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1775{
1776 uint16_t head_index, tail_index;
1777 boolean_t head_wrap, tail_wrap;
1778 npi_handle_t handle;
1779 tx_ring_hdl_t tx_head;
1780 uint_t tx_rd_index;
1781
1782 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
1783
1784 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1785 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1786 "==> nxge_txdma_channel_hung: channel %d", channel));
1787 MUTEX_ENTER(&tx_ring_p->lock);
1788 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
1789
1790 tail_index = tx_ring_p->wr_index;
1791 tail_wrap = tx_ring_p->wr_index_wrap;
1792 tx_rd_index = tx_ring_p->rd_index;
1793 MUTEX_EXIT(&tx_ring_p->lock);
1794
1795 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1796 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1797 "tail_index %d tail_wrap %d ",
1798 channel, tx_rd_index, tail_index, tail_wrap));
1799 /*
1800 * Read the hardware maintained transmit head
1801 * and wrap around bit.
1802 */
1803 (void) npi_txdma_ring_head_get(handle, channel, &tx_head);
1804 head_index = tx_head.bits.ldw.head;
1805 head_wrap = tx_head.bits.ldw.wrap;
1806 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1807 "==> nxge_txdma_channel_hung: "
1808 "tx_rd_index %d tail %d tail_wrap %d "
1809 "head %d wrap %d",
1810 tx_rd_index, tail_index, tail_wrap,
1811 head_index, head_wrap));
1812
1813 if (TXDMA_RING_EMPTY(head_index, head_wrap,
1814 tail_index, tail_wrap) &&
1815 (head_index == tx_rd_index)) {
1816 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1817 "==> nxge_txdma_channel_hung: EMPTY"));
1818 return (B_FALSE);
1819 }
1820
1821 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1822 "==> nxge_txdma_channel_hung: Checking if ring full"));
1823 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
1824 tail_wrap)) {
1825 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1826 "==> nxge_txdma_channel_hung: full"));
1827 return (B_TRUE);
1828 }
1829
1830 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
1831
1832 return (B_FALSE);
1833}
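
/*
 * Illustrative sketch (added for clarity, not part of the driver): with a
 * head index, a tail index and one wrap bit each, "head == tail" alone is
 * ambiguous.  The wrap bits resolve it: equal indices with equal wrap bits
 * mean the ring is empty, equal indices with differing wrap bits mean it is
 * full.  The standalone helper below models what the TXDMA_RING_EMPTY and
 * TXDMA_RING_FULL checks above are assumed to perform; the function name
 * and the return encoding are hypothetical.
 */
#if 0	/* example only -- never compiled */
static int
example_ring_state(uint16_t head, boolean_t head_wrap,
    uint16_t tail, boolean_t tail_wrap)
{
	if (head == tail) {
		/* Same index: wrap bits decide empty (0) vs. full (1). */
		return ((head_wrap == tail_wrap) ? 0 : 1);
	}
	return (2);	/* partially filled */
}
#endif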
1834
1835/*
1836 * nxge_fixup_hung_txdma_rings
1837 *
1838 *	Walk the TDCs owned by <nxgep> and fix up each one that appears hung.
1839 *
1840 * Arguments:
1841 * nxgep
1844 *
1845 * Notes:
1846 * Called by nxge_check_tx_hang()
1847 *
1848 * NPI/NXGE function calls:
1849 * npi_txdma_ring_head_get()
1850 *
1851 * Registers accessed:
1852 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1853 *
1854 * Context:
1855 * Any domain
1856 */
1857/*ARGSUSED*/
1858void
1859nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
1860{
1861 nxge_grp_set_t *set = &nxgep->tx_set;
1862 int tdc;
1863
1864 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
1865
1866 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1867 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1868 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
1869 return;
1870 }
1871
1872 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1873 if ((1 << tdc) & set->owned.map) {
1874 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1875 if (ring) {
1876 nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
1877 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1878 "==> nxge_fixup_hung_txdma_rings: TDC %d",
1879 tdc));
1880 }
1881 }
1882 }
1883
1884 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
1885}
1886
1887/*
1888 * nxge_txdma_fix_hung_channel
1889 *
1890 * 'Fix' a hung TDC.
1891 *
1892 * Arguments:
1893 * nxgep
1894 * channel The channel to fix.
1895 *
1896 * Notes:
1897 * Called by nxge_fixup_hung_txdma_rings()
1898 *
1899 * 1. Reclaim the TDC.
1900 * 2. Disable the TDC.
1901 *
1902 * NPI/NXGE function calls:
1903 * nxge_txdma_reclaim()
1904 * npi_txdma_channel_disable(TX_CS)
1905 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1906 *
1907 * Registers accessed:
1908 * TX_CS DMC+0x40028 Transmit Control And Status
1909 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1910 *
1911 * Context:
1912 * Any domain
1913 */
1914/*ARGSUSED*/
1915void
1916nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
1917{
1918 p_tx_ring_t ring_p;
1919
1920 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
1921 ring_p = nxge_txdma_get_ring(nxgep, channel);
1922 if (ring_p == NULL) {
1923 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1924 "<== nxge_txdma_fix_hung_channel"));
1925 return;
1926 }
1927
1928 if (ring_p->tdc != channel) {
1929 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1930 "<== nxge_txdma_fix_hung_channel: channel not matched "
1931	    "ring tdc %d passed channel %d",
1932 ring_p->tdc, channel));
1933 return;
1934 }
1935
1936 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1937
1938 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
1939}
1940
1941/*ARGSUSED*/
1942void
1943nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
1944 uint16_t channel)
1945{
1946 npi_handle_t handle;
1947 tdmc_intr_dbg_t intr_dbg;
1948 int status = NXGE_OK;
1949
1950 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
1951
1952 if (ring_p == NULL) {
1953 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1954	    "<== nxge_txdma_fixup_hung_channel: NULL ring pointer"));
1955 return;
1956 }
1957
1958 if (ring_p->tdc != channel) {
1959 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1960 "<== nxge_txdma_fixup_hung_channel: channel "
1961 "not matched "
1962	    "ring tdc %d passed channel %d",
1963 ring_p->tdc, channel));
1964 return;
1965 }
1966
1967 /* Reclaim descriptors */
1968 MUTEX_ENTER(&ring_p->lock);
1969 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1970 MUTEX_EXIT(&ring_p->lock);
1971
1972 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1973 /*
1974	 * Stop the DMA channel and wait for the stop-done bit.
1975	 * If the stop-done bit does not get set, force it by
1976	 * injecting an error.
1977 */
1978 status = npi_txdma_channel_disable(handle, channel);
1979 if (!(status & NPI_TXDMA_STOP_FAILED)) {
1980 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1981 "<== nxge_txdma_fixup_hung_channel: stopped OK "
1982 "ring tdc %d passed channel %d",
1983 ring_p->tdc, channel));
1984 return;
1985 }
1986
1987 /* Inject any error */
1988 intr_dbg.value = 0;
1989 intr_dbg.bits.ldw.nack_pref = 1;
1990 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1991
1992 /* Stop done bit will be set as a result of error injection */
1993 status = npi_txdma_channel_disable(handle, channel);
1994 if (!(status & NPI_TXDMA_STOP_FAILED)) {
1995 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1996	    "<== nxge_txdma_fixup_hung_channel: stopped again "
1997	    "ring tdc %d passed channel %d",
1998 ring_p->tdc, channel));
1999 return;
2000 }
2001
2002 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2003 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
2004	    "ring tdc %d passed channel %d",
2005 ring_p->tdc, channel));
2006
2007 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
2008}
2009
2010/*ARGSUSED*/
2011void
2012nxge_reclaim_rings(p_nxge_t nxgep)
2013{
2014 nxge_grp_set_t *set = &nxgep->tx_set;
2015 int tdc;
2016
2017 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
2018
2019 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2020 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2021	    "<== nxge_reclaim_rings: NULL ring pointer(s)"));
2022 return;
2023 }
2024
2025 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2026 if ((1 << tdc) & set->owned.map) {
2027 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2028 if (ring) {
2029 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2030 "==> nxge_reclaim_rings: TDC %d", tdc));
2031 MUTEX_ENTER(&ring->lock);
2032 (void) nxge_txdma_reclaim(nxgep, ring, 0);
2033 MUTEX_EXIT(&ring->lock);
2034 }
2035 }
2036 }
2037
2038 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
2039}
2040
2041void
2042nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
2043{
2044 nxge_grp_set_t *set = &nxgep->tx_set;
2045 npi_handle_t handle;
2046 int tdc;
2047
2048 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
2049
2050 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2051
2052 if (!isLDOMguest(nxgep)) {
2053 (void) npi_txdma_dump_fzc_regs(handle);
2054
2055 /* Dump TXC registers. */
2056 (void) npi_txc_dump_fzc_regs(handle);
2057 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
2058 }
2059
2060 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2061 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2062	    "<== nxge_txdma_regs_dump_channels: NULL ring pointer(s)"));
2063 return;
2064 }
2065
2066 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2067 if ((1 << tdc) & set->owned.map) {
2068 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2069 if (ring) {
2070 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2071 "==> nxge_txdma_regs_dump_channels: "
2072 "TDC %d", tdc));
2073 (void) npi_txdma_dump_tdc_regs(handle, tdc);
2074
2075 /* Dump TXC registers, if able to. */
2076 if (!isLDOMguest(nxgep)) {
2077 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2078 "==> nxge_txdma_regs_dump_channels:"
2079 " FZC TDC %d", tdc));
2080 (void) npi_txc_dump_tdc_fzc_regs
2081 (handle, tdc);
2082 }
2083 nxge_txdma_regs_dump(nxgep, tdc);
2084 }
2085 }
2086 }
2087
2088	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
2089}
2090
2091void
2092nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
2093{
2094 npi_handle_t handle;
2095 tx_ring_hdl_t hdl;
2096 tx_ring_kick_t kick;
2097 tx_cs_t cs;
2098 txc_control_t control;
2099 uint32_t bitmap = 0;
2100 uint32_t burst = 0;
2101 uint32_t bytes = 0;
2102 dma_log_page_t cfg;
2103
2104 printf("\n\tfunc # %d tdc %d ",
2105 nxgep->function_num, channel);
2106 cfg.page_num = 0;
2107 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2108 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2109 printf("\n\tlog page func %d valid page 0 %d",
2110 cfg.func_num, cfg.valid);
2111 cfg.page_num = 1;
2112 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2113 printf("\n\tlog page func %d valid page 1 %d",
2114 cfg.func_num, cfg.valid);
2115
2116 (void) npi_txdma_ring_head_get(handle, channel, &hdl);
2117 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
2118 printf("\n\thead value is 0x%0llx",
2119 (long long)hdl.value);
2120 printf("\n\thead index %d", hdl.bits.ldw.head);
2121 printf("\n\tkick value is 0x%0llx",
2122 (long long)kick.value);
2123 printf("\n\ttail index %d\n", kick.bits.ldw.tail);
2124
2125 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
2126	printf("\n\tControl status is 0x%0llx", (long long)cs.value);
2127 printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
2128
2129 (void) npi_txc_control(handle, OP_GET, &control);
2130 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
2131 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
2132 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
2133
2134 printf("\n\tTXC port control 0x%0llx",
2135 (long long)control.value);
2136 printf("\n\tTXC port bitmap 0x%x", bitmap);
2137 printf("\n\tTXC max burst %d", burst);
2138 printf("\n\tTXC bytes xmt %d\n", bytes);
2139
2140 {
2141 ipp_status_t status;
2142
2143 (void) npi_ipp_get_status(handle, nxgep->function_num, &status);
2144#if defined(__i386)
2145	printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value);
2146#else
2147	printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value);
2148#endif
2149 }
2150}
2151
2152/*
2153 * nxge_tdc_hvio_setup
2154 *
2155 *	Save the hypervisor I/O base addresses and sizes of the channel's
 *	data and control buffers in the ring (sun4v NIU logical-page setup).
2156 *
2157 * Arguments:
2158 * nxgep
2159 * channel The channel to map.
2160 *
2161 * Notes:
2162 *
2163 * NPI/NXGE function calls:
2164 * na
2165 *
2166 * Context:
2167 *	Service domain (not called in an LDoms guest).
2168 */
2169#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2170static void
2171nxge_tdc_hvio_setup(
2172 nxge_t *nxgep, int channel)
2173{
2174 nxge_dma_common_t *data;
2175 nxge_dma_common_t *control;
2176 tx_ring_t *ring;
2177
2178 ring = nxgep->tx_rings->rings[channel];
2179 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2180
2181 ring->hv_set = B_FALSE;
2182
2183 ring->hv_tx_buf_base_ioaddr_pp =
2184 (uint64_t)data->orig_ioaddr_pp;
2185 ring->hv_tx_buf_ioaddr_size =
2186 (uint64_t)data->orig_alength;
2187
2188 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
2189 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
2190 "orig vatopa base io $%p orig_len 0x%llx (%d)",
2191 ring->hv_tx_buf_base_ioaddr_pp,
2192 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
2193 data->ioaddr_pp, data->orig_vatopa,
2194 data->orig_alength, data->orig_alength));
2195
2196 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2197
2198 ring->hv_tx_cntl_base_ioaddr_pp =
2199 (uint64_t)control->orig_ioaddr_pp;
2200 ring->hv_tx_cntl_ioaddr_size =
2201 (uint64_t)control->orig_alength;
2202
2203 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
2204 "hv cntl base io $%p orig ioaddr_pp ($%p) "
2205 "orig vatopa ($%p) size 0x%llx (%d 0x%x)",
2206 ring->hv_tx_cntl_base_ioaddr_pp,
2207 control->orig_ioaddr_pp, control->orig_vatopa,
2208 ring->hv_tx_cntl_ioaddr_size,
2209 control->orig_alength, control->orig_alength));
2210}
2211#endif
2212
2213static nxge_status_t
2214nxge_map_txdma(p_nxge_t nxgep, int channel)
2215{
2216 nxge_dma_common_t **pData;
2217 nxge_dma_common_t **pControl;
2218 tx_ring_t **pRing, *ring;
2219 tx_mbox_t **mailbox;
2220 uint32_t num_chunks;
2221
2222 nxge_status_t status = NXGE_OK;
2223
2224 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
2225
2226 if (!nxgep->tx_cntl_pool_p->buf_allocated) {
2227 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
2228 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2229 "<== nxge_map_txdma: buf not allocated"));
2230 return (NXGE_ERROR);
2231 }
2232 }
2233
2234 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
2235 return (NXGE_ERROR);
2236
2237 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2238 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2239 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2240 pRing = &nxgep->tx_rings->rings[channel];
2241 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2242
2243 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2244 "tx_rings $%p tx_desc_rings $%p",
2245 nxgep->tx_rings, nxgep->tx_rings->rings));
2246
2247 /*
2248 * Map descriptors from the buffer pools for <channel>.
2249 */
2250
2251 /*
2252 * Set up and prepare buffer blocks, descriptors
2253 * and mailbox.
2254 */
2255 status = nxge_map_txdma_channel(nxgep, channel,
2256 pData, pRing, num_chunks, pControl, mailbox);
2257 if (status != NXGE_OK) {
2258 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2259	    "==> nxge_map_txdma($%p, %d): nxge_map_txdma_channel() "
2260 "returned 0x%x",
2261 nxgep, channel, status));
2262 return (status);
2263 }
2264
2265 ring = *pRing;
2266
2267 ring->index = (uint16_t)channel;
2268 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
2269
2270#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2271 if (isLDOMguest(nxgep)) {
2272 (void) nxge_tdc_lp_conf(nxgep, channel);
2273 } else {
2274 nxge_tdc_hvio_setup(nxgep, channel);
2275 }
2276#endif
2277
2278 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2279 "(status 0x%x channel %d)", status, channel));
2280
2281 return (status);
2282}
2283
2284static nxge_status_t
2285nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
2286 p_nxge_dma_common_t *dma_buf_p,
2287 p_tx_ring_t *tx_desc_p,
2288 uint32_t num_chunks,
2289 p_nxge_dma_common_t *dma_cntl_p,
2290 p_tx_mbox_t *tx_mbox_p)
2291{
2292 int status = NXGE_OK;
2293
2294 /*
2295 * Set up and prepare buffer blocks, descriptors
2296 * and mailbox.
2297 */
2298 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2299 "==> nxge_map_txdma_channel (channel %d)", channel));
2300 /*
2301 * Transmit buffer blocks
2302 */
2303 status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
2304 dma_buf_p, tx_desc_p, num_chunks);
2305 if (status != NXGE_OK) {
2306 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2307 "==> nxge_map_txdma_channel (channel %d): "
2308 "map buffer failed 0x%x", channel, status));
2309 goto nxge_map_txdma_channel_exit;
2310 }
2311
2312 /*
2313 * Transmit block ring, and mailbox.
2314 */
2315 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
2316 tx_mbox_p);
2317
2318 goto nxge_map_txdma_channel_exit;
2319
2320nxge_map_txdma_channel_fail1:
2321 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2322 "==> nxge_map_txdma_channel: unmap buf"
2323 "(status 0x%x channel %d)",
2324 status, channel));
2325 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
2326
2327nxge_map_txdma_channel_exit:
2328 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2329 "<== nxge_map_txdma_channel: "
2330 "(status 0x%x channel %d)",
2331 status, channel));
2332
2333 return (status);
2334}
2335
2336/*ARGSUSED*/
2337static void
2338nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
2339{
2340 tx_ring_t *ring;
2341 tx_mbox_t *mailbox;
2342
2343 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2344 "==> nxge_unmap_txdma_channel (channel %d)", channel));
2345 /*
2346 * unmap tx block ring, and mailbox.
2347 */
2348 ring = nxgep->tx_rings->rings[channel];
2349 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2350
2351 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);
2352
2353 /* unmap buffer blocks */
2354 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);
2355
2356 nxge_free_txb(nxgep, channel);
2357
2358 /*
2359 * Cleanup the reference to the ring now that it does not exist.
2360 */
2361 nxgep->tx_rings->rings[channel] = NULL;
2362
2363 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
2364}
2365
2366/*
2367 * nxge_map_txdma_channel_cfg_ring
2368 *
2369 *	Initialize a TDC's descriptor area and register shadow values (ring
2370 *	config, kick, control/status, event mask) and allocate its mailbox.
2371 *
2372 * Arguments:
2373 * nxgep
2374 * dma_channel The channel to map.
2375 * dma_cntl_p
2376 * tx_ring_p dma_channel's transmit ring
2377 * tx_mbox_p dma_channel's mailbox
2378 *
2379 * Notes:
2380 *
2381 * NPI/NXGE function calls:
2382 * nxge_setup_dma_common()
2383 *
2384 * Registers accessed:
2385 * none.
2386 *
2387 * Context:
2388 * Any domain
2389 */
2390/*ARGSUSED*/
2391static void
2392nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
2393 p_nxge_dma_common_t *dma_cntl_p,
2394 p_tx_ring_t tx_ring_p,
2395 p_tx_mbox_t *tx_mbox_p)
2396{
2397 p_tx_mbox_t mboxp;
2398 p_nxge_dma_common_t cntl_dmap;
2399 p_nxge_dma_common_t dmap;
2400 p_tx_rng_cfig_t tx_ring_cfig_p;
2401 p_tx_ring_kick_t tx_ring_kick_p;
2402 p_tx_cs_t tx_cs_p;
2403 p_tx_dma_ent_msk_t tx_evmask_p;
2404 p_txdma_mbh_t mboxh_p;
2405 p_txdma_mbl_t mboxl_p;
2406 uint64_t tx_desc_len;
2407
2408 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2409 "==> nxge_map_txdma_channel_cfg_ring"));
2410
2411 cntl_dmap = *dma_cntl_p;
2412
2413 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
2414 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
2415 sizeof (tx_desc_t));
2416 /*
2417 * Zero out transmit ring descriptors.
2418 */
2419 bzero((caddr_t)dmap->kaddrp, dmap->alength);
2420 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
2421 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
2422 tx_cs_p = &(tx_ring_p->tx_cs);
2423 tx_evmask_p = &(tx_ring_p->tx_evmask);
2424 tx_ring_cfig_p->value = 0;
2425 tx_ring_kick_p->value = 0;
2426 tx_cs_p->value = 0;
2427 tx_evmask_p->value = 0;
2428
2429 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2430 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
2431 dma_channel,
2432 dmap->dma_cookie.dmac_laddress));
2433
2434 tx_ring_cfig_p->value = 0;
2435 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
2436 tx_ring_cfig_p->value =
2437 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
2438 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
2439
2440 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2441 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
2442 dma_channel,
2443 tx_ring_cfig_p->value));
2444
2445 tx_cs_p->bits.ldw.rst = 1;
2446
2447 /* Map in mailbox */
2448 mboxp = (p_tx_mbox_t)
2449 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
2450 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
2451 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
2452 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
2453 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
2454 mboxh_p->value = mboxl_p->value = 0;
2455
2456 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2457 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2458 dmap->dma_cookie.dmac_laddress));
2459
2460 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
2461 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
2462
2463 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
2464 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
2465
2466 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2467 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2468 dmap->dma_cookie.dmac_laddress));
2469 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2470 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
2471 "mbox $%p",
2472 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
2473 tx_ring_p->page_valid.value = 0;
2474 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
2475 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
2476 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
2477 tx_ring_p->page_hdl.value = 0;
2478
2479 tx_ring_p->page_valid.bits.ldw.page0 = 1;
2480 tx_ring_p->page_valid.bits.ldw.page1 = 1;
2481
2482 tx_ring_p->max_burst.value = 0;
2483 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
2484
2485 *tx_mbox_p = mboxp;
2486
2487 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2488 "<== nxge_map_txdma_channel_cfg_ring"));
2489}
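
/*
 * Worked example (added for clarity, not part of the driver): the length
 * field written to TX_RNG_CFIG above is the ring size in units of 8
 * descriptors (tx_ring_size >> 3), OR'ed with the masked descriptor ring
 * DMA address; the mailbox address is similarly split across the MBH and
 * MBL shadow values.  The helper below is a hypothetical standalone model
 * of the ring-config packing; it reuses the driver's mask/shift constants
 * and the function name is made up for illustration.
 */
#if 0	/* example only -- never compiled */
static uint64_t
example_tx_rng_cfig(uint64_t desc_iova, uint32_t tx_ring_size)
{
	/* e.g. a 1024-entry ring yields a length field of 128 */
	uint64_t len = (uint64_t)(tx_ring_size >> 3);

	return ((desc_iova & TX_RNG_CFIG_ADDR_MASK) |
	    (len << TX_RNG_CFIG_LEN_SHIFT));
}
#endif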
2490
2491/*ARGSUSED*/
2492static void
2493nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
2494 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2495{
2496 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2497 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
2498 tx_ring_p->tdc));
2499
2500 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
2501
2502 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2503 "<== nxge_unmap_txdma_channel_cfg_ring"));
2504}
2505
2506/*
2507 * nxge_map_txdma_channel_buf_ring
2508 *
2509 *	Allocate <channel>'s transmit ring, its per-block tx_msg_t array and
 *	DMA handles, and map the buffer-pool chunks into the ring.
2510 * Arguments:
2511 * nxgep
2512 * channel The channel to map.
2513 * dma_buf_p
2514 * tx_desc_p channel's descriptor ring
2515 * num_chunks
2516 *
2517 * Notes:
2518 *
2519 * NPI/NXGE function calls:
2520 * nxge_setup_dma_common()
2521 *
2522 * Registers accessed:
2523 * none.
2524 *
2525 * Context:
2526 * Any domain
2527 */
2528static nxge_status_t
2529nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
2530 p_nxge_dma_common_t *dma_buf_p,
2531 p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
2532{
2533 p_nxge_dma_common_t dma_bufp, tmp_bufp;
2534 p_nxge_dma_common_t dmap;
2535 nxge_os_dma_handle_t tx_buf_dma_handle;
2536 p_tx_ring_t tx_ring_p;
2537 	p_tx_msg_t 		tx_msg_ring = NULL;
2538 	nxge_status_t		status = NXGE_OK;
2539 	int			ddi_status = DDI_SUCCESS;
2540 	int			i, j, index = 0;
2541 	uint32_t		size = 0, bsize;
2542 	uint32_t 		nblocks, nmsgs;
2543 	char			qname[TASKQ_NAMELEN];

2544 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2545 "==> nxge_map_txdma_channel_buf_ring"));
2546
2547 dma_bufp = tmp_bufp = *dma_buf_p;
2548 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2549 " nxge_map_txdma_channel_buf_ring: channel %d to map %d "
2550 "chunks bufp $%p",
2551 channel, num_chunks, dma_bufp));
2552
2553 nmsgs = 0;
2554 for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2555 nmsgs += tmp_bufp->nblocks;
2556 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2557 "==> nxge_map_txdma_channel_buf_ring: channel %d "
2558 "bufp $%p nblocks %d nmsgs %d",
2559 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2560 }
2561 if (!nmsgs) {
2562 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2563 "<== nxge_map_txdma_channel_buf_ring: channel %d "
2564 "no msg blocks",
2565 channel));
2566 status = NXGE_ERROR;
2567 goto nxge_map_txdma_channel_buf_ring_exit;
2568 }
2569
2570 tx_ring_p = (p_tx_ring_t)
2571 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
2572 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
2573 (void *)nxgep->interrupt_cookie);
2574
2575 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE);
2576 tx_ring_p->tx_ring_busy = B_FALSE;
2577 tx_ring_p->nxgep = nxgep;
2578 tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL;
2579 (void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d",
2580 nxgep->instance, channel);
2581 tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1,
2582 TASKQ_DEFAULTPRI, 0);
2583 if (tx_ring_p->taskq == NULL) {
2584 goto nxge_map_txdma_channel_buf_ring_fail1;
2585 }
2586
2587 /*
2588 * Allocate transmit message rings and handles for packets
2589 * not to be copied to premapped buffers.
2590 */
2591 size = nmsgs * sizeof (tx_msg_t);
2592 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2593 for (i = 0; i < nmsgs; i++) {
2594 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2595 DDI_DMA_DONTWAIT, 0,
2596 &tx_msg_ring[i].dma_handle);
2597 if (ddi_status != DDI_SUCCESS) {
2598 status |= NXGE_DDI_FAILED;
2599 break;
2600 }
2601 }
2602 if (i < nmsgs) {
2603 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2604 "Allocate handles failed."));
2605 goto nxge_map_txdma_channel_buf_ring_fail1;
2606 }
2607
2608 tx_ring_p->tdc = channel;
2609 tx_ring_p->tx_msg_ring = tx_msg_ring;
2610 tx_ring_p->tx_ring_size = nmsgs;
2611 tx_ring_p->num_chunks = num_chunks;
2612 if (!nxge_tx_intr_thres) {
2613 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
2614 }
2615 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
2616 tx_ring_p->rd_index = 0;
2617 tx_ring_p->wr_index = 0;
2618 tx_ring_p->ring_head.value = 0;
2619 tx_ring_p->ring_kick_tail.value = 0;
2620 tx_ring_p->descs_pending = 0;
2621
2622 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2623 "==> nxge_map_txdma_channel_buf_ring: channel %d "
2624 "actual tx desc max %d nmsgs %d "
2625 "(config nxge_tx_ring_size %d)",
2626 channel, tx_ring_p->tx_ring_size, nmsgs,
2627 nxge_tx_ring_size));
2628
2629 /*
2630 * Map in buffers from the buffer pool.
2631 */
2632 index = 0;
2633 bsize = dma_bufp->block_size;
2634
2635 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
2636 "dma_bufp $%p tx_rng_p $%p "
2637 "tx_msg_rng_p $%p bsize %d",
2638 dma_bufp, tx_ring_p, tx_msg_ring, bsize));
2639
2640 tx_buf_dma_handle = dma_bufp->dma_handle;
2641 for (i = 0; i < num_chunks; i++, dma_bufp++) {
2642 bsize = dma_bufp->block_size;
2643 nblocks = dma_bufp->nblocks;
2644 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2645 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
2646 "size %d dma_bufp $%p",
2647 i, sizeof (nxge_dma_common_t), dma_bufp));
2648
2649 for (j = 0; j < nblocks; j++) {
2650 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
2651 dmap = &tx_msg_ring[index++].buf_dma;
2652#ifdef TX_MEM_DEBUG
2653 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2654 			    "==> nxge_map_txdma_channel_buf_ring: j %d "
2655 			    "dmap $%p", j, dmap));
2656#endif
2657 nxge_setup_dma_common(dmap, dma_bufp, 1,
2658 bsize);
2659 }
2660 }
2661
2662 if (i < num_chunks) {
2663 status = NXGE_ERROR;
2664 goto nxge_map_txdma_channel_buf_ring_fail1;
2665 }
2666
2667 *tx_desc_p = tx_ring_p;
2668
2669 goto nxge_map_txdma_channel_buf_ring_exit;
2670
2671nxge_map_txdma_channel_buf_ring_fail1:
2672 if (tx_ring_p->taskq) {
2673 ddi_taskq_destroy(tx_ring_p->taskq);
2674 tx_ring_p->taskq = NULL;
2675 }
2676
2677 index--;
2678 for (; index >= 0; index--) {
2679 if (tx_msg_ring[index].dma_handle != NULL) {
2680 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
2681 }
2682 }
2683 MUTEX_DESTROY(&tx_ring_p->lock);
2684 KMEM_FREE(tx_msg_ring, size);
2685 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2686
2687 status = NXGE_ERROR;
2688
2689nxge_map_txdma_channel_buf_ring_exit:
2690 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2691 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
2692
2693 return (status);
2694}
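
/*
 * Illustrative sketch (added for clarity, not part of the driver): the ring
 * is sized from the buffer pool, one tx_msg_t per pre-mapped block, so a
 * channel whose pool has, say, 4 chunks of 256 blocks each gets nmsgs = 1024
 * and tx_wrap_mask = 1023 (the wrap mask assumes the total block count is a
 * power of two).  A hypothetical standalone version of that accounting,
 * with the function name made up for illustration:
 */
#if 0	/* example only -- never compiled */
static uint32_t
example_count_tx_msgs(const nxge_dma_common_t *chunks, uint32_t num_chunks)
{
	uint32_t i, nmsgs = 0;

	for (i = 0; i < num_chunks; i++)
		nmsgs += chunks[i].nblocks;	/* one tx_msg_t per block */

	return (nmsgs);
}
#endif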
2695
2696/*ARGSUSED*/
2697static void
2698nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
2699{
2700 p_tx_msg_t tx_msg_ring;
2701 p_tx_msg_t tx_msg_p;
2702 int i;
2703
2704 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2705 "==> nxge_unmap_txdma_channel_buf_ring"));
2706 if (tx_ring_p == NULL) {
2707 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2708 "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
2709 return;
2710 }
2711 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2712 "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
2713 tx_ring_p->tdc));
2714
2715 tx_msg_ring = tx_ring_p->tx_msg_ring;
2716
2717 /*
2718 * Since the serialization thread, timer thread and
2719 * interrupt thread can all call the transmit reclaim,
2720 * the unmapping function needs to acquire the lock
2721 * to free those buffers which were transmitted
2722 * by the hardware already.
2723 */
2724 MUTEX_ENTER(&tx_ring_p->lock);
2725 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2726 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
2727 "channel %d",
2728 tx_ring_p->tdc));
2729 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
2730
2731 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2732 tx_msg_p = &tx_msg_ring[i];
2733 if (tx_msg_p->tx_message != NULL) {
2734 freemsg(tx_msg_p->tx_message);
2735 tx_msg_p->tx_message = NULL;
2736 }
2737 }
2738
2739 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2740 if (tx_msg_ring[i].dma_handle != NULL) {
2741 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
2742 }
2743 tx_msg_ring[i].dma_handle = NULL;
2744 }
2745
2746 MUTEX_EXIT(&tx_ring_p->lock);
2747
2748 if (tx_ring_p->taskq) {
2749 ddi_taskq_destroy(tx_ring_p->taskq);
2750 tx_ring_p->taskq = NULL;
2751 }
2752
2753 MUTEX_DESTROY(&tx_ring_p->lock);
2754 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
2755 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2756
2757 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2758 "<== nxge_unmap_txdma_channel_buf_ring"));
2759}
2760
2761static nxge_status_t
2762nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
2763{
2764 p_tx_rings_t tx_rings;
2765 p_tx_ring_t *tx_desc_rings;
2766 p_tx_mbox_areas_t tx_mbox_areas_p;
2767 p_tx_mbox_t *tx_mbox_p;
2768 nxge_status_t status = NXGE_OK;
2769
2770 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
2771
2772 tx_rings = nxgep->tx_rings;
2773 if (tx_rings == NULL) {
2774 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2775 "<== nxge_txdma_hw_start: NULL ring pointer"));
2776 return (NXGE_ERROR);
2777 }
2778 tx_desc_rings = tx_rings->rings;
2779 if (tx_desc_rings == NULL) {
2780 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2781 "<== nxge_txdma_hw_start: NULL ring pointers"));
2782 return (NXGE_ERROR);
2783 }
2784
2785 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2786 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2787
2788 tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
2789 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2790
2791 status = nxge_txdma_start_channel(nxgep, channel,
2792 (p_tx_ring_t)tx_desc_rings[channel],
2793 (p_tx_mbox_t)tx_mbox_p[channel]);
2794 if (status != NXGE_OK) {
2795 goto nxge_txdma_hw_start_fail1;
2796 }
2797
2798 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2799 "tx_rings $%p rings $%p",
2800 nxgep->tx_rings, nxgep->tx_rings->rings));
2801 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2802 "tx_rings $%p tx_desc_rings $%p",
2803 nxgep->tx_rings, tx_desc_rings));
2804
2805 goto nxge_txdma_hw_start_exit;
2806
2807nxge_txdma_hw_start_fail1:
2808 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2809 "==> nxge_txdma_hw_start: disable "
2810 "(status 0x%x channel %d)", status, channel));
2811
2812nxge_txdma_hw_start_exit:
2813 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2814 "==> nxge_txdma_hw_start: (status 0x%x)", status));
2815
2816 return (status);
2817}
2818
2819/*
2820 * nxge_txdma_start_channel
2821 *
2822 * Start a TDC.
2823 *
2824 * Arguments:
2825 * nxgep
2826 * channel The channel to start.
2827 * tx_ring_p channel's transmit descriptor ring.
2828 *	tx_mbox_p	channel's mailbox.
2829 *
2830 * Notes:
2831 *
2832 * NPI/NXGE function calls:
2833 * nxge_reset_txdma_channel()
2834 * nxge_init_txdma_channel_event_mask()
2835 * nxge_enable_txdma_channel()
2836 *
2837 * Registers accessed:
2838 * none directly (see functions above).
2839 *
2840 * Context:
2841 * Any domain
2842 */
2843static nxge_status_t
2844nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
2845 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2846
2847{
2848 nxge_status_t status = NXGE_OK;
2849
2850 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2851 "==> nxge_txdma_start_channel (channel %d)", channel));
2852 /*
2853 * TXDMA/TXC must be in stopped state.
2854 */
2855 (void) nxge_txdma_stop_inj_err(nxgep, channel);
2856
2857 /*
2858 * Reset TXDMA channel
2859 */
2860 tx_ring_p->tx_cs.value = 0;
2861 tx_ring_p->tx_cs.bits.ldw.rst = 1;
2862 status = nxge_reset_txdma_channel(nxgep, channel,
2863 tx_ring_p->tx_cs.value);
2864 if (status != NXGE_OK) {
2865 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2866 "==> nxge_txdma_start_channel (channel %d)"
2867 " reset channel failed 0x%x", channel, status));
2868 goto nxge_txdma_start_channel_exit;
2869 }
2870
2871 /*
2872 * Initialize the TXDMA channel specific FZC control
2873 * configurations. These FZC registers are pertaining
2874 * to each TX channel (i.e. logical pages).
2875 */
2876 if (!isLDOMguest(nxgep)) {
2877 status = nxge_init_fzc_txdma_channel(nxgep, channel,
2878 tx_ring_p, tx_mbox_p);
2879 if (status != NXGE_OK) {
2880 goto nxge_txdma_start_channel_exit;
2881 }
2882 }
2883
2884 /*
2885 * Initialize the event masks.
2886 */
2887 tx_ring_p->tx_evmask.value = 0;
2888 status = nxge_init_txdma_channel_event_mask(nxgep,
2889 channel, &tx_ring_p->tx_evmask);
2890 if (status != NXGE_OK) {
2891 goto nxge_txdma_start_channel_exit;
2892 }
2893
2894 /*
2895 * Load TXDMA descriptors, buffers, mailbox,
2896 * initialise the DMA channels and
2897 * enable each DMA channel.
2898 */
2899 status = nxge_enable_txdma_channel(nxgep, channel,
2900 tx_ring_p, tx_mbox_p);
2901 if (status != NXGE_OK) {
2902 goto nxge_txdma_start_channel_exit;
2903 }
2904
2905nxge_txdma_start_channel_exit:
2906 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
2907
2908 return (status);
2909}
2910
2911/*
2912 * nxge_txdma_stop_channel
2913 *
2914 * Stop a TDC.
2915 *
2916 * Arguments:
2917 * nxgep
2918 * channel The channel to stop.
2919 * tx_ring_p channel's transmit descriptor ring.
2920 *	tx_mbox_p	channel's mailbox.
2921 *
2922 * Notes:
2923 *
2924 * NPI/NXGE function calls:
2925 * nxge_txdma_stop_inj_err()
2926 * nxge_reset_txdma_channel()
2927 * nxge_init_txdma_channel_event_mask()
2928 * nxge_init_txdma_channel_cntl_stat()
2929 * nxge_disable_txdma_channel()
2930 *
2931 * Registers accessed:
2932 * none directly (see functions above).
2933 *
2934 * Context:
2935 * Any domain
2936 */
2937/*ARGSUSED*/
2938static nxge_status_t
2939nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
2940{
2941 p_tx_ring_t tx_ring_p;
2942 int status = NXGE_OK;
2943
2944 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2945 "==> nxge_txdma_stop_channel: channel %d", channel));
2946
2947 /*
2948	 * Stop (disable) TXDMA and TXC.  If the stop bit is set
2949	 * and the STOP_N_GO bit is not set, the TXDMA reset state
2950	 * will not be set when the channel is reset.
2951 */
2952 (void) nxge_txdma_stop_inj_err(nxgep, channel);
2953
2954 if (nxgep->tx_rings == NULL) {
2955 status = NXGE_ERROR;
2956 goto nxge_txdma_stop_channel_exit;
2957 }
2958
2959 tx_ring_p = nxgep->tx_rings->rings[channel];
2960 if (tx_ring_p == NULL) {
2961 status = NXGE_ERROR;
2962 goto nxge_txdma_stop_channel_exit;
2963 }
2964
2965 /*
2966 * Reset TXDMA channel
2967 */
2968 tx_ring_p->tx_cs.value = 0;
2969 tx_ring_p->tx_cs.bits.ldw.rst = 1;
2970 status = nxge_reset_txdma_channel(nxgep, channel,
2971 tx_ring_p->tx_cs.value);
2972 if (status != NXGE_OK) {
2973 goto nxge_txdma_stop_channel_exit;
2974 }
2975
2976#ifdef HARDWARE_REQUIRED
2977 /* Set up the interrupt event masks. */
2978 tx_ring_p->tx_evmask.value = 0;
2979 status = nxge_init_txdma_channel_event_mask(nxgep,
2980 channel, &tx_ring_p->tx_evmask);
2981 if (status != NXGE_OK) {
2982 goto nxge_txdma_stop_channel_exit;
2983 }
2984
2985 /* Initialize the DMA control and status register */
2986 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
2987 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
2988 tx_ring_p->tx_cs.value);
2989 if (status != NXGE_OK) {
2990 goto nxge_txdma_stop_channel_exit;
2991 }
2992
2993 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2994
2995 /* Disable channel */
2996 status = nxge_disable_txdma_channel(nxgep, channel,
2997 tx_ring_p, tx_mbox_p);
2998 if (status != NXGE_OK) {
2999		goto nxge_txdma_stop_channel_exit;
3000 }
3001
3002 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
3003 "==> nxge_txdma_stop_channel: event done"));
3004
3005#endif
3006
3007nxge_txdma_stop_channel_exit:
3008 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
3009 return (status);
3010}
3011
3012/*
3013 * nxge_txdma_get_ring
3014 *
3015 * Get the ring for a TDC.
3016 *
3017 * Arguments:
3018 * nxgep
3019 * channel
3020 *
3021 * Notes:
3022 *
3023 * NPI/NXGE function calls:
3024 *
3025 * Registers accessed:
3026 *
3027 * Context:
3028 * Any domain
3029 */
3030static p_tx_ring_t
3031nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
3032{
3033 nxge_grp_set_t *set = &nxgep->tx_set;
3034 int tdc;
3035
3036 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
3037
3038 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3039 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3040 "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
3041 goto return_null;
3042 }
3043
3044 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3045 if ((1 << tdc) & set->owned.map) {
3046 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3047 if (ring) {
3048 if (channel == ring->tdc) {
3049 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3050 "<== nxge_txdma_get_ring: "
3051 "tdc %d ring $%p", tdc, ring));
3052 return (ring);
3053 }
3054 }
3055 }
3056 }
3057
3058return_null:
3059 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
3060 "ring not found"));
3061
3062 return (NULL);
3063}
3064
3065/*
3066 * nxge_txdma_get_mbox
3067 *
3068 * Get the mailbox for a TDC.
3069 *
3070 * Arguments:
3071 * nxgep
3072 * channel
3073 *
3074 * Notes:
3075 *
3076 * NPI/NXGE function calls:
3077 *
3078 * Registers accessed:
3079 *
3080 * Context:
3081 * Any domain
3082 */
3083static p_tx_mbox_t
3084nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
3085{
3086 nxge_grp_set_t *set = &nxgep->tx_set;
3087 int tdc;
3088
3089 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
3090
3091 if (nxgep->tx_mbox_areas_p == 0 ||
3092 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
3093 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3094 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
3095 goto return_null;
3096 }
3097
3098 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3099 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3100 "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
3101 goto return_null;
3102 }
3103
3104 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3105 if ((1 << tdc) & set->owned.map) {
3106 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3107 if (ring) {
3108 if (channel == ring->tdc) {
3109 tx_mbox_t *mailbox = nxgep->
3110 tx_mbox_areas_p->
3111 txmbox_areas_p[tdc];
3112 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3113 "<== nxge_txdma_get_mbox: tdc %d "
3114 "ring $%p", tdc, mailbox));
3115 return (mailbox);
3116 }
3117 }
3118 }
3119 }
3120
3121return_null:
3122 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
3123 "mailbox not found"));
3124
3125 return (NULL);
3126}
3127
3128/*
3129 * nxge_tx_err_evnts
3130 *
3131 *	Process a TDC's error events and recover the channel if a fatal error is seen.
3132 *
3133 * Arguments:
3134 * nxgep
3135 * index The index to the TDC ring.
3136 * ldvp Used to get the channel number ONLY.
3137 * cs A copy of the bits from TX_CS.
3138 *
3139 * Notes:
3140 * Calling tree:
3141 * nxge_tx_intr()
3142 *
3143 * NPI/NXGE function calls:
3144 * npi_txdma_ring_error_get()
3145 * npi_txdma_inj_par_error_get()
3146 * nxge_txdma_fatal_err_recover()
3147 *
3148 * Registers accessed:
3149 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High
3150 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
3151 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3152 *
3153 * Context:
3154 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR.
3155 */
3156/*ARGSUSED*/
3157static nxge_status_t
3158nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
3159{
3160 npi_handle_t handle;
3161 npi_status_t rs;
3162 uint8_t channel;
3163 p_tx_ring_t *tx_rings;
3164 p_tx_ring_t tx_ring_p;
3165 p_nxge_tx_ring_stats_t tdc_stats;
3166 boolean_t txchan_fatal = B_FALSE;
3167 nxge_status_t status = NXGE_OK;
3168 tdmc_inj_par_err_t par_err;
3169 uint32_t value;
3170
3171 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
3172 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3173 channel = ldvp->channel;
3174
3175 tx_rings = nxgep->tx_rings->rings;
3176 tx_ring_p = tx_rings[index];
3177 tdc_stats = tx_ring_p->tdc_stats;
3178 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
3179 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
3180 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
3181 if ((rs = npi_txdma_ring_error_get(handle, channel,
3182 &tdc_stats->errlog)) != NPI_SUCCESS)
3183 return (NXGE_ERROR | rs);
3184 }
3185
3186 if (cs.bits.ldw.mbox_err) {
3187 tdc_stats->mbox_err++;
3188 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3189 NXGE_FM_EREPORT_TDMC_MBOX_ERR);
3190 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3191 "==> nxge_tx_err_evnts(channel %d): "
3192 "fatal error: mailbox", channel));
3193 txchan_fatal = B_TRUE;
3194 }
3195 if (cs.bits.ldw.pkt_size_err) {
3196 tdc_stats->pkt_size_err++;
3197 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3198 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
3199 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3200 "==> nxge_tx_err_evnts(channel %d): "
3201 "fatal error: pkt_size_err", channel));
3202 txchan_fatal = B_TRUE;
3203 }
3204 if (cs.bits.ldw.tx_ring_oflow) {
3205 tdc_stats->tx_ring_oflow++;
3206 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3207 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
3208 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3209 "==> nxge_tx_err_evnts(channel %d): "
3210 "fatal error: tx_ring_oflow", channel));
3211 txchan_fatal = B_TRUE;
3212 }
3213 if (cs.bits.ldw.pref_buf_par_err) {
3214 tdc_stats->pre_buf_par_err++;
3215 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3216 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
3217 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3218 "==> nxge_tx_err_evnts(channel %d): "
3219 "fatal error: pre_buf_par_err", channel));
3220 /* Clear error injection source for parity error */
3221 (void) npi_txdma_inj_par_error_get(handle, &value);
3222 par_err.value = value;
3223 par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
3224 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3225 txchan_fatal = B_TRUE;
3226 }
3227 if (cs.bits.ldw.nack_pref) {
3228 tdc_stats->nack_pref++;
3229 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3230 NXGE_FM_EREPORT_TDMC_NACK_PREF);
3231 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3232 "==> nxge_tx_err_evnts(channel %d): "
3233 "fatal error: nack_pref", channel));
3234 txchan_fatal = B_TRUE;
3235 }
3236 if (cs.bits.ldw.nack_pkt_rd) {
3237 tdc_stats->nack_pkt_rd++;
3238 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3239 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
3240 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3241 "==> nxge_tx_err_evnts(channel %d): "
3242 "fatal error: nack_pkt_rd", channel));
3243 txchan_fatal = B_TRUE;
3244 }
3245 if (cs.bits.ldw.conf_part_err) {
3246 tdc_stats->conf_part_err++;
3247 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3248 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
3249 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3250 "==> nxge_tx_err_evnts(channel %d): "
3251 "fatal error: config_partition_err", channel));
3252 txchan_fatal = B_TRUE;
3253 }
3254 if (cs.bits.ldw.pkt_prt_err) {
3255 tdc_stats->pkt_part_err++;
3256 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3257 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
3258 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3259 "==> nxge_tx_err_evnts(channel %d): "
3260 "fatal error: pkt_prt_err", channel));
3261 txchan_fatal = B_TRUE;
3262 }
3263
3264 /* Clear error injection source in case this is an injected error */
3265 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
3266
3267 if (txchan_fatal) {
3268 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3269 " nxge_tx_err_evnts: "
3270 " fatal error on channel %d cs 0x%llx\n",
3271 channel, cs.value));
3272 status = nxge_txdma_fatal_err_recover(nxgep, channel,
3273 tx_ring_p);
3274 if (status == NXGE_OK) {
3275 FM_SERVICE_RESTORED(nxgep);
3276 }
3277 }
3278
3279 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));
3280
3281 return (status);
3282}
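
/*
 * Illustrative sketch (added for clarity, not part of the driver): for a
 * prefetch-buffer parity error the handler above clears only this channel's
 * bit in the TDMC_INJ_PAR_ERR inject mask, leaving any injection set up on
 * other channels untouched.  A hypothetical standalone version of that bit
 * manipulation (function name made up for illustration):
 */
#if 0	/* example only -- never compiled */
static uint32_t
example_clear_inj_par(uint32_t inject_mask, uint8_t channel)
{
	/* Clear only this channel's injection bit. */
	return (inject_mask & ~(1U << channel));
}
#endif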
3283
3284static nxge_status_t
3285nxge_txdma_fatal_err_recover(
3286 p_nxge_t nxgep,
3287 uint16_t channel,
3288 p_tx_ring_t tx_ring_p)
3289{
3290 npi_handle_t handle;
3291 npi_status_t rs = NPI_SUCCESS;
3292 p_tx_mbox_t tx_mbox_p;
3293 nxge_status_t status = NXGE_OK;
3294
3295 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
3296 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3297 "Recovering from TxDMAChannel#%d error...", channel));
3298
3299 /*
3300	 * Stop the DMA channel and wait for the stop-done bit.
3301	 * If the stop-done bit does not get set, inject an
3302	 * error to force it.
3303 */
3304
3305 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3306 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
3307 MUTEX_ENTER(&tx_ring_p->lock);
3308 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
3309 if (rs != NPI_SUCCESS) {
3310 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3311 "==> nxge_txdma_fatal_err_recover (channel %d): "
3312 "stop failed ", channel));
3313 goto fail;
3314 }
3315
3316 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
3317 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
3318
3319 /*
3320 * Reset TXDMA channel
3321 */
3322 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
3323 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
3324 NPI_SUCCESS) {
3325 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3326 "==> nxge_txdma_fatal_err_recover (channel %d)"
3327 " reset channel failed 0x%x", channel, rs));
3328 goto fail;
3329 }
3330
3331 /*
3332 * Reset the tail (kick) register to 0.
3333	 * (Hardware will not reset it; a Tx ring overflow fatal
3334	 * error results if the tail is not 0 after the reset.)
3335 */
3336 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
3337
3338 /* Restart TXDMA channel */
3339
2542 uint32_t nblocks, nmsgs;
2543 char qname[TASKQ_NAMELEN];
2544
2545 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2546 "==> nxge_map_txdma_channel_buf_ring"));
2547
2548 dma_bufp = tmp_bufp = *dma_buf_p;
2549 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2550 " nxge_map_txdma_channel_buf_ring: channel %d to map %d "
2551 "chunks bufp $%p",
2552 channel, num_chunks, dma_bufp));
2553
2554 nmsgs = 0;
2555 for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2556 nmsgs += tmp_bufp->nblocks;
2557 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2558 "==> nxge_map_txdma_channel_buf_ring: channel %d "
2559 "bufp $%p nblocks %d nmsgs %d",
2560 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2561 }
2562 if (!nmsgs) {
2563 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2564 "<== nxge_map_txdma_channel_buf_ring: channel %d "
2565 "no msg blocks",
2566 channel));
2567 status = NXGE_ERROR;
2568 goto nxge_map_txdma_channel_buf_ring_exit;
2569 }
2570
2571 tx_ring_p = (p_tx_ring_t)
2572 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
2573 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
2574 (void *)nxgep->interrupt_cookie);
2575
2576 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE);
2577 tx_ring_p->tx_ring_busy = B_FALSE;
2578 tx_ring_p->nxgep = nxgep;
2579 tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL;
2580 (void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d",
2581 nxgep->instance, channel);
2582 tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1,
2583 TASKQ_DEFAULTPRI, 0);
2584 if (tx_ring_p->taskq == NULL) {
2585 goto nxge_map_txdma_channel_buf_ring_fail1;
2586 }
2587
2588 /*
2589 * Allocate transmit message rings and handles for packets
2590 * not to be copied to premapped buffers.
2591 */
2592 size = nmsgs * sizeof (tx_msg_t);
2593 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2594 for (i = 0; i < nmsgs; i++) {
2595 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2596 DDI_DMA_DONTWAIT, 0,
2597 &tx_msg_ring[i].dma_handle);
2598 if (ddi_status != DDI_SUCCESS) {
2599 status |= NXGE_DDI_FAILED;
2600 break;
2601 }
2602 }
2603 if (i < nmsgs) {
2604 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2605 "Allocate handles failed."));
2606 goto nxge_map_txdma_channel_buf_ring_fail1;
2607 }
2608
2609 tx_ring_p->tdc = channel;
2610 tx_ring_p->tx_msg_ring = tx_msg_ring;
2611 tx_ring_p->tx_ring_size = nmsgs;
2612 tx_ring_p->num_chunks = num_chunks;
2613 if (!nxge_tx_intr_thres) {
2614 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
2615 }
2616 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
2617 tx_ring_p->rd_index = 0;
2618 tx_ring_p->wr_index = 0;
2619 tx_ring_p->ring_head.value = 0;
2620 tx_ring_p->ring_kick_tail.value = 0;
2621 tx_ring_p->descs_pending = 0;
2622
2623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2624 "==> nxge_map_txdma_channel_buf_ring: channel %d "
2625 "actual tx desc max %d nmsgs %d "
2626 "(config nxge_tx_ring_size %d)",
2627 channel, tx_ring_p->tx_ring_size, nmsgs,
2628 nxge_tx_ring_size));
2629
2630 /*
2631 * Map in buffers from the buffer pool.
2632 */
2633 index = 0;
2634 bsize = dma_bufp->block_size;
2635
2636 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
2637 "dma_bufp $%p tx_rng_p $%p "
2638 "tx_msg_rng_p $%p bsize %d",
2639 dma_bufp, tx_ring_p, tx_msg_ring, bsize));
2640
2641 tx_buf_dma_handle = dma_bufp->dma_handle;
2642 for (i = 0; i < num_chunks; i++, dma_bufp++) {
2643 bsize = dma_bufp->block_size;
2644 nblocks = dma_bufp->nblocks;
2645 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2646 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
2647 "size %d dma_bufp $%p",
2648 i, sizeof (nxge_dma_common_t), dma_bufp));
2649
2650 for (j = 0; j < nblocks; j++) {
2651 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
2652 dmap = &tx_msg_ring[index++].buf_dma;
2653#ifdef TX_MEM_DEBUG
2654 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2655 "==> nxge_map_txdma_channel_buf_ring: j %d"
2656 "dmap $%p", i, dmap));
2657#endif
2658 nxge_setup_dma_common(dmap, dma_bufp, 1,
2659 bsize);
2660 }
2661 }
2662
2663 if (i < num_chunks) {
2664 status = NXGE_ERROR;
2665 goto nxge_map_txdma_channel_buf_ring_fail1;
2666 }
2667
2668 *tx_desc_p = tx_ring_p;
2669
2670 goto nxge_map_txdma_channel_buf_ring_exit;
2671
2672nxge_map_txdma_channel_buf_ring_fail1:
2673 if (tx_ring_p->taskq) {
2674 ddi_taskq_destroy(tx_ring_p->taskq);
2675 tx_ring_p->taskq = NULL;
2676 }
2677
2678 index--;
2679 for (; index >= 0; index--) {
2680 if (tx_msg_ring[index].dma_handle != NULL) {
2681 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
2682 }
2683 }
2684 MUTEX_DESTROY(&tx_ring_p->lock);
2685 KMEM_FREE(tx_msg_ring, size);
2686 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2687
2688 status = NXGE_ERROR;
2689
2690nxge_map_txdma_channel_buf_ring_exit:
2691 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2692 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
2693
2694 return (status);
2695}
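
/*
 * A minimal sketch of the invariant the mapping loop above relies on (an
 * assumption drawn from this function, not from the hardware spec): the
 * pre-mapped buffer chunks together supply one block per descriptor, i.e.
 *
 *	nmsgs == dma_bufp[0].nblocks + ... + dma_bufp[num_chunks - 1].nblocks
 *
 * so that every tx_msg_ring[] entry ends up backed by exactly one block of
 * block_size bytes from one of the chunks.
 */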
2696
2697/*ARGSUSED*/
2698static void
2699nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
2700{
2701 p_tx_msg_t tx_msg_ring;
2702 p_tx_msg_t tx_msg_p;
2703 int i;
2704
2705 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2706 "==> nxge_unmap_txdma_channel_buf_ring"));
2707 if (tx_ring_p == NULL) {
2708 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2709 "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
2710 return;
2711 }
2712 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2713 "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
2714 tx_ring_p->tdc));
2715
2716 tx_msg_ring = tx_ring_p->tx_msg_ring;
2717
2718 /*
2719 	 * Since the serialization thread, the timer thread and the
2720 	 * interrupt thread can all call the transmit reclaim, the
2721 	 * unmapping function must acquire the ring lock before
2722 	 * freeing the buffers that the hardware has already
2723 	 * transmitted.
2724 */
2725 MUTEX_ENTER(&tx_ring_p->lock);
2726 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2727 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
2728 "channel %d",
2729 tx_ring_p->tdc));
2730 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
2731
2732 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2733 tx_msg_p = &tx_msg_ring[i];
2734 if (tx_msg_p->tx_message != NULL) {
2735 freemsg(tx_msg_p->tx_message);
2736 tx_msg_p->tx_message = NULL;
2737 }
2738 }
2739
2740 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2741 if (tx_msg_ring[i].dma_handle != NULL) {
2742 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
2743 }
2744 tx_msg_ring[i].dma_handle = NULL;
2745 }
2746
2747 MUTEX_EXIT(&tx_ring_p->lock);
2748
2749 if (tx_ring_p->taskq) {
2750 ddi_taskq_destroy(tx_ring_p->taskq);
2751 tx_ring_p->taskq = NULL;
2752 }
2753
2754 MUTEX_DESTROY(&tx_ring_p->lock);
2755 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
2756 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2757
2758 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2759 "<== nxge_unmap_txdma_channel_buf_ring"));
2760}
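
/*
 * A minimal sketch of the locking convention described in
 * nxge_unmap_txdma_channel_buf_ring() above (assumed, from that comment,
 * to hold for every reclaim caller: transmit, timer, interrupt and
 * teardown):
 *
 *	MUTEX_ENTER(&tx_ring_p->lock);
 *	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
 *	MUTEX_EXIT(&tx_ring_p->lock);
 *
 * The third argument (0) is the value the callers in this file pass.
 */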
2761
2762static nxge_status_t
2763nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
2764{
2765 p_tx_rings_t tx_rings;
2766 p_tx_ring_t *tx_desc_rings;
2767 p_tx_mbox_areas_t tx_mbox_areas_p;
2768 p_tx_mbox_t *tx_mbox_p;
2769 nxge_status_t status = NXGE_OK;
2770
2771 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
2772
2773 tx_rings = nxgep->tx_rings;
2774 if (tx_rings == NULL) {
2775 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2776 "<== nxge_txdma_hw_start: NULL ring pointer"));
2777 return (NXGE_ERROR);
2778 }
2779 tx_desc_rings = tx_rings->rings;
2780 if (tx_desc_rings == NULL) {
2781 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2782 "<== nxge_txdma_hw_start: NULL ring pointers"));
2783 return (NXGE_ERROR);
2784 }
2785
2786 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2787 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2788
2789 tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
2790 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2791
2792 status = nxge_txdma_start_channel(nxgep, channel,
2793 (p_tx_ring_t)tx_desc_rings[channel],
2794 (p_tx_mbox_t)tx_mbox_p[channel]);
2795 if (status != NXGE_OK) {
2796 goto nxge_txdma_hw_start_fail1;
2797 }
2798
2799 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2800 "tx_rings $%p rings $%p",
2801 nxgep->tx_rings, nxgep->tx_rings->rings));
2802 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2803 "tx_rings $%p tx_desc_rings $%p",
2804 nxgep->tx_rings, tx_desc_rings));
2805
2806 goto nxge_txdma_hw_start_exit;
2807
2808nxge_txdma_hw_start_fail1:
2809 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2810 "==> nxge_txdma_hw_start: disable "
2811 "(status 0x%x channel %d)", status, channel));
2812
2813nxge_txdma_hw_start_exit:
2814 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2815 "==> nxge_txdma_hw_start: (status 0x%x)", status));
2816
2817 return (status);
2818}
2819
2820/*
2821 * nxge_txdma_start_channel
2822 *
2823 * Start a TDC.
2824 *
2825 * Arguments:
2826 * nxgep
2827 * channel The channel to start.
2828 * tx_ring_p channel's transmit descriptor ring.
2829  *	tx_mbox_p	channel's mailbox.
2830 *
2831 * Notes:
2832 *
2833 * NPI/NXGE function calls:
2834 * nxge_reset_txdma_channel()
2835 * nxge_init_txdma_channel_event_mask()
2836 * nxge_enable_txdma_channel()
2837 *
2838 * Registers accessed:
2839 * none directly (see functions above).
2840 *
2841 * Context:
2842 * Any domain
2843 */
2844static nxge_status_t
2845nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
2846 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2847
2848{
2849 nxge_status_t status = NXGE_OK;
2850
2851 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2852 "==> nxge_txdma_start_channel (channel %d)", channel));
2853 /*
2854 * TXDMA/TXC must be in stopped state.
2855 */
2856 (void) nxge_txdma_stop_inj_err(nxgep, channel);
2857
2858 /*
2859 * Reset TXDMA channel
2860 */
2861 tx_ring_p->tx_cs.value = 0;
2862 tx_ring_p->tx_cs.bits.ldw.rst = 1;
2863 status = nxge_reset_txdma_channel(nxgep, channel,
2864 tx_ring_p->tx_cs.value);
2865 if (status != NXGE_OK) {
2866 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2867 "==> nxge_txdma_start_channel (channel %d)"
2868 " reset channel failed 0x%x", channel, status));
2869 goto nxge_txdma_start_channel_exit;
2870 }
2871
2872 /*
2873 * Initialize the TXDMA channel specific FZC control
2874 	 * configurations. These FZC registers pertain
2875 	 * to each TX channel (i.e., its logical pages).
2876 */
2877 if (!isLDOMguest(nxgep)) {
2878 status = nxge_init_fzc_txdma_channel(nxgep, channel,
2879 tx_ring_p, tx_mbox_p);
2880 if (status != NXGE_OK) {
2881 goto nxge_txdma_start_channel_exit;
2882 }
2883 }
2884
2885 /*
2886 * Initialize the event masks.
2887 */
2888 tx_ring_p->tx_evmask.value = 0;
2889 status = nxge_init_txdma_channel_event_mask(nxgep,
2890 channel, &tx_ring_p->tx_evmask);
2891 if (status != NXGE_OK) {
2892 goto nxge_txdma_start_channel_exit;
2893 }
2894
2895 /*
2896 	 * Load the TXDMA descriptors, buffers and
2897 	 * mailbox, then initialise and enable the
2898 	 * DMA channel.
2899 */
2900 status = nxge_enable_txdma_channel(nxgep, channel,
2901 tx_ring_p, tx_mbox_p);
2902 if (status != NXGE_OK) {
2903 goto nxge_txdma_start_channel_exit;
2904 }
2905
2906nxge_txdma_start_channel_exit:
2907 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
2908
2909 return (status);
2910}
2911
2912/*
2913 * nxge_txdma_stop_channel
2914 *
2915 * Stop a TDC.
2916 *
2917 * Arguments:
2918 * nxgep
2919 * channel The channel to stop.
2920 * tx_ring_p channel's transmit descriptor ring.
2921  *	tx_mbox_p	channel's mailbox.
2922 *
2923 * Notes:
2924 *
2925 * NPI/NXGE function calls:
2926 * nxge_txdma_stop_inj_err()
2927 * nxge_reset_txdma_channel()
2928 * nxge_init_txdma_channel_event_mask()
2929 * nxge_init_txdma_channel_cntl_stat()
2930 * nxge_disable_txdma_channel()
2931 *
2932 * Registers accessed:
2933 * none directly (see functions above).
2934 *
2935 * Context:
2936 * Any domain
2937 */
2938/*ARGSUSED*/
2939static nxge_status_t
2940nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
2941{
2942 p_tx_ring_t tx_ring_p;
2943 int status = NXGE_OK;
2944
2945 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2946 "==> nxge_txdma_stop_channel: channel %d", channel));
2947
2948 /*
2949 	 * Stop (disable) TXDMA and TXC.  (If the stop bit is set
2950 	 * and the STOP_N_GO bit is not set, the TXDMA reset state
2951 	 * will not be set when the TXDMA is reset.)
2952 */
2953 (void) nxge_txdma_stop_inj_err(nxgep, channel);
2954
2955 if (nxgep->tx_rings == NULL) {
2956 status = NXGE_ERROR;
2957 goto nxge_txdma_stop_channel_exit;
2958 }
2959
2960 tx_ring_p = nxgep->tx_rings->rings[channel];
2961 if (tx_ring_p == NULL) {
2962 status = NXGE_ERROR;
2963 goto nxge_txdma_stop_channel_exit;
2964 }
2965
2966 /*
2967 * Reset TXDMA channel
2968 */
2969 tx_ring_p->tx_cs.value = 0;
2970 tx_ring_p->tx_cs.bits.ldw.rst = 1;
2971 status = nxge_reset_txdma_channel(nxgep, channel,
2972 tx_ring_p->tx_cs.value);
2973 if (status != NXGE_OK) {
2974 goto nxge_txdma_stop_channel_exit;
2975 }
2976
2977#ifdef HARDWARE_REQUIRED
2978 /* Set up the interrupt event masks. */
2979 tx_ring_p->tx_evmask.value = 0;
2980 status = nxge_init_txdma_channel_event_mask(nxgep,
2981 channel, &tx_ring_p->tx_evmask);
2982 if (status != NXGE_OK) {
2983 goto nxge_txdma_stop_channel_exit;
2984 }
2985
2986 /* Initialize the DMA control and status register */
2987 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
2988 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
2989 tx_ring_p->tx_cs.value);
2990 if (status != NXGE_OK) {
2991 goto nxge_txdma_stop_channel_exit;
2992 }
2993
2994 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2995
2996 /* Disable channel */
2997 status = nxge_disable_txdma_channel(nxgep, channel,
2998 tx_ring_p, tx_mbox_p);
2999 if (status != NXGE_OK) {
3000 		goto nxge_txdma_stop_channel_exit;
3001 }
3002
3003 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
3004 "==> nxge_txdma_stop_channel: event done"));
3005
3006#endif
3007
3008nxge_txdma_stop_channel_exit:
3009 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
3010 return (status);
3011}
3012
3013/*
3014 * nxge_txdma_get_ring
3015 *
3016 * Get the ring for a TDC.
3017 *
3018 * Arguments:
3019 * nxgep
3020 * channel
3021 *
3022 * Notes:
3023 *
3024 * NPI/NXGE function calls:
3025 *
3026 * Registers accessed:
3027 *
3028 * Context:
3029 * Any domain
3030 */
3031static p_tx_ring_t
3032nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
3033{
3034 nxge_grp_set_t *set = &nxgep->tx_set;
3035 int tdc;
3036
3037 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
3038
3039 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3040 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3041 "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
3042 goto return_null;
3043 }
3044
3045 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3046 if ((1 << tdc) & set->owned.map) {
3047 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3048 if (ring) {
3049 if (channel == ring->tdc) {
3050 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3051 "<== nxge_txdma_get_ring: "
3052 "tdc %d ring $%p", tdc, ring));
3053 return (ring);
3054 }
3055 }
3056 }
3057 }
3058
3059return_null:
3060 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
3061 "ring not found"));
3062
3063 return (NULL);
3064}
3065
3066/*
3067 * nxge_txdma_get_mbox
3068 *
3069 * Get the mailbox for a TDC.
3070 *
3071 * Arguments:
3072 * nxgep
3073 * channel
3074 *
3075 * Notes:
3076 *
3077 * NPI/NXGE function calls:
3078 *
3079 * Registers accessed:
3080 *
3081 * Context:
3082 * Any domain
3083 */
3084static p_tx_mbox_t
3085nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
3086{
3087 nxge_grp_set_t *set = &nxgep->tx_set;
3088 int tdc;
3089
3090 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
3091
3092 if (nxgep->tx_mbox_areas_p == 0 ||
3093 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
3094 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3095 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
3096 goto return_null;
3097 }
3098
3099 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3100 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3101 "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
3102 goto return_null;
3103 }
3104
3105 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3106 if ((1 << tdc) & set->owned.map) {
3107 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3108 if (ring) {
3109 if (channel == ring->tdc) {
3110 tx_mbox_t *mailbox = nxgep->
3111 tx_mbox_areas_p->
3112 txmbox_areas_p[tdc];
3113 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3114 "<== nxge_txdma_get_mbox: tdc %d "
3115 				    "mailbox $%p", tdc, mailbox));
3116 return (mailbox);
3117 }
3118 }
3119 }
3120 }
3121
3122return_null:
3123 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
3124 "mailbox not found"));
3125
3126 return (NULL);
3127}
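
/*
 * A minimal sketch of how the two lookup helpers above are meant to be
 * used together (a hypothetical caller; only names that already appear in
 * this file are assumed):
 *
 *	p_tx_ring_t ring = nxge_txdma_get_ring(nxgep, channel);
 *	p_tx_mbox_t mbox = nxge_txdma_get_mbox(nxgep, channel);
 *
 *	if (ring != NULL && mbox != NULL) {
 *		... the channel is owned by this instance and both its
 *		... descriptor ring and mailbox have been mapped
 *	}
 *
 * Both helpers walk set->owned.map and match on ring->tdc, so a channel
 * that is not in this instance's owned set simply returns NULL.
 */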
3128
3129/*
3130 * nxge_tx_err_evnts
3131 *
3132 * Recover a TDC.
3133 *
3134 * Arguments:
3135 * nxgep
3136 * index The index to the TDC ring.
3137 * ldvp Used to get the channel number ONLY.
3138 * cs A copy of the bits from TX_CS.
3139 *
3140 * Notes:
3141 * Calling tree:
3142 * nxge_tx_intr()
3143 *
3144 * NPI/NXGE function calls:
3145 * npi_txdma_ring_error_get()
3146 * npi_txdma_inj_par_error_get()
3147 * nxge_txdma_fatal_err_recover()
3148 *
3149 * Registers accessed:
3150 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High
3151 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
3152 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3153 *
3154 * Context:
3155 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR.
3156 */
3157/*ARGSUSED*/
3158static nxge_status_t
3159nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
3160{
3161 npi_handle_t handle;
3162 npi_status_t rs;
3163 uint8_t channel;
3164 p_tx_ring_t *tx_rings;
3165 p_tx_ring_t tx_ring_p;
3166 p_nxge_tx_ring_stats_t tdc_stats;
3167 boolean_t txchan_fatal = B_FALSE;
3168 nxge_status_t status = NXGE_OK;
3169 tdmc_inj_par_err_t par_err;
3170 uint32_t value;
3171
3172 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
3173 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3174 channel = ldvp->channel;
3175
3176 tx_rings = nxgep->tx_rings->rings;
3177 tx_ring_p = tx_rings[index];
3178 tdc_stats = tx_ring_p->tdc_stats;
3179 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
3180 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
3181 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
3182 if ((rs = npi_txdma_ring_error_get(handle, channel,
3183 &tdc_stats->errlog)) != NPI_SUCCESS)
3184 return (NXGE_ERROR | rs);
3185 }
3186
3187 if (cs.bits.ldw.mbox_err) {
3188 tdc_stats->mbox_err++;
3189 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3190 NXGE_FM_EREPORT_TDMC_MBOX_ERR);
3191 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3192 "==> nxge_tx_err_evnts(channel %d): "
3193 "fatal error: mailbox", channel));
3194 txchan_fatal = B_TRUE;
3195 }
3196 if (cs.bits.ldw.pkt_size_err) {
3197 tdc_stats->pkt_size_err++;
3198 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3199 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
3200 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3201 "==> nxge_tx_err_evnts(channel %d): "
3202 "fatal error: pkt_size_err", channel));
3203 txchan_fatal = B_TRUE;
3204 }
3205 if (cs.bits.ldw.tx_ring_oflow) {
3206 tdc_stats->tx_ring_oflow++;
3207 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3208 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
3209 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3210 "==> nxge_tx_err_evnts(channel %d): "
3211 "fatal error: tx_ring_oflow", channel));
3212 txchan_fatal = B_TRUE;
3213 }
3214 if (cs.bits.ldw.pref_buf_par_err) {
3215 tdc_stats->pre_buf_par_err++;
3216 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3217 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
3218 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3219 "==> nxge_tx_err_evnts(channel %d): "
3220 "fatal error: pre_buf_par_err", channel));
3221 /* Clear error injection source for parity error */
3222 (void) npi_txdma_inj_par_error_get(handle, &value);
3223 par_err.value = value;
3224 par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
3225 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3226 txchan_fatal = B_TRUE;
3227 }
3228 if (cs.bits.ldw.nack_pref) {
3229 tdc_stats->nack_pref++;
3230 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3231 NXGE_FM_EREPORT_TDMC_NACK_PREF);
3232 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3233 "==> nxge_tx_err_evnts(channel %d): "
3234 "fatal error: nack_pref", channel));
3235 txchan_fatal = B_TRUE;
3236 }
3237 if (cs.bits.ldw.nack_pkt_rd) {
3238 tdc_stats->nack_pkt_rd++;
3239 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3240 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
3241 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3242 "==> nxge_tx_err_evnts(channel %d): "
3243 "fatal error: nack_pkt_rd", channel));
3244 txchan_fatal = B_TRUE;
3245 }
3246 if (cs.bits.ldw.conf_part_err) {
3247 tdc_stats->conf_part_err++;
3248 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3249 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
3250 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3251 "==> nxge_tx_err_evnts(channel %d): "
3252 "fatal error: config_partition_err", channel));
3253 txchan_fatal = B_TRUE;
3254 }
3255 if (cs.bits.ldw.pkt_prt_err) {
3256 tdc_stats->pkt_part_err++;
3257 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3258 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
3259 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3260 "==> nxge_tx_err_evnts(channel %d): "
3261 "fatal error: pkt_prt_err", channel));
3262 txchan_fatal = B_TRUE;
3263 }
3264
3265 /* Clear error injection source in case this is an injected error */
3266 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
3267
3268 if (txchan_fatal) {
3269 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3270 " nxge_tx_err_evnts: "
3271 " fatal error on channel %d cs 0x%llx\n",
3272 channel, cs.value));
3273 status = nxge_txdma_fatal_err_recover(nxgep, channel,
3274 tx_ring_p);
3275 if (status == NXGE_OK) {
3276 FM_SERVICE_RESTORED(nxgep);
3277 }
3278 }
3279
3280 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));
3281
3282 return (status);
3283}
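
/*
 * A minimal sketch of how a control/status word reaches this handler (a
 * hypothetical fragment of the interrupt path; the real caller is
 * nxge_tx_intr(), as noted above, and TX_CS_REG is assumed to be the
 * per-channel control/status register offset):
 *
 *	tx_cs_t cs;
 *
 *	TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
 *	if (cs.value != 0)
 *		(void) nxge_tx_err_evnts(nxgep, index, ldvp, cs);
 *
 * nxge_tx_err_evnts() then tests the individual cs.bits.ldw.* error bits,
 * bumps the matching tdc_stats counters, and calls
 * nxge_txdma_fatal_err_recover() if any fatal bit was set.
 */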
3284
3285static nxge_status_t
3286nxge_txdma_fatal_err_recover(
3287 p_nxge_t nxgep,
3288 uint16_t channel,
3289 p_tx_ring_t tx_ring_p)
3290{
3291 npi_handle_t handle;
3292 npi_status_t rs = NPI_SUCCESS;
3293 p_tx_mbox_t tx_mbox_p;
3294 nxge_status_t status = NXGE_OK;
3295
3296 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
3297 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3298 "Recovering from TxDMAChannel#%d error...", channel));
3299
3300 /*
3301 	 * Stop the DMA channel and wait for the stop-done bit.
3302 	 * If the stop-done bit is not set, then flag
3303 	 * an error.
3304 */
3305
3306 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3307 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
3308 MUTEX_ENTER(&tx_ring_p->lock);
3309 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
3310 if (rs != NPI_SUCCESS) {
3311 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3312 "==> nxge_txdma_fatal_err_recover (channel %d): "
3313 "stop failed ", channel));
3314 goto fail;
3315 }
3316
3317 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
3318 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
3319
3320 /*
3321 * Reset TXDMA channel
3322 */
3323 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
3324 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
3325 NPI_SUCCESS) {
3326 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3327 "==> nxge_txdma_fatal_err_recover (channel %d)"
3328 " reset channel failed 0x%x", channel, rs));
3329 goto fail;
3330 }
3331
3332 /*
3333 * Reset the tail (kick) register to 0.
3334 	 * (Hardware will not reset it.  A Tx overflow fatal
3335 	 * error results if the tail is not set to 0 after reset!)
3336 */
3337 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
3338
3339 /* Restart TXDMA channel */
3340
3341 tx_mbox_p = NULL;
3342 if (!isLDOMguest(nxgep)) {
3343 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
3344
3345 // XXX This is a problem in HIO!
3346 /*
3347 		 * configurations. These FZC registers pertain
3348 		 * to each TX channel (i.e., its logical pages).
3349 * to each TX channel (i.e. logical pages).
3350 */
3351 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
3352 status = nxge_init_fzc_txdma_channel(nxgep, channel,
3353 tx_ring_p, tx_mbox_p);
3354 if (status != NXGE_OK)
3355 goto fail;
3356 }
3357
3358 /*
3359 * Initialize the event masks.
3360 */
3361 tx_ring_p->tx_evmask.value = 0;
3362 status = nxge_init_txdma_channel_event_mask(nxgep, channel,
3363 &tx_ring_p->tx_evmask);
3364 if (status != NXGE_OK)
3365 goto fail;
3366
3367 tx_ring_p->wr_index_wrap = B_FALSE;
3368 tx_ring_p->wr_index = 0;
3369 tx_ring_p->rd_index = 0;
3370
3371 /*
3372 	 * Load the TXDMA descriptors, buffers and
3373 	 * mailbox, then initialise and enable the
3374 	 * DMA channel.
3375 */
3376 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
3377 status = nxge_enable_txdma_channel(nxgep, channel,
3378 tx_ring_p, tx_mbox_p);
3379 	if (status != NXGE_OK)
3380 		goto fail;
3381 	MUTEX_EXIT(&tx_ring_p->lock);
3382
3383 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3384 "Recovery Successful, TxDMAChannel#%d Restored",
3385 channel));
3386 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
3387
3388 return (NXGE_OK);
3389
3390fail:
3391 MUTEX_EXIT(&tx_ring_p->lock);
3392
3393 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3394 "nxge_txdma_fatal_err_recover (channel %d): "
3395 "failed to recover this txdma channel", channel));
3396 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
3397
3398 return (status);
3399}
3400
3401/*
3402 * nxge_tx_port_fatal_err_recover
3403 *
3404 * Attempt to recover from a fatal port error.
3405 *
3406 * Arguments:
3407 * nxgep
3408 *
3409 * Notes:
3410 * How would a guest do this?
3411 *
3412 * NPI/NXGE function calls:
3413 *
3414 * Registers accessed:
3415 *
3416 * Context:
3417 * Service domain
3418 */
3419nxge_status_t
3420nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
3421{
3422 nxge_grp_set_t *set = &nxgep->tx_set;
3423 nxge_channel_t tdc;
3424
3425 tx_ring_t *ring;
3426 tx_mbox_t *mailbox;
3427
3428 npi_handle_t handle;
3429 nxge_status_t status = NXGE_OK;
3430 npi_status_t rs;
3431
3432 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3433 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3434 "Recovering from TxPort error..."));
3435
3436 if (isLDOMguest(nxgep)) {
3437 return (NXGE_OK);
3438 }
3439
3440 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3441 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3442 "<== nxge_tx_port_fatal_err_recover: not initialized"));
3443 return (NXGE_ERROR);
3444 }
3445
3446 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3447 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3448 "<== nxge_tx_port_fatal_err_recover: "
3449 "NULL ring pointer(s)"));
3450 return (NXGE_ERROR);
3451 }
3452
3453 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3454 if ((1 << tdc) & set->owned.map) {
3455 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3456 if (ring)
3457 MUTEX_ENTER(&ring->lock);
3458 }
3459 }
3460
3461 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3462
3463 /*
3464 * Stop all the TDCs owned by us.
3465 * (The shared TDCs will have been stopped by their owners.)
3466 */
3467 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3468 if ((1 << tdc) & set->owned.map) {
3469 ring = nxgep->tx_rings->rings[tdc];
3470 if (ring) {
3471 rs = npi_txdma_channel_control
3472 (handle, TXDMA_STOP, tdc);
3473 if (rs != NPI_SUCCESS) {
3474 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3475 "nxge_tx_port_fatal_err_recover "
3476 "(channel %d): stop failed ", tdc));
3477 goto fail;
3478 }
3479 }
3480 }
3481 }
3482
3483 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
3484
3485 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3486 if ((1 << tdc) & set->owned.map) {
3487 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3488 if (ring) {
3489 (void) nxge_txdma_reclaim(nxgep, ring, 0);
3490 }
3491 }
3492 }
3493
3494 /*
3495 * Reset all the TDCs.
3496 */
3497 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
3498
3499 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3500 if ((1 << tdc) & set->owned.map) {
3501 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3502 if (ring) {
3503 if ((rs = npi_txdma_channel_control
3504 (handle, TXDMA_RESET, tdc))
3505 != NPI_SUCCESS) {
3506 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3507 "nxge_tx_port_fatal_err_recover "
3508 "(channel %d) reset channel "
3509 "failed 0x%x", tdc, rs));
3510 goto fail;
3511 }
3512 }
3513 /*
3514 * Reset the tail (kick) register to 0.
3515 			 * (Hardware will not reset it.  A Tx overflow fatal
3516 			 * error results if the tail is not set to 0 after reset!)
3517 */
3518 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
3519 }
3520 }
3521
3522 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
3523
3524 /* Restart all the TDCs */
3525 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3526 if ((1 << tdc) & set->owned.map) {
3527 ring = nxgep->tx_rings->rings[tdc];
3528 if (ring) {
3529 mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3530 status = nxge_init_fzc_txdma_channel(nxgep, tdc,
3531 ring, mailbox);
3532 ring->tx_evmask.value = 0;
3533 /*
3534 * Initialize the event masks.
3535 */
3536 status = nxge_init_txdma_channel_event_mask
3537 (nxgep, tdc, &ring->tx_evmask);
3538
3539 ring->wr_index_wrap = B_FALSE;
3540 ring->wr_index = 0;
3541 ring->rd_index = 0;
3542
3543 if (status != NXGE_OK)
3544 goto fail;
3545 if (status != NXGE_OK)
3546 goto fail;
3547 }
3548 }
3549 }
3550
3551 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
3552
3553 /* Re-enable all the TDCs */
3554 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3555 if ((1 << tdc) & set->owned.map) {
3556 ring = nxgep->tx_rings->rings[tdc];
3557 if (ring) {
3558 mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3559 status = nxge_enable_txdma_channel(nxgep, tdc,
3560 ring, mailbox);
3561 if (status != NXGE_OK)
3562 goto fail;
3563 }
3564 }
3565 }
3566
3567 /*
3568 * Unlock all the TDCs.
3569 */
3570 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3571 if ((1 << tdc) & set->owned.map) {
3572 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3573 if (ring)
3574 MUTEX_EXIT(&ring->lock);
3575 }
3576 }
3577
3578 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
3579 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3580
3581 return (NXGE_OK);
3582
3583fail:
3584 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3585 if ((1 << tdc) & set->owned.map) {
3586 ring = nxgep->tx_rings->rings[tdc];
3587 if (ring)
3588 MUTEX_EXIT(&ring->lock);
3589 }
3590 }
3591
3592 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
3593 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3594
3595 return (status);
3596}
3597
3598/*
3599 * nxge_txdma_inject_err
3600 *
3601 * Inject an error into a TDC.
3602 *
3603 * Arguments:
3604 * nxgep
3605 * err_id The error to inject.
3606 * chan The channel to inject into.
3607 *
3608 * Notes:
3609 * This is called from nxge_main.c:nxge_err_inject()
3610 * Has this ioctl ever been used?
3611 *
3612 * NPI/NXGE function calls:
3613 * npi_txdma_inj_par_error_get()
3614 * npi_txdma_inj_par_error_set()
3615 *
3616 * Registers accessed:
3617 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3618 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
3620 *
3621 * Context:
3622 * Service domain
3623 */
3624void
3625nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
3626{
3627 tdmc_intr_dbg_t tdi;
3628 tdmc_inj_par_err_t par_err;
3629 uint32_t value;
3630 npi_handle_t handle;
3631
3632 switch (err_id) {
3633
3634 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
3635 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3636 /* Clear error injection source for parity error */
3637 (void) npi_txdma_inj_par_error_get(handle, &value);
3638 par_err.value = value;
3639 par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
3640 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3641
3642 par_err.bits.ldw.inject_parity_error = (1 << chan);
3643 (void) npi_txdma_inj_par_error_get(handle, &value);
3644 par_err.value = value;
3645 par_err.bits.ldw.inject_parity_error |= (1 << chan);
3646 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
3647 (unsigned long long)par_err.value);
3648 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3649 break;
3650
3651 case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
3652 case NXGE_FM_EREPORT_TDMC_NACK_PREF:
3653 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
3654 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
3655 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
3656 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
3657 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
3658 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3659 chan, &tdi.value);
3660 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
3661 tdi.bits.ldw.pref_buf_par_err = 1;
3662 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
3663 tdi.bits.ldw.mbox_err = 1;
3664 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
3665 tdi.bits.ldw.nack_pref = 1;
3666 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
3667 tdi.bits.ldw.nack_pkt_rd = 1;
3668 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
3669 tdi.bits.ldw.pkt_size_err = 1;
3670 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
3671 tdi.bits.ldw.tx_ring_oflow = 1;
3672 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
3673 tdi.bits.ldw.conf_part_err = 1;
3674 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
3675 tdi.bits.ldw.pkt_part_err = 1;
3676#if defined(__i386)
3677 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
3678 tdi.value);
3679#else
3680 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
3681 tdi.value);
3682#endif
3683 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3684 chan, tdi.value);
3685
3686 break;
3687 }
3688}
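
/*
 * A minimal usage sketch (hypothetical values; the only caller noted in
 * the block comment above is nxge_main.c:nxge_err_inject()):
 *
 *	nxge_txdma_inject_err(nxgep, NXGE_FM_EREPORT_TDMC_MBOX_ERR, 0);
 *
 * sets the mbox_err bit in TDMC_INTR_DBG for channel 0.  The resulting
 * interrupt is handled by nxge_tx_err_evnts(), which counts the error,
 * attempts recovery, and writes TDMC_INTR_DBG_REG back to 0 to clear the
 * injection source.
 */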