/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
246f45ec7bSml29623 */ 256f45ec7bSml29623 266f45ec7bSml29623 #include <sys/nxge/nxge_impl.h> 276f45ec7bSml29623 #include <sys/nxge/nxge_rxdma.h> 28678453a8Sspeer #include <sys/nxge/nxge_hio.h> 29678453a8Sspeer 30678453a8Sspeer #if !defined(_BIG_ENDIAN) 31678453a8Sspeer #include <npi_rx_rd32.h> 32678453a8Sspeer #endif 33678453a8Sspeer #include <npi_rx_rd64.h> 34678453a8Sspeer #include <npi_rx_wr64.h> 356f45ec7bSml29623 366f45ec7bSml29623 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 37678453a8Sspeer (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 386f45ec7bSml29623 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 396f45ec7bSml29623 (rdc + nxgep->pt_config.hw_config.start_rdc) 406f45ec7bSml29623 416f45ec7bSml29623 /* 42da14cebeSEric Cheng * XXX: This is a tunable to limit the number of packets each interrupt 43da14cebeSEric Cheng * handles. 0 (default) means that each interrupt takes as much packets 44da14cebeSEric Cheng * as it finds. 45da14cebeSEric Cheng */ 46da14cebeSEric Cheng extern int nxge_max_intr_pkts; 47da14cebeSEric Cheng 48da14cebeSEric Cheng /* 496f45ec7bSml29623 * Globals: tunable parameters (/etc/system or adb) 506f45ec7bSml29623 * 516f45ec7bSml29623 */ 526f45ec7bSml29623 extern uint32_t nxge_rbr_size; 536f45ec7bSml29623 extern uint32_t nxge_rcr_size; 546f45ec7bSml29623 extern uint32_t nxge_rbr_spare_size; 556f45ec7bSml29623 566f45ec7bSml29623 extern uint32_t nxge_mblks_pending; 576f45ec7bSml29623 586f45ec7bSml29623 /* 596f45ec7bSml29623 * Tunable to reduce the amount of time spent in the 606f45ec7bSml29623 * ISR doing Rx Processing. 616f45ec7bSml29623 */ 626f45ec7bSml29623 extern uint32_t nxge_max_rx_pkts; 636f45ec7bSml29623 boolean_t nxge_jumbo_enable; 646f45ec7bSml29623 65*7b26d9ffSSantwona Behera extern uint16_t nxge_rcr_timeout; 66*7b26d9ffSSantwona Behera extern uint16_t nxge_rcr_threshold; 67*7b26d9ffSSantwona Behera 686f45ec7bSml29623 /* 696f45ec7bSml29623 * Tunables to manage the receive buffer blocks. 
706f45ec7bSml29623 * 716f45ec7bSml29623 * nxge_rx_threshold_hi: copy all buffers. 726f45ec7bSml29623 * nxge_rx_bcopy_size_type: receive buffer block size type. 736f45ec7bSml29623 * nxge_rx_threshold_lo: copy only up to tunable block size type. 746f45ec7bSml29623 */ 756f45ec7bSml29623 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 766f45ec7bSml29623 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 776f45ec7bSml29623 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 786f45ec7bSml29623 79b4d05839Sml29623 extern uint32_t nxge_cksum_offload; 80678453a8Sspeer 81678453a8Sspeer static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 82678453a8Sspeer static void nxge_unmap_rxdma(p_nxge_t, int); 836f45ec7bSml29623 846f45ec7bSml29623 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 856f45ec7bSml29623 86678453a8Sspeer static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 87678453a8Sspeer static void nxge_rxdma_hw_stop(p_nxge_t, int); 886f45ec7bSml29623 896f45ec7bSml29623 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 906f45ec7bSml29623 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 916f45ec7bSml29623 uint32_t, 926f45ec7bSml29623 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 936f45ec7bSml29623 p_rx_mbox_t *); 946f45ec7bSml29623 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 956f45ec7bSml29623 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 966f45ec7bSml29623 976f45ec7bSml29623 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 986f45ec7bSml29623 uint16_t, 996f45ec7bSml29623 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 1006f45ec7bSml29623 p_rx_rcr_ring_t *, p_rx_mbox_t *); 1016f45ec7bSml29623 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 1026f45ec7bSml29623 p_rx_rcr_ring_t, p_rx_mbox_t); 1036f45ec7bSml29623 1046f45ec7bSml29623 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 1056f45ec7bSml29623 uint16_t, 1066f45ec7bSml29623 p_nxge_dma_common_t *, 1076f45ec7bSml29623 p_rx_rbr_ring_t *, uint32_t); 
1086f45ec7bSml29623 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 1096f45ec7bSml29623 p_rx_rbr_ring_t); 1106f45ec7bSml29623 1116f45ec7bSml29623 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 1126f45ec7bSml29623 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 1136f45ec7bSml29623 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 1146f45ec7bSml29623 115678453a8Sspeer static mblk_t * 116678453a8Sspeer nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 1176f45ec7bSml29623 1186f45ec7bSml29623 static void nxge_receive_packet(p_nxge_t, 1196f45ec7bSml29623 p_rx_rcr_ring_t, 1206f45ec7bSml29623 p_rcr_entry_t, 1216f45ec7bSml29623 boolean_t *, 1226f45ec7bSml29623 mblk_t **, mblk_t **); 1236f45ec7bSml29623 1246f45ec7bSml29623 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 1256f45ec7bSml29623 1266f45ec7bSml29623 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 1276f45ec7bSml29623 static void nxge_freeb(p_rx_msg_t); 128da14cebeSEric Cheng static mblk_t *nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 129678453a8Sspeer static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 1306f45ec7bSml29623 1316f45ec7bSml29623 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 1326f45ec7bSml29623 uint32_t, uint32_t); 1336f45ec7bSml29623 1346f45ec7bSml29623 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 1356f45ec7bSml29623 p_rx_rbr_ring_t); 1366f45ec7bSml29623 1376f45ec7bSml29623 1386f45ec7bSml29623 static nxge_status_t 1396f45ec7bSml29623 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 1406f45ec7bSml29623 1416f45ec7bSml29623 nxge_status_t 1426f45ec7bSml29623 nxge_rx_port_fatal_err_recover(p_nxge_t); 1436f45ec7bSml29623 144678453a8Sspeer static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 145678453a8Sspeer 1466f45ec7bSml29623 nxge_status_t 1476f45ec7bSml29623 nxge_init_rxdma_channels(p_nxge_t nxgep) 1486f45ec7bSml29623 { 149678453a8Sspeer 
nxge_grp_set_t *set = &nxgep->rx_set; 150da14cebeSEric Cheng int i, count, channel; 151e11f0814SMichael Speer nxge_grp_t *group; 152da14cebeSEric Cheng dc_map_t map; 153da14cebeSEric Cheng int dev_gindex; 1546f45ec7bSml29623 1556f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 1566f45ec7bSml29623 157678453a8Sspeer if (!isLDOMguest(nxgep)) { 158678453a8Sspeer if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 159678453a8Sspeer cmn_err(CE_NOTE, "hw_start_common"); 160678453a8Sspeer return (NXGE_ERROR); 161678453a8Sspeer } 162678453a8Sspeer } 163678453a8Sspeer 164678453a8Sspeer /* 165678453a8Sspeer * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 166678453a8Sspeer * We only have 8 hardware RDC tables, but we may have 167678453a8Sspeer * up to 16 logical (software-defined) groups of RDCS, 168678453a8Sspeer * if we make use of layer 3 & 4 hardware classification. 169678453a8Sspeer */ 170678453a8Sspeer for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 171678453a8Sspeer if ((1 << i) & set->lg.map) { 172e11f0814SMichael Speer group = set->group[i]; 173da14cebeSEric Cheng dev_gindex = 174da14cebeSEric Cheng nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 175da14cebeSEric Cheng map = nxgep->pt_config.rdc_grps[dev_gindex].map; 176678453a8Sspeer for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 177da14cebeSEric Cheng if ((1 << channel) & map) { 178678453a8Sspeer if ((nxge_grp_dc_add(nxgep, 1796920a987SMisaki Miyashita group, VP_BOUND_RX, channel))) 180e11f0814SMichael Speer goto init_rxdma_channels_exit; 181678453a8Sspeer } 182678453a8Sspeer } 183678453a8Sspeer } 184678453a8Sspeer if (++count == set->lg.count) 185678453a8Sspeer break; 186678453a8Sspeer } 187678453a8Sspeer 188678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 189678453a8Sspeer return (NXGE_OK); 190e11f0814SMichael Speer 191e11f0814SMichael Speer init_rxdma_channels_exit: 192e11f0814SMichael Speer for (i = 0, count = 0; i < 
NXGE_LOGICAL_GROUP_MAX; i++) { 193e11f0814SMichael Speer if ((1 << i) & set->lg.map) { 194e11f0814SMichael Speer group = set->group[i]; 195da14cebeSEric Cheng dev_gindex = 196da14cebeSEric Cheng nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 197da14cebeSEric Cheng map = nxgep->pt_config.rdc_grps[dev_gindex].map; 198da14cebeSEric Cheng for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 199da14cebeSEric Cheng if ((1 << channel) & map) { 200e11f0814SMichael Speer nxge_grp_dc_remove(nxgep, 201da14cebeSEric Cheng VP_BOUND_RX, channel); 202e11f0814SMichael Speer } 203e11f0814SMichael Speer } 204e11f0814SMichael Speer } 205e11f0814SMichael Speer if (++count == set->lg.count) 206e11f0814SMichael Speer break; 207e11f0814SMichael Speer } 208e11f0814SMichael Speer 209e11f0814SMichael Speer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 210e11f0814SMichael Speer return (NXGE_ERROR); 211678453a8Sspeer } 212678453a8Sspeer 213678453a8Sspeer nxge_status_t 214678453a8Sspeer nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 215678453a8Sspeer { 216678453a8Sspeer nxge_status_t status; 217678453a8Sspeer 218678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 219678453a8Sspeer 220678453a8Sspeer status = nxge_map_rxdma(nxge, channel); 2216f45ec7bSml29623 if (status != NXGE_OK) { 222678453a8Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2236f45ec7bSml29623 "<== nxge_init_rxdma: status 0x%x", status)); 2246f45ec7bSml29623 return (status); 2256f45ec7bSml29623 } 2266f45ec7bSml29623 22708ac1c49SNicolas Droux #if defined(sun4v) 22808ac1c49SNicolas Droux if (isLDOMguest(nxge)) { 22908ac1c49SNicolas Droux /* set rcr_ring */ 23008ac1c49SNicolas Droux p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel]; 23108ac1c49SNicolas Droux 23208ac1c49SNicolas Droux status = nxge_hio_rxdma_bind_intr(nxge, ring, channel); 23308ac1c49SNicolas Droux if (status != NXGE_OK) { 23408ac1c49SNicolas Droux nxge_unmap_rxdma(nxge, channel); 
23508ac1c49SNicolas Droux return (status); 23608ac1c49SNicolas Droux } 23708ac1c49SNicolas Droux } 23808ac1c49SNicolas Droux #endif 23908ac1c49SNicolas Droux 240678453a8Sspeer status = nxge_rxdma_hw_start(nxge, channel); 2416f45ec7bSml29623 if (status != NXGE_OK) { 242678453a8Sspeer nxge_unmap_rxdma(nxge, channel); 2436f45ec7bSml29623 } 2446f45ec7bSml29623 245678453a8Sspeer if (!nxge->statsp->rdc_ksp[channel]) 246678453a8Sspeer nxge_setup_rdc_kstats(nxge, channel); 2476f45ec7bSml29623 248678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, 249678453a8Sspeer "<== nxge_init_rxdma_channel: status 0x%x", status)); 2506f45ec7bSml29623 2516f45ec7bSml29623 return (status); 2526f45ec7bSml29623 } 2536f45ec7bSml29623 2546f45ec7bSml29623 void 2556f45ec7bSml29623 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 2566f45ec7bSml29623 { 257678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 258678453a8Sspeer int rdc; 259678453a8Sspeer 2606f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 2616f45ec7bSml29623 262678453a8Sspeer if (set->owned.map == 0) { 2636f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 264678453a8Sspeer "nxge_uninit_rxdma_channels: no channels")); 265678453a8Sspeer return; 266678453a8Sspeer } 267678453a8Sspeer 268678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 269678453a8Sspeer if ((1 << rdc) & set->owned.map) { 270678453a8Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 271678453a8Sspeer } 272678453a8Sspeer } 273678453a8Sspeer 274678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 275678453a8Sspeer } 276678453a8Sspeer 277678453a8Sspeer void 278678453a8Sspeer nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 279678453a8Sspeer { 280678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 281678453a8Sspeer 282678453a8Sspeer if (nxgep->statsp->rdc_ksp[channel]) { 283678453a8Sspeer kstat_delete(nxgep->statsp->rdc_ksp[channel]); 284678453a8Sspeer 
nxgep->statsp->rdc_ksp[channel] = 0; 285678453a8Sspeer } 286678453a8Sspeer 287678453a8Sspeer nxge_rxdma_hw_stop(nxgep, channel); 288678453a8Sspeer nxge_unmap_rxdma(nxgep, channel); 289678453a8Sspeer 290678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 2916f45ec7bSml29623 } 2926f45ec7bSml29623 2936f45ec7bSml29623 nxge_status_t 2946f45ec7bSml29623 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 2956f45ec7bSml29623 { 2966f45ec7bSml29623 npi_handle_t handle; 2976f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 2986f45ec7bSml29623 nxge_status_t status = NXGE_OK; 2996f45ec7bSml29623 300330cd344SMichael Speer NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 3016f45ec7bSml29623 3026f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3036f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 3046f45ec7bSml29623 3056f45ec7bSml29623 if (rs != NPI_SUCCESS) { 3066f45ec7bSml29623 status = NXGE_ERROR | rs; 3076f45ec7bSml29623 } 3086f45ec7bSml29623 309330cd344SMichael Speer NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 310330cd344SMichael Speer 3116f45ec7bSml29623 return (status); 3126f45ec7bSml29623 } 3136f45ec7bSml29623 3146f45ec7bSml29623 void 3156f45ec7bSml29623 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 3166f45ec7bSml29623 { 317678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 318678453a8Sspeer int rdc; 3196f45ec7bSml29623 3206f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 3216f45ec7bSml29623 322678453a8Sspeer if (!isLDOMguest(nxgep)) { 323678453a8Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 3246f45ec7bSml29623 (void) npi_rxdma_dump_fzc_regs(handle); 3256f45ec7bSml29623 } 326678453a8Sspeer 327678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 328678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 329678453a8Sspeer "nxge_rxdma_regs_dump_channels: " 330678453a8Sspeer "NULL ring pointer(s)")); 
3316f45ec7bSml29623 return; 3326f45ec7bSml29623 } 3336f45ec7bSml29623 334678453a8Sspeer if (set->owned.map == 0) { 3356f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 336678453a8Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 3376f45ec7bSml29623 return; 3386f45ec7bSml29623 } 3396f45ec7bSml29623 340678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 341678453a8Sspeer if ((1 << rdc) & set->owned.map) { 342678453a8Sspeer rx_rbr_ring_t *ring = 343678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 344678453a8Sspeer if (ring) { 345678453a8Sspeer (void) nxge_dump_rxdma_channel(nxgep, rdc); 3466f45ec7bSml29623 } 347678453a8Sspeer } 3486f45ec7bSml29623 } 3496f45ec7bSml29623 3506f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 3516f45ec7bSml29623 } 3526f45ec7bSml29623 3536f45ec7bSml29623 nxge_status_t 3546f45ec7bSml29623 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 3556f45ec7bSml29623 { 3566f45ec7bSml29623 npi_handle_t handle; 3576f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 3586f45ec7bSml29623 nxge_status_t status = NXGE_OK; 3596f45ec7bSml29623 3606f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 3616f45ec7bSml29623 3626f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3636f45ec7bSml29623 rs = npi_rxdma_dump_rdc_regs(handle, channel); 3646f45ec7bSml29623 3656f45ec7bSml29623 if (rs != NPI_SUCCESS) { 3666f45ec7bSml29623 status = NXGE_ERROR | rs; 3676f45ec7bSml29623 } 3686f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 3696f45ec7bSml29623 return (status); 3706f45ec7bSml29623 } 3716f45ec7bSml29623 3726f45ec7bSml29623 nxge_status_t 3736f45ec7bSml29623 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 3746f45ec7bSml29623 p_rx_dma_ent_msk_t mask_p) 3756f45ec7bSml29623 { 3766f45ec7bSml29623 npi_handle_t handle; 3776f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 3786f45ec7bSml29623 nxge_status_t status = NXGE_OK; 3796f45ec7bSml29623 
3806f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3816f45ec7bSml29623 "<== nxge_init_rxdma_channel_event_mask")); 3826f45ec7bSml29623 3836f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3846f45ec7bSml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 3856f45ec7bSml29623 if (rs != NPI_SUCCESS) { 3866f45ec7bSml29623 status = NXGE_ERROR | rs; 3876f45ec7bSml29623 } 3886f45ec7bSml29623 3896f45ec7bSml29623 return (status); 3906f45ec7bSml29623 } 3916f45ec7bSml29623 3926f45ec7bSml29623 nxge_status_t 3936f45ec7bSml29623 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 3946f45ec7bSml29623 p_rx_dma_ctl_stat_t cs_p) 3956f45ec7bSml29623 { 3966f45ec7bSml29623 npi_handle_t handle; 3976f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 3986f45ec7bSml29623 nxge_status_t status = NXGE_OK; 3996f45ec7bSml29623 4006f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 4016f45ec7bSml29623 "<== nxge_init_rxdma_channel_cntl_stat")); 4026f45ec7bSml29623 4036f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4046f45ec7bSml29623 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 4056f45ec7bSml29623 4066f45ec7bSml29623 if (rs != NPI_SUCCESS) { 4076f45ec7bSml29623 status = NXGE_ERROR | rs; 4086f45ec7bSml29623 } 4096f45ec7bSml29623 4106f45ec7bSml29623 return (status); 4116f45ec7bSml29623 } 4126f45ec7bSml29623 413678453a8Sspeer /* 414678453a8Sspeer * nxge_rxdma_cfg_rdcgrp_default_rdc 415678453a8Sspeer * 416678453a8Sspeer * Set the default RDC for an RDC Group (Table) 417678453a8Sspeer * 418678453a8Sspeer * Arguments: 419678453a8Sspeer * nxgep 420678453a8Sspeer * rdcgrp The group to modify 421678453a8Sspeer * rdc The new default RDC. 
422678453a8Sspeer * 423678453a8Sspeer * Notes: 424678453a8Sspeer * 425678453a8Sspeer * NPI/NXGE function calls: 426678453a8Sspeer * npi_rxdma_cfg_rdc_table_default_rdc() 427678453a8Sspeer * 428678453a8Sspeer * Registers accessed: 429678453a8Sspeer * RDC_TBL_REG: FZC_ZCP + 0x10000 430678453a8Sspeer * 431678453a8Sspeer * Context: 432678453a8Sspeer * Service domain 433678453a8Sspeer */ 4346f45ec7bSml29623 nxge_status_t 435678453a8Sspeer nxge_rxdma_cfg_rdcgrp_default_rdc( 436678453a8Sspeer p_nxge_t nxgep, 437678453a8Sspeer uint8_t rdcgrp, 4386f45ec7bSml29623 uint8_t rdc) 4396f45ec7bSml29623 { 4406f45ec7bSml29623 npi_handle_t handle; 4416f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 4426f45ec7bSml29623 p_nxge_dma_pt_cfg_t p_dma_cfgp; 4436f45ec7bSml29623 p_nxge_rdc_grp_t rdc_grp_p; 4446f45ec7bSml29623 uint8_t actual_rdcgrp, actual_rdc; 4456f45ec7bSml29623 4466f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4476f45ec7bSml29623 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 4486f45ec7bSml29623 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 4496f45ec7bSml29623 4506f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4516f45ec7bSml29623 452678453a8Sspeer /* 453678453a8Sspeer * This has to be rewritten. Do we even allow this anymore? 
454678453a8Sspeer */ 4556f45ec7bSml29623 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 456678453a8Sspeer RDC_MAP_IN(rdc_grp_p->map, rdc); 457678453a8Sspeer rdc_grp_p->def_rdc = rdc; 4586f45ec7bSml29623 4596f45ec7bSml29623 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 4606f45ec7bSml29623 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 4616f45ec7bSml29623 462678453a8Sspeer rs = npi_rxdma_cfg_rdc_table_default_rdc( 463678453a8Sspeer handle, actual_rdcgrp, actual_rdc); 4646f45ec7bSml29623 4656f45ec7bSml29623 if (rs != NPI_SUCCESS) { 4666f45ec7bSml29623 return (NXGE_ERROR | rs); 4676f45ec7bSml29623 } 4686f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4696f45ec7bSml29623 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 4706f45ec7bSml29623 return (NXGE_OK); 4716f45ec7bSml29623 } 4726f45ec7bSml29623 4736f45ec7bSml29623 nxge_status_t 4746f45ec7bSml29623 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 4756f45ec7bSml29623 { 4766f45ec7bSml29623 npi_handle_t handle; 4776f45ec7bSml29623 4786f45ec7bSml29623 uint8_t actual_rdc; 4796f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 4806f45ec7bSml29623 4816f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4826f45ec7bSml29623 " ==> nxge_rxdma_cfg_port_default_rdc")); 4836f45ec7bSml29623 4846f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 485678453a8Sspeer actual_rdc = rdc; /* XXX Hack! 
*/ 4866f45ec7bSml29623 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 4876f45ec7bSml29623 4886f45ec7bSml29623 4896f45ec7bSml29623 if (rs != NPI_SUCCESS) { 4906f45ec7bSml29623 return (NXGE_ERROR | rs); 4916f45ec7bSml29623 } 4926f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4936f45ec7bSml29623 " <== nxge_rxdma_cfg_port_default_rdc")); 4946f45ec7bSml29623 4956f45ec7bSml29623 return (NXGE_OK); 4966f45ec7bSml29623 } 4976f45ec7bSml29623 4986f45ec7bSml29623 nxge_status_t 4996f45ec7bSml29623 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 5006f45ec7bSml29623 uint16_t pkts) 5016f45ec7bSml29623 { 5026f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 5036f45ec7bSml29623 npi_handle_t handle; 5046f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 5056f45ec7bSml29623 " ==> nxge_rxdma_cfg_rcr_threshold")); 5066f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 5076f45ec7bSml29623 5086f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 5096f45ec7bSml29623 5106f45ec7bSml29623 if (rs != NPI_SUCCESS) { 5116f45ec7bSml29623 return (NXGE_ERROR | rs); 5126f45ec7bSml29623 } 5136f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 5146f45ec7bSml29623 return (NXGE_OK); 5156f45ec7bSml29623 } 5166f45ec7bSml29623 5176f45ec7bSml29623 nxge_status_t 5186f45ec7bSml29623 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 5196f45ec7bSml29623 uint16_t tout, uint8_t enable) 5206f45ec7bSml29623 { 5216f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 5226f45ec7bSml29623 npi_handle_t handle; 5236f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 5246f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 5256f45ec7bSml29623 if (enable == 0) { 5266f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 5276f45ec7bSml29623 } else { 5286f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 5296f45ec7bSml29623 tout); 5306f45ec7bSml29623 } 
5316f45ec7bSml29623 5326f45ec7bSml29623 if (rs != NPI_SUCCESS) { 5336f45ec7bSml29623 return (NXGE_ERROR | rs); 5346f45ec7bSml29623 } 5356f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 5366f45ec7bSml29623 return (NXGE_OK); 5376f45ec7bSml29623 } 5386f45ec7bSml29623 5396f45ec7bSml29623 nxge_status_t 5406f45ec7bSml29623 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 5416f45ec7bSml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 5426f45ec7bSml29623 { 5436f45ec7bSml29623 npi_handle_t handle; 5446f45ec7bSml29623 rdc_desc_cfg_t rdc_desc; 5456f45ec7bSml29623 p_rcrcfig_b_t cfgb_p; 5466f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 5476f45ec7bSml29623 5486f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 5496f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 5506f45ec7bSml29623 /* 5516f45ec7bSml29623 * Use configuration data composed at init time. 5526f45ec7bSml29623 * Write to hardware the receive ring configurations. 
5536f45ec7bSml29623 */ 5546f45ec7bSml29623 rdc_desc.mbox_enable = 1; 5556f45ec7bSml29623 rdc_desc.mbox_addr = mbox_p->mbox_addr; 5566f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 5576f45ec7bSml29623 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 5586f45ec7bSml29623 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 5596f45ec7bSml29623 5606f45ec7bSml29623 rdc_desc.rbr_len = rbr_p->rbb_max; 5616f45ec7bSml29623 rdc_desc.rbr_addr = rbr_p->rbr_addr; 5626f45ec7bSml29623 5636f45ec7bSml29623 switch (nxgep->rx_bksize_code) { 5646f45ec7bSml29623 case RBR_BKSIZE_4K: 5656f45ec7bSml29623 rdc_desc.page_size = SIZE_4KB; 5666f45ec7bSml29623 break; 5676f45ec7bSml29623 case RBR_BKSIZE_8K: 5686f45ec7bSml29623 rdc_desc.page_size = SIZE_8KB; 5696f45ec7bSml29623 break; 5706f45ec7bSml29623 case RBR_BKSIZE_16K: 5716f45ec7bSml29623 rdc_desc.page_size = SIZE_16KB; 5726f45ec7bSml29623 break; 5736f45ec7bSml29623 case RBR_BKSIZE_32K: 5746f45ec7bSml29623 rdc_desc.page_size = SIZE_32KB; 5756f45ec7bSml29623 break; 5766f45ec7bSml29623 } 5776f45ec7bSml29623 5786f45ec7bSml29623 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 5796f45ec7bSml29623 rdc_desc.valid0 = 1; 5806f45ec7bSml29623 5816f45ec7bSml29623 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 5826f45ec7bSml29623 rdc_desc.valid1 = 1; 5836f45ec7bSml29623 5846f45ec7bSml29623 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 5856f45ec7bSml29623 rdc_desc.valid2 = 1; 5866f45ec7bSml29623 5876f45ec7bSml29623 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 5886f45ec7bSml29623 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 5896f45ec7bSml29623 5906f45ec7bSml29623 rdc_desc.rcr_len = rcr_p->comp_size; 5916f45ec7bSml29623 rdc_desc.rcr_addr = rcr_p->rcr_addr; 5926f45ec7bSml29623 5936f45ec7bSml29623 cfgb_p = &(rcr_p->rcr_cfgb); 5946f45ec7bSml29623 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 595678453a8Sspeer /* For now, disable this timeout in a guest domain. 
*/ 596678453a8Sspeer if (isLDOMguest(nxgep)) { 597678453a8Sspeer rdc_desc.rcr_timeout = 0; 598678453a8Sspeer rdc_desc.rcr_timeout_enable = 0; 599678453a8Sspeer } else { 6006f45ec7bSml29623 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 6016f45ec7bSml29623 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 602678453a8Sspeer } 6036f45ec7bSml29623 6046f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 6056f45ec7bSml29623 "rbr_len qlen %d pagesize code %d rcr_len %d", 6066f45ec7bSml29623 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 6076f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 6086f45ec7bSml29623 "size 0 %d size 1 %d size 2 %d", 6096f45ec7bSml29623 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 6106f45ec7bSml29623 rbr_p->npi_pkt_buf_size2)); 6116f45ec7bSml29623 6126f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 6136f45ec7bSml29623 if (rs != NPI_SUCCESS) { 6146f45ec7bSml29623 return (NXGE_ERROR | rs); 6156f45ec7bSml29623 } 6166f45ec7bSml29623 6176f45ec7bSml29623 /* 6186f45ec7bSml29623 * Enable the timeout and threshold. 6196f45ec7bSml29623 */ 6206f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 6216f45ec7bSml29623 rdc_desc.rcr_threshold); 6226f45ec7bSml29623 if (rs != NPI_SUCCESS) { 6236f45ec7bSml29623 return (NXGE_ERROR | rs); 6246f45ec7bSml29623 } 6256f45ec7bSml29623 6266f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 6276f45ec7bSml29623 rdc_desc.rcr_timeout); 6286f45ec7bSml29623 if (rs != NPI_SUCCESS) { 6296f45ec7bSml29623 return (NXGE_ERROR | rs); 6306f45ec7bSml29623 } 6316f45ec7bSml29623 6326f45ec7bSml29623 /* Enable the DMA */ 6336f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 6346f45ec7bSml29623 if (rs != NPI_SUCCESS) { 6356f45ec7bSml29623 return (NXGE_ERROR | rs); 6366f45ec7bSml29623 } 6376f45ec7bSml29623 6386f45ec7bSml29623 /* Kick the DMA engine. 
*/ 6396f45ec7bSml29623 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 6406f45ec7bSml29623 /* Clear the rbr empty bit */ 6416f45ec7bSml29623 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 6426f45ec7bSml29623 6436f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 6446f45ec7bSml29623 6456f45ec7bSml29623 return (NXGE_OK); 6466f45ec7bSml29623 } 6476f45ec7bSml29623 6486f45ec7bSml29623 nxge_status_t 6496f45ec7bSml29623 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 6506f45ec7bSml29623 { 6516f45ec7bSml29623 npi_handle_t handle; 6526f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 6536f45ec7bSml29623 6546f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 6556f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 6566f45ec7bSml29623 6576f45ec7bSml29623 /* disable the DMA */ 6586f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 6596f45ec7bSml29623 if (rs != NPI_SUCCESS) { 6606f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 6616f45ec7bSml29623 "<== nxge_disable_rxdma_channel:failed (0x%x)", 6626f45ec7bSml29623 rs)); 6636f45ec7bSml29623 return (NXGE_ERROR | rs); 6646f45ec7bSml29623 } 6656f45ec7bSml29623 6666f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 6676f45ec7bSml29623 return (NXGE_OK); 6686f45ec7bSml29623 } 6696f45ec7bSml29623 6706f45ec7bSml29623 nxge_status_t 6716f45ec7bSml29623 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 6726f45ec7bSml29623 { 6736f45ec7bSml29623 npi_handle_t handle; 6746f45ec7bSml29623 nxge_status_t status = NXGE_OK; 6756f45ec7bSml29623 6766f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 6776f45ec7bSml29623 "<== nxge_init_rxdma_channel_rcrflush")); 6786f45ec7bSml29623 6796f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 6806f45ec7bSml29623 npi_rxdma_rdc_rcr_flush(handle, channel); 6816f45ec7bSml29623 6826f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 6836f45ec7bSml29623 "<== 
nxge_init_rxdma_channel_rcrflsh")); 6846f45ec7bSml29623 return (status); 6856f45ec7bSml29623 6866f45ec7bSml29623 } 6876f45ec7bSml29623 6886f45ec7bSml29623 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 6896f45ec7bSml29623 6906f45ec7bSml29623 #define TO_LEFT -1 6916f45ec7bSml29623 #define TO_RIGHT 1 6926f45ec7bSml29623 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 6936f45ec7bSml29623 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 6946f45ec7bSml29623 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 6956f45ec7bSml29623 #define NO_HINT 0xffffffff 6966f45ec7bSml29623 6976f45ec7bSml29623 /*ARGSUSED*/ 6986f45ec7bSml29623 nxge_status_t 6996f45ec7bSml29623 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 7006f45ec7bSml29623 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 7016f45ec7bSml29623 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 7026f45ec7bSml29623 { 7036f45ec7bSml29623 int bufsize; 7046f45ec7bSml29623 uint64_t pktbuf_pp; 7056f45ec7bSml29623 uint64_t dvma_addr; 7066f45ec7bSml29623 rxring_info_t *ring_info; 7076f45ec7bSml29623 int base_side, end_side; 7086f45ec7bSml29623 int r_index, l_index, anchor_index; 7096f45ec7bSml29623 int found, search_done; 7106f45ec7bSml29623 uint32_t offset, chunk_size, block_size, page_size_mask; 7116f45ec7bSml29623 uint32_t chunk_index, block_index, total_index; 7126f45ec7bSml29623 int max_iterations, iteration; 7136f45ec7bSml29623 rxbuf_index_info_t *bufinfo; 7146f45ec7bSml29623 7156f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 7166f45ec7bSml29623 7176f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 7186f45ec7bSml29623 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 7196f45ec7bSml29623 pkt_buf_addr_pp, 7206f45ec7bSml29623 pktbufsz_type)); 721adfcba55Sjoycey #if defined(__i386) 722adfcba55Sjoycey pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 723adfcba55Sjoycey #else 7246f45ec7bSml29623 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 725adfcba55Sjoycey #endif 7266f45ec7bSml29623 7276f45ec7bSml29623 switch 
	(pktbufsz_type) {
	case 0:
		bufsize = rbr_p->pkt_buf_size0;
		break;
	case 1:
		bufsize = rbr_p->pkt_buf_size1;
		break;
	case 2:
		bufsize = rbr_p->pkt_buf_size2;
		break;
	case RCR_SINGLE_BLOCK:
		bufsize = 0;
		anchor_index = 0;
		break;
	default:
		return (NXGE_ERROR);
	}

	/* A single chunk: no search needed, it must be chunk 0. */
	if (rbr_p->num_blocks == 1) {
		anchor_index = 0;
		ring_info = rbr_p->ring_info;
		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
		    "buf_pp $%p btype %d anchor_index %d "
		    "bufinfo $%p",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index,
		    bufinfo));

		goto found_index;
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: "
	    "buf_pp $%p btype %d anchor_index %d",
	    pkt_buf_addr_pp,
	    pktbufsz_type,
	    anchor_index));

	ring_info = rbr_p->ring_info;
	found = B_FALSE;
	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen
	 * recently. This is indicated by a hint which
	 * is initialized when the first buffer of the block
	 * is seen. The hint is reset when the last buffer of
	 * the block has been processed.
	 * As three block sizes are supported, three hints
	 * are kept. The idea behind the hints is that once
	 * the hardware uses a block for a buffer of that
	 * size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed
	 * that there would a single block being used for the same
	 * buffer sizes at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * check if this is the last buffer in the block
			 * If so, then reset the hint for the size;
			 */

			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));

		/*
		 * This is the first buffer of the block of this
		 * size. Need to search the whole information
		 * array.
		 * the search algorithm uses a binary tree search
		 * algorithm. It assumes that the information is
		 * already sorted with increasing order
		 * info[0] < info[1] < info[2]  .... < info[n-1]
		 * where n is the size of the information array
		 */
		r_index = rbr_p->num_blocks - 1;
		l_index = 0;
		search_done = B_FALSE;
		anchor_index = MID_INDEX(r_index, l_index);
		while (search_done == B_FALSE) {
			/* Last probe: window collapsed or depth exceeded. */
			if ((r_index == l_index) ||
			    (iteration >= max_iterations))
				search_done = B_TRUE;
			end_side = TO_RIGHT; /* to the right */
			base_side = TO_LEFT; /* to the left */
			/* read the DVMA address information and sort it */
			dvma_addr = bufinfo[anchor_index].dvma_addr;
			chunk_size = bufinfo[anchor_index].buf_size;
			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			    "==> nxge_rxbuf_pp_to_vp: (searching)"
			    "buf_pp $%p btype %d "
			    "anchor_index %d chunk_size %d dvmaaddr $%p",
			    pkt_buf_addr_pp,
			    pktbufsz_type,
			    anchor_index,
			    chunk_size,
			    dvma_addr));

			if (pktbuf_pp >= dvma_addr)
				base_side = TO_RIGHT; /* to the right */
			if (pktbuf_pp < (dvma_addr + chunk_size))
				end_side = TO_LEFT; /* to the left */

			switch (base_side + end_side) {
			case IN_MIDDLE:
				/* found */
				found = B_TRUE;
				search_done = B_TRUE;
				/* Remember this chunk unless exhausted. */
				if ((pktbuf_pp + bufsize) <
				    (dvma_addr + chunk_size))
					ring_info->hint[pktbufsz_type] =
					    bufinfo[anchor_index].buf_index;
				break;
			case BOTH_RIGHT:
				/* not found: go to the right */
				l_index = anchor_index + 1;
				anchor_index = MID_INDEX(r_index, l_index);
				break;

			case BOTH_LEFT:
				/* not found: go to the left */
				r_index = anchor_index - 1;
				anchor_index = MID_INDEX(r_index, l_index);
				break;
			default: /* should not come here */
				return (NXGE_ERROR);
			}
			iteration++;
		}

		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (search done)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (search failed)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));
		return (NXGE_ERROR);
	}

found_index:
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
	    "buf_pp $%p btype %d bufsize %d anchor_index %d",
	    pkt_buf_addr_pp,
	    pktbufsz_type,
	    bufsize,
	    anchor_index));

	/* index of the first block in this chunk */
	chunk_index = bufinfo[anchor_index].start_index;
	dvma_addr = bufinfo[anchor_index].dvma_addr;
	page_size_mask = ring_info->block_size_mask;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
	    "buf_pp $%p btype %d bufsize %d "
	    "anchor_index %d chunk_index %d dvma $%p",
	    pkt_buf_addr_pp,
	    pktbufsz_type,
	    bufsize,
	    anchor_index,
	    chunk_index,
	    dvma_addr));

	offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
	block_size = rbr_p->block_size; /* System  block(page) size */

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
	    "buf_pp $%p btype %d bufsize %d "
	    "anchor_index %d chunk_index %d dvma $%p "
	    "offset %d block_size %d",
	    pkt_buf_addr_pp,
	    pktbufsz_type,
	    bufsize,
	    anchor_index,
	    chunk_index,
	    dvma_addr,
	    offset,
	    block_size));

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));

	block_index = (offset / block_size); /* index within chunk */
	total_index = chunk_index + block_index;


	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: "
	    "total_index %d dvma_addr $%p "
	    "offset %d block_size %d "
	    "block_index %d ",
	    total_index, dvma_addr,
	    offset, block_size,
	    block_index));
#if defined(__i386)
	*pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
	    (uint32_t)offset);
#else
	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
	    (uint64_t)offset);
#endif

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: "
	    "total_index %d dvma_addr $%p "
	    "offset %d block_size %d "
	    "block_index %d "
	    "*pkt_buf_addr_p $%p",
	    total_index, dvma_addr,
	    offset, block_size,
	    block_index,
	    *pkt_buf_addr_p));


	*msg_index = total_index;
	*bufoffset = (offset & page_size_mask);

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: get msg index: "
	    "msg_index %d bufoffset_index %d",
	    *msg_index,
	    *bufoffset));

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));

	return (NXGE_OK);
}

/*
 * Comparison callback for nxge_ksort(): orders rxbuf_index_info_t
 * records by ascending DVMA address so the binary search above works.
 */
static int
nxge_sort_compare(const void *p1, const void *p2)
{

	rxbuf_index_info_t *a, *b;

	a = (rxbuf_index_info_t *)p1;
	b =
	    (rxbuf_index_info_t *)p2;	/* tail of nxge_sort_compare() */

	if (a->dvma_addr > b->dvma_addr)
		return (1);
	if (a->dvma_addr < b->dvma_addr)
		return (-1);
	return (0);
}



/*
 * grabbed this sort implementation from common/syscall/avl.c
 *
 */
/*
 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
 * v = Ptr to array/vector of objs
 * n = # objs in the array
 * s = size of each obj (must be multiples of a word size)
 * f = ptr to function to compare two objs
 *	returns (-1 = less than, 0 = equal, 1 = greater than
 */
void
nxge_ksort(caddr_t v, int n, int s, int (*f)())
{
	int g, i, j, ii;
	unsigned int *p1, *p2;
	unsigned int tmp;

	/* No work to do */
	if (v == NULL || n <= 1)
		return;
	/* Sanity check on arguments */
	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
	ASSERT(s > 0);

	for (g = n / 2; g > 0; g /= 2) {
		for (i = g; i < n; i++) {
			for (j = i - g; j >= 0 &&
			    (*f)(v + j * s, v + (j + g) * s) == 1;
			    j -= g) {
				/* Swap the two objects word by word. */
				p1 = (unsigned *)(v + j * s);
				p2 = (unsigned *)(v + (j + g) * s);
				for (ii = 0; ii < s / 4; ii++) {
					tmp = *p1;
					*p1++ = *p2;
					*p2++ = tmp;
				}
			}
		}
	}
}

/*
 * Initialize data structures required for rxdma
 * buffer dvma->vmem address lookup:  clear all three size hints, sort
 * the chunk table by DVMA address, and compute the maximum binary
 * search depth (ceil(log2(num_blocks)) + 1) used by nxge_rxbuf_pp_to_vp().
 */
/*ARGSUSED*/
static nxge_status_t
nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
{

	int index;
	rxring_info_t *ring_info;
	int max_iteration = 0, max_index = 0;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));

	ring_info = rbrp->ring_info;
	ring_info->hint[0] = NO_HINT;
	ring_info->hint[1] = NO_HINT;
	ring_info->hint[2] = NO_HINT;
	max_index = rbrp->num_blocks;

	/* read the DVMA address information and sort it */
	/* do init of the information array */


	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
	    " nxge_rxbuf_index_info_init Sort ptrs"));

	/* sort the array */
	nxge_ksort((void *)ring_info->buffer, max_index,
	    sizeof (rxbuf_index_info_t), nxge_sort_compare);



	for (index = 0; index < max_index; index++) {
		NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
		    " nxge_rxbuf_index_info_init: sorted chunk %d "
		    " ioaddr $%p kaddr $%p size %x",
		    index, ring_info->buffer[index].dvma_addr,
		    ring_info->buffer[index].kaddr,
		    ring_info->buffer[index].buf_size));
	}

	/* Smallest power of two strictly greater than max_index. */
	max_iteration = 0;
	while (max_index >= (1ULL << max_iteration))
		max_iteration++;
	ring_info->max_iterations = max_iteration + 1;
	NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
	    " nxge_rxbuf_index_info_init Find max iter %d",
	    ring_info->max_iterations));

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
	return (NXGE_OK);
}

/*
 * Debug-only dump of a single RCR (receive completion ring) entry.
 * Compiles to an empty function unless NXGE_DEBUG is defined.
 */
/* ARGSUSED */
void
nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
{
#ifdef NXGE_DEBUG

	uint32_t bptr;
	uint64_t pp;

	bptr = entry_p->bits.hdw.pkt_buf_addr;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "\trcr entry $%p "
	    "\trcr entry 0x%0llx "
	    "\trcr entry 0x%08x "
	    "\trcr entry 0x%08x "
	    "\tvalue 0x%0llx\n"
	    "\tmulti = %d\n"
	    "\tpkt_type = 0x%x\n"
	    "\tzero_copy = %d\n"
	    "\tnoport = %d\n"
	    "\tpromis = %d\n"
	    "\terror = 0x%04x\n"
	    "\tdcf_err = 0x%01x\n"
	    "\tl2_len = %d\n"
	    "\tpktbufsize = %d\n"
	    "\tpkt_buf_addr = $%p\n"
	    "\tpkt_buf_addr (<< 6) = $%p\n",
	    entry_p,
	    *(int64_t *)entry_p,
	    *(int32_t *)entry_p,
	    *(int32_t *)((char *)entry_p + 32),
	    entry_p->value,
	    entry_p->bits.hdw.multi,
	    entry_p->bits.hdw.pkt_type,
	    entry_p->bits.hdw.zero_copy,
	    entry_p->bits.hdw.noport,
	    entry_p->bits.hdw.promis,
	    entry_p->bits.hdw.error,
	    entry_p->bits.hdw.dcf_err,
	    entry_p->bits.hdw.l2_len,
	    entry_p->bits.hdw.pktbufsz,
	    bptr,
	    entry_p->bits.ldw.pkt_buf_addr));

	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
	    RCR_PKT_BUF_ADDR_SHIFT;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
	    pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
#endif
}

/*
 * Dump RBR head, RBR qlen, RCR tail and RCR qlen registers of RDC
 * <rdc> to the console (body continues in the next chunk).
 */
void
nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
{
	npi_handle_t		handle;
	rbr_stat_t		rbr_stat;
	addr44_t		hd_addr;
	addr44_t		tail_addr;
	uint16_t		qlen;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_regs_dump: rdc channel %d", rdc));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/* RBR head */
	hd_addr.addr = 0;
	(void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
#if defined(__i386)
	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
	    (void *)(uint32_t)hd_addr.addr);
#else
	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
	    (void *)hd_addr.addr);
#endif

11836f45ec7bSml29623 /* RBR stats */ 11846f45ec7bSml29623 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 11856f45ec7bSml29623 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 11866f45ec7bSml29623 11876f45ec7bSml29623 /* RCR tail */ 11886f45ec7bSml29623 tail_addr.addr = 0; 11896f45ec7bSml29623 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1190adfcba55Sjoycey #if defined(__i386) 119153f3d8ecSyc148097 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1192adfcba55Sjoycey (void *)(uint32_t)tail_addr.addr); 1193adfcba55Sjoycey #else 119453f3d8ecSyc148097 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 11956f45ec7bSml29623 (void *)tail_addr.addr); 1196adfcba55Sjoycey #endif 11976f45ec7bSml29623 11986f45ec7bSml29623 /* RCR qlen */ 11996f45ec7bSml29623 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 12006f45ec7bSml29623 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 12016f45ec7bSml29623 12026f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 12036f45ec7bSml29623 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 12046f45ec7bSml29623 } 12056f45ec7bSml29623 12066f45ec7bSml29623 nxge_status_t 12076f45ec7bSml29623 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 12086f45ec7bSml29623 { 1209678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1210678453a8Sspeer nxge_status_t status; 1211678453a8Sspeer npi_status_t rs; 1212678453a8Sspeer int rdc; 12136f45ec7bSml29623 12146f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12156f45ec7bSml29623 "==> nxge_rxdma_hw_mode: mode %d", enable)); 12166f45ec7bSml29623 12176f45ec7bSml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 12186f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 12196f45ec7bSml29623 "<== nxge_rxdma_mode: not initialized")); 12206f45ec7bSml29623 return (NXGE_ERROR); 12216f45ec7bSml29623 } 12226f45ec7bSml29623 1223678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1224678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 
1225678453a8Sspeer "<== nxge_tx_port_fatal_err_recover: " 1226678453a8Sspeer "NULL ring pointer(s)")); 12276f45ec7bSml29623 return (NXGE_ERROR); 12286f45ec7bSml29623 } 12296f45ec7bSml29623 1230678453a8Sspeer if (set->owned.map == 0) { 12316f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1232678453a8Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 1233678453a8Sspeer return (NULL); 12346f45ec7bSml29623 } 12356f45ec7bSml29623 1236678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1237678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1238678453a8Sspeer rx_rbr_ring_t *ring = 1239678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1240678453a8Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1241678453a8Sspeer if (ring) { 12426f45ec7bSml29623 if (enable) { 12436f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1244678453a8Sspeer "==> nxge_rxdma_hw_mode: " 1245678453a8Sspeer "channel %d (enable)", rdc)); 1246678453a8Sspeer rs = npi_rxdma_cfg_rdc_enable 1247678453a8Sspeer (handle, rdc); 12486f45ec7bSml29623 } else { 12496f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1250678453a8Sspeer "==> nxge_rxdma_hw_mode: " 1251678453a8Sspeer "channel %d disable)", rdc)); 1252678453a8Sspeer rs = npi_rxdma_cfg_rdc_disable 1253678453a8Sspeer (handle, rdc); 1254678453a8Sspeer } 1255678453a8Sspeer } 12566f45ec7bSml29623 } 12576f45ec7bSml29623 } 12586f45ec7bSml29623 12596f45ec7bSml29623 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 12606f45ec7bSml29623 12616f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12626f45ec7bSml29623 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 12636f45ec7bSml29623 12646f45ec7bSml29623 return (status); 12656f45ec7bSml29623 } 12666f45ec7bSml29623 12676f45ec7bSml29623 void 12686f45ec7bSml29623 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 12696f45ec7bSml29623 { 12706f45ec7bSml29623 npi_handle_t handle; 12716f45ec7bSml29623 12726f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 12736f45ec7bSml29623 "==> nxge_rxdma_enable_channel: channel %d", channel)); 12746f45ec7bSml29623 12756f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 12766f45ec7bSml29623 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 12776f45ec7bSml29623 12786f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 12796f45ec7bSml29623 } 12806f45ec7bSml29623 12816f45ec7bSml29623 void 12826f45ec7bSml29623 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 12836f45ec7bSml29623 { 12846f45ec7bSml29623 npi_handle_t handle; 12856f45ec7bSml29623 12866f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 12876f45ec7bSml29623 "==> nxge_rxdma_disable_channel: channel %d", channel)); 12886f45ec7bSml29623 12896f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 12906f45ec7bSml29623 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 12916f45ec7bSml29623 12926f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 12936f45ec7bSml29623 } 12946f45ec7bSml29623 12956f45ec7bSml29623 void 12966f45ec7bSml29623 nxge_hw_start_rx(p_nxge_t nxgep) 12976f45ec7bSml29623 { 12986f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 12996f45ec7bSml29623 13006f45ec7bSml29623 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 13016f45ec7bSml29623 (void) nxge_rx_mac_enable(nxgep); 13026f45ec7bSml29623 13036f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 13046f45ec7bSml29623 } 
13056f45ec7bSml29623 13066f45ec7bSml29623 /*ARGSUSED*/ 13076f45ec7bSml29623 void 13086f45ec7bSml29623 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 13096f45ec7bSml29623 { 1310678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1311678453a8Sspeer int rdc; 13126f45ec7bSml29623 13136f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 13146f45ec7bSml29623 1315678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1316678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1317678453a8Sspeer "<== nxge_tx_port_fatal_err_recover: " 1318678453a8Sspeer "NULL ring pointer(s)")); 13196f45ec7bSml29623 return; 13206f45ec7bSml29623 } 13216f45ec7bSml29623 1322678453a8Sspeer if (set->owned.map == 0) { 13236f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1324678453a8Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 13256f45ec7bSml29623 return; 13266f45ec7bSml29623 } 13276f45ec7bSml29623 1328678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1329678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1330678453a8Sspeer rx_rbr_ring_t *ring = 1331678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1332678453a8Sspeer if (ring) { 1333678453a8Sspeer nxge_rxdma_hw_stop(nxgep, rdc); 13346f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1335678453a8Sspeer "==> nxge_fixup_rxdma_rings: " 1336678453a8Sspeer "channel %d ring $%px", 1337678453a8Sspeer rdc, ring)); 1338678453a8Sspeer (void) nxge_rxdma_fixup_channel 1339678453a8Sspeer (nxgep, rdc, rdc); 1340678453a8Sspeer } 1341678453a8Sspeer } 13426f45ec7bSml29623 } 13436f45ec7bSml29623 13446f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 13456f45ec7bSml29623 } 13466f45ec7bSml29623 13476f45ec7bSml29623 void 13486f45ec7bSml29623 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 13496f45ec7bSml29623 { 13506f45ec7bSml29623 int i; 13516f45ec7bSml29623 13526f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 13536f45ec7bSml29623 i = 
nxge_rxdma_get_ring_index(nxgep, channel); 13546f45ec7bSml29623 if (i < 0) { 13556f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13566f45ec7bSml29623 "<== nxge_rxdma_fix_channel: no entry found")); 13576f45ec7bSml29623 return; 13586f45ec7bSml29623 } 13596f45ec7bSml29623 13606f45ec7bSml29623 nxge_rxdma_fixup_channel(nxgep, channel, i); 13616f45ec7bSml29623 1362678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel")); 13636f45ec7bSml29623 } 13646f45ec7bSml29623 13656f45ec7bSml29623 void 13666f45ec7bSml29623 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 13676f45ec7bSml29623 { 13686f45ec7bSml29623 int ndmas; 13696f45ec7bSml29623 p_rx_rbr_rings_t rx_rbr_rings; 13706f45ec7bSml29623 p_rx_rbr_ring_t *rbr_rings; 13716f45ec7bSml29623 p_rx_rcr_rings_t rx_rcr_rings; 13726f45ec7bSml29623 p_rx_rcr_ring_t *rcr_rings; 13736f45ec7bSml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 13746f45ec7bSml29623 p_rx_mbox_t *rx_mbox_p; 13756f45ec7bSml29623 p_nxge_dma_pool_t dma_buf_poolp; 13766f45ec7bSml29623 p_nxge_dma_pool_t dma_cntl_poolp; 13776f45ec7bSml29623 p_rx_rbr_ring_t rbrp; 13786f45ec7bSml29623 p_rx_rcr_ring_t rcrp; 13796f45ec7bSml29623 p_rx_mbox_t mboxp; 13806f45ec7bSml29623 p_nxge_dma_common_t dmap; 13816f45ec7bSml29623 nxge_status_t status = NXGE_OK; 13826f45ec7bSml29623 13836f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 13846f45ec7bSml29623 13856f45ec7bSml29623 (void) nxge_rxdma_stop_channel(nxgep, channel); 13866f45ec7bSml29623 13876f45ec7bSml29623 dma_buf_poolp = nxgep->rx_buf_pool_p; 13886f45ec7bSml29623 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 13896f45ec7bSml29623 13906f45ec7bSml29623 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 13916f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13926f45ec7bSml29623 "<== nxge_rxdma_fixup_channel: buf not allocated")); 13936f45ec7bSml29623 return; 13946f45ec7bSml29623 } 13956f45ec7bSml29623 13966f45ec7bSml29623 ndmas = dma_buf_poolp->ndmas; 
13976f45ec7bSml29623 if (!ndmas) { 13986f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13996f45ec7bSml29623 "<== nxge_rxdma_fixup_channel: no dma allocated")); 14006f45ec7bSml29623 return; 14016f45ec7bSml29623 } 14026f45ec7bSml29623 14036f45ec7bSml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 14046f45ec7bSml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 14056f45ec7bSml29623 rbr_rings = rx_rbr_rings->rbr_rings; 14066f45ec7bSml29623 rcr_rings = rx_rcr_rings->rcr_rings; 14076f45ec7bSml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 14086f45ec7bSml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 14096f45ec7bSml29623 14106f45ec7bSml29623 /* Reinitialize the receive block and completion rings */ 14116f45ec7bSml29623 rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 14126f45ec7bSml29623 rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 14136f45ec7bSml29623 mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 14146f45ec7bSml29623 14156f45ec7bSml29623 14166f45ec7bSml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 14176f45ec7bSml29623 rbrp->rbr_rd_index = 0; 14186f45ec7bSml29623 rcrp->comp_rd_index = 0; 14196f45ec7bSml29623 rcrp->comp_wt_index = 0; 14206f45ec7bSml29623 14216f45ec7bSml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 14226f45ec7bSml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 14236f45ec7bSml29623 14246f45ec7bSml29623 status = nxge_rxdma_start_channel(nxgep, channel, 14256f45ec7bSml29623 rbrp, rcrp, mboxp); 14266f45ec7bSml29623 if (status != NXGE_OK) { 14276f45ec7bSml29623 goto nxge_rxdma_fixup_channel_fail; 14286f45ec7bSml29623 } 14296f45ec7bSml29623 if (status != NXGE_OK) { 14306f45ec7bSml29623 goto nxge_rxdma_fixup_channel_fail; 14316f45ec7bSml29623 } 14326f45ec7bSml29623 14336f45ec7bSml29623 nxge_rxdma_fixup_channel_fail: 14346f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14356f45ec7bSml29623 "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 14366f45ec7bSml29623 14376f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 14386f45ec7bSml29623 } 
14396f45ec7bSml29623 1440da14cebeSEric Cheng /* 1441da14cebeSEric Cheng * Convert an absolute RDC number to a Receive Buffer Ring index. That is, 1442da14cebeSEric Cheng * map <channel> to an index into nxgep->rx_rbr_rings. 1443da14cebeSEric Cheng * (device ring index -> port ring index) 1444da14cebeSEric Cheng */ 14456f45ec7bSml29623 int 14466f45ec7bSml29623 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 14476f45ec7bSml29623 { 1448da14cebeSEric Cheng int i, ndmas; 1449da14cebeSEric Cheng uint16_t rdc; 1450da14cebeSEric Cheng p_rx_rbr_rings_t rx_rbr_rings; 1451da14cebeSEric Cheng p_rx_rbr_ring_t *rbr_rings; 1452da14cebeSEric Cheng 1453da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 1454da14cebeSEric Cheng "==> nxge_rxdma_get_ring_index: channel %d", channel)); 1455da14cebeSEric Cheng 1456da14cebeSEric Cheng rx_rbr_rings = nxgep->rx_rbr_rings; 1457da14cebeSEric Cheng if (rx_rbr_rings == NULL) { 1458da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 1459da14cebeSEric Cheng "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 1460da14cebeSEric Cheng return (-1); 1461da14cebeSEric Cheng } 1462da14cebeSEric Cheng ndmas = rx_rbr_rings->ndmas; 1463da14cebeSEric Cheng if (!ndmas) { 1464da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 1465da14cebeSEric Cheng "<== nxge_rxdma_get_ring_index: no channel")); 1466da14cebeSEric Cheng return (-1); 1467da14cebeSEric Cheng } 1468da14cebeSEric Cheng 1469da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 1470da14cebeSEric Cheng "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 1471da14cebeSEric Cheng 1472da14cebeSEric Cheng rbr_rings = rx_rbr_rings->rbr_rings; 1473da14cebeSEric Cheng for (i = 0; i < ndmas; i++) { 1474da14cebeSEric Cheng rdc = rbr_rings[i]->rdc; 1475da14cebeSEric Cheng if (channel == rdc) { 1476da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 1477da14cebeSEric Cheng "==> nxge_rxdma_get_rbr_ring: channel %d " 1478da14cebeSEric Cheng "(index %d) ring %d", channel, i, rbr_rings[i])); 
1479da14cebeSEric Cheng return (i); 1480da14cebeSEric Cheng } 1481da14cebeSEric Cheng } 1482da14cebeSEric Cheng 1483da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 1484da14cebeSEric Cheng "<== nxge_rxdma_get_rbr_ring_index: not found")); 1485da14cebeSEric Cheng 1486da14cebeSEric Cheng return (-1); 14876f45ec7bSml29623 } 14886f45ec7bSml29623 14896f45ec7bSml29623 p_rx_rbr_ring_t 14906f45ec7bSml29623 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 14916f45ec7bSml29623 { 1492678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1493678453a8Sspeer nxge_channel_t rdc; 14946f45ec7bSml29623 14956f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14966f45ec7bSml29623 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 14976f45ec7bSml29623 1498678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1499678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1500678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: " 1501678453a8Sspeer "NULL ring pointer(s)")); 15026f45ec7bSml29623 return (NULL); 15036f45ec7bSml29623 } 15046f45ec7bSml29623 1505678453a8Sspeer if (set->owned.map == 0) { 15066f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1507678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 1508678453a8Sspeer return (NULL); 1509678453a8Sspeer } 15106f45ec7bSml29623 1511678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1512678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1513678453a8Sspeer rx_rbr_ring_t *ring = 1514678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1515678453a8Sspeer if (ring) { 1516678453a8Sspeer if (channel == ring->rdc) { 15176f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1518678453a8Sspeer "==> nxge_rxdma_get_rbr_ring: " 1519678453a8Sspeer "channel %d ring $%p", rdc, ring)); 1520678453a8Sspeer return (ring); 1521678453a8Sspeer } 1522678453a8Sspeer } 15236f45ec7bSml29623 } 15246f45ec7bSml29623 } 15256f45ec7bSml29623 15266f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15276f45ec7bSml29623 "<== 
nxge_rxdma_get_rbr_ring: not found")); 15286f45ec7bSml29623 15296f45ec7bSml29623 return (NULL); 15306f45ec7bSml29623 } 15316f45ec7bSml29623 15326f45ec7bSml29623 p_rx_rcr_ring_t 15336f45ec7bSml29623 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 15346f45ec7bSml29623 { 1535678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1536678453a8Sspeer nxge_channel_t rdc; 15376f45ec7bSml29623 15386f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15396f45ec7bSml29623 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 15406f45ec7bSml29623 1541678453a8Sspeer if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1542678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1543678453a8Sspeer "<== nxge_rxdma_get_rcr_ring: " 1544678453a8Sspeer "NULL ring pointer(s)")); 15456f45ec7bSml29623 return (NULL); 15466f45ec7bSml29623 } 15476f45ec7bSml29623 1548678453a8Sspeer if (set->owned.map == 0) { 15496f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1550678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 1551678453a8Sspeer return (NULL); 1552678453a8Sspeer } 15536f45ec7bSml29623 1554678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1555678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1556678453a8Sspeer rx_rcr_ring_t *ring = 1557678453a8Sspeer nxgep->rx_rcr_rings->rcr_rings[rdc]; 1558678453a8Sspeer if (ring) { 1559678453a8Sspeer if (channel == ring->rdc) { 15606f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1561678453a8Sspeer "==> nxge_rxdma_get_rcr_ring: " 1562678453a8Sspeer "channel %d ring $%p", rdc, ring)); 1563678453a8Sspeer return (ring); 1564678453a8Sspeer } 1565678453a8Sspeer } 15666f45ec7bSml29623 } 15676f45ec7bSml29623 } 15686f45ec7bSml29623 15696f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15706f45ec7bSml29623 "<== nxge_rxdma_get_rcr_ring: not found")); 15716f45ec7bSml29623 15726f45ec7bSml29623 return (NULL); 15736f45ec7bSml29623 } 15746f45ec7bSml29623 15756f45ec7bSml29623 /* 15766f45ec7bSml29623 * Static functions start here. 
 */

/*
 * nxge_allocb
 *
 * Allocate one rx_msg_t (receive buffer descriptor) and the data buffer
 * it describes, then wrap the buffer in a STREAMS mblk via desballoc().
 *
 * size		size in bytes of the data buffer to carve/allocate
 * pri		priority passed through to desballoc()
 * dmabuf_p	if non-NULL, carve the buffer out of this pre-allocated DMA
 *		region (the region's kaddrp/ioaddr_pp/cookie are advanced by
 *		<size> as a side effect); if NULL, kmem-allocate the buffer
 *
 * Returns the new rx_msg_t, or NULL on any allocation failure (in which
 * case all partial allocations are undone).
 */
static p_rx_msg_t
nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
{
	p_rx_msg_t nxge_mp 		= NULL;
	p_nxge_dma_common_t		dmamsg_p;
	uchar_t 			*buffer;

	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
	if (nxge_mp == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "Allocation of a rx msg failed."));
		goto nxge_allocb_exit;
	}

	nxge_mp->use_buf_pool = B_FALSE;
	if (dmabuf_p) {
		nxge_mp->use_buf_pool = B_TRUE;
		/* Copy the region descriptor, then shrink it to this block. */
		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
		*dmamsg_p = *dmabuf_p;
		dmamsg_p->nblocks = 1;
		dmamsg_p->block_size = size;
		dmamsg_p->alength = size;
		buffer = (uchar_t *)dmabuf_p->kaddrp;

		/*
		 * Advance the parent region past the carved block so the
		 * next call hands out the following <size> bytes.
		 */
		dmabuf_p->kaddrp = (void *)
		    ((char *)dmabuf_p->kaddrp + size);
		dmabuf_p->ioaddr_pp = (void *)
		    ((char *)dmabuf_p->ioaddr_pp + size);
		dmabuf_p->alength -= size;
		dmabuf_p->offset += size;
		dmabuf_p->dma_cookie.dmac_laddress += size;
		dmabuf_p->dma_cookie.dmac_size -= size;

	} else {
		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
		if (buffer == NULL) {
			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
			    "Allocation of a receive page failed."));
			goto nxge_allocb_fail1;
		}
	}

	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
	if (nxge_mp->rx_mblk_p == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
		goto nxge_allocb_fail2;
	}

	nxge_mp->buffer = buffer;
	nxge_mp->block_size = size;
	/* nxge_freeb() runs when the last mblk reference is freed. */
	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
	nxge_mp->ref_cnt = 1;
	nxge_mp->free = B_TRUE;
	nxge_mp->rx_use_bcopy = B_FALSE;

	atomic_inc_32(&nxge_mblks_pending);

	goto nxge_allocb_exit;

nxge_allocb_fail2:
	/* Buffers carved from a DMA pool are owned by the pool; don't free. */
	if (!nxge_mp->use_buf_pool) {
		KMEM_FREE(buffer, size);
	}

nxge_allocb_fail1:
	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
	nxge_mp = NULL;

nxge_allocb_exit:
	return (nxge_mp);
}

/*
 * nxge_dupb
 *
 * Create a zero-copy dup of a slice of an rx_msg_t's buffer: a new mblk
 * pointing at buffer[offset] for <size> bytes, sharing the same free
 * routine. Bumps ref_cnt on success so nxge_freeb() can track when the
 * underlying buffer may be reclaimed. Returns NULL if desballoc() fails.
 */
p_mblk_t
nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
{
	p_mblk_t mp;

	NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
	NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
	    "offset = 0x%08X "
	    "size = 0x%08X",
	    nxge_mp, offset, size));

	mp = desballoc(&nxge_mp->buffer[offset], size,
	    0, &nxge_mp->freeb);
	if (mp == NULL) {
		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
		goto nxge_dupb_exit;
	}
	atomic_inc_32(&nxge_mp->ref_cnt);


nxge_dupb_exit:
	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
	    nxge_mp));
	return (mp);
}

/*
 * nxge_dupb_bcopy
 *
 * Copying alternative to nxge_dupb(): allocate a fresh mblk (with
 * NXGE_RXBUF_EXTRA bytes of headroom) and bcopy <size> bytes from
 * buffer[offset] into it. Does not take a reference on nxge_mp.
 * Returns NULL if allocb() fails.
 *
 * NOTE(review): the failure message says "desballoc failed" but the call
 * is allocb(), and the exit message says "nxge_dupb" — copy/paste from
 * nxge_dupb(); messages only, behavior is unaffected.
 */
p_mblk_t
nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
{
	p_mblk_t mp;
	uchar_t *dp;

	mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
	if (mp == NULL) {
		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
		goto nxge_dupb_bcopy_exit;
	}
	dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
	bcopy((void *)&nxge_mp->buffer[offset], dp, size);
	mp->b_wptr = dp + size;

nxge_dupb_bcopy_exit:
	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
	    nxge_mp));
	return (mp);
}

void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
	p_rx_msg_t rx_msg_p);

/*
 * nxge_post_page
 *
 * Return a receive buffer to the hardware: reset the rx_msg_t's usage
 * state, write its (shifted) address into the next RBR descriptor slot
 * under post_lock, and kick the RDC's RBR by one entry so the hardware
 * sees the new buffer.
 */
void
nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
{
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));

	/* Reuse this buffer */
	rx_msg_p->free = B_FALSE;
	rx_msg_p->cur_usage_cnt = 0;
	rx_msg_p->max_usage_cnt = 0;
	rx_msg_p->pkt_buf_size = 0;

	if (rx_rbr_p->rbr_use_bcopy) {
		/* Buffer is back under driver control; undo bcopy accounting. */
		rx_msg_p->rx_use_bcopy = B_FALSE;
		atomic_dec_32(&rx_rbr_p->rbr_consumed);
	}

	/*
	 * Get the rbr header pointer and its offset index.
	 */
	MUTEX_ENTER(&rx_rbr_p->post_lock);
	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
	    rx_rbr_p->rbr_wrap_mask);
	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
	MUTEX_EXIT(&rx_rbr_p->post_lock);
	/* Tell the hardware one more RBR entry is available. */
	npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
	    rx_rbr_p->rdc, 1);

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "<== nxge_post_page (channel %d post_next_index %d)",
	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
}

/*
 * nxge_freeb
 *
 * STREAMS free routine for rx_msg_t buffers (installed by nxge_allocb()).
 * Atomically drops one reference. When the count reaches zero the
 * rx_msg_t (and, for non-pool buffers, its data buffer) is freed, the
 * owning RBR's refcount is dropped, and the RBR itself is torn down once
 * all its buffers are gone and it is in the RBR_UNMAPPED state.
 * Otherwise, if this was a loaned-up buffer whose last loan just came
 * back (free was set and ref_cnt dropped to 1), the buffer is reposted
 * to the hardware.
 */
void
nxge_freeb(p_rx_msg_t rx_msg_p)
{
	size_t size;
	uchar_t *buffer = NULL;
	int ref_cnt;
	boolean_t free_state = B_FALSE;

	rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
	    "nxge_freeb:rx_msg_p = $%p (block pending %d)",
	    rx_msg_p, nxge_mblks_pending));

	/*
	 * First we need to get the free state, then
	 * atomic decrement the reference count to prevent
	 * the race condition with the interrupt thread that
	 * is processing a loaned up buffer block.
	 */
	free_state = rx_msg_p->free;
	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
	if (!ref_cnt) {
		/* Last reference: tear the buffer down. */
		atomic_dec_32(&nxge_mblks_pending);
		buffer = rx_msg_p->buffer;
		size = rx_msg_p->block_size;
		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
		    "will free: rx_msg_p = $%p (block pending %d)",
		    rx_msg_p, nxge_mblks_pending));

		/* Pool-carved buffers are freed with the pool, not here. */
		if (!rx_msg_p->use_buf_pool) {
			KMEM_FREE(buffer, size);
		}

		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));

		if (ring) {
			/*
			 * Decrement the receive buffer ring's reference
			 * count, too.
			 */
			atomic_dec_32(&ring->rbr_ref_cnt);

			/*
			 * Free the receive buffer ring, if
			 * 1. all the receive buffers have been freed
			 * 2. and we are in the proper state (that is,
			 * we are not UNMAPPING).
			 */
			if (ring->rbr_ref_cnt == 0 &&
			    ring->rbr_state == RBR_UNMAPPED) {
				/*
				 * Free receive data buffers,
				 * buffer index information
				 * (rxring_info) and
				 * the message block ring.
				 */
				NXGE_DEBUG_MSG((NULL, RX_CTL,
				    "nxge_freeb:rx_msg_p = $%p "
				    "(block pending %d) free buffers",
				    rx_msg_p, nxge_mblks_pending));
				nxge_rxdma_databuf_free(ring);
				if (ring->ring_info) {
					KMEM_FREE(ring->ring_info,
					    sizeof (rxring_info_t));
				}

				if (ring->rx_msg_ring) {
					KMEM_FREE(ring->rx_msg_ring,
					    ring->tnblocks *
					    sizeof (p_rx_msg_t));
				}
				KMEM_FREE(ring, sizeof (*ring));
			}
		}
		return;
	}

	/*
	 * Repost buffer.
	 */
	if (free_state && (ref_cnt == 1) && ring) {
		NXGE_DEBUG_MSG((NULL, RX_CTL,
		    "nxge_freeb: post page $%p:", rx_msg_p));
		/* Only repost while the ring is actively posting buffers. */
		if (ring->rbr_state == RBR_POSTING)
			nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
	}

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
}

/*
 * nxge_rx_intr
 *
 * Receive interrupt handler for one RX DMA channel.
 *
 * arg1		the logical device (ldv) that raised the interrupt
 * arg2		the nxge instance (re-derived from ldvp if inconsistent)
 *
 * Under the RCR ring lock: reads the channel's control/status register,
 * drains packets via nxge_rx_pkts_vring(), dispatches error events,
 * acknowledges the W1C status bits, and either disarms (polling mode) or
 * rearms (interrupt mode) the logical device group. The harvested packet
 * chain is delivered to the MAC layer (or the LDOMs guest vio callback)
 * after the lock is dropped. Always returns DDI_INTR_CLAIMED.
 */
uint_t
nxge_rx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t		nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t		ldgp;
	uint8_t			channel;
	npi_handle_t		handle;
	rx_dma_ctl_stat_t	cs;
	p_rx_rcr_ring_t		rcr_ring;
	mblk_t			*mp;

#ifdef NXGE_DEBUG
	rxdma_cfig1_t		cfg;
#endif

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_rx_intr: arg2 $%p arg1 $%p",
		    nxgep, ldvp));

		return (DDI_INTR_CLAIMED);
	}

	/* Trust the ldv's back-pointer over arg2 if they disagree. */
	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}

	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_rx_intr: interface not started or intialized"));
		return (DDI_INTR_CLAIMED);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_intr: arg2 $%p arg1 $%p",
	    nxgep, ldvp));

	/*
	 * This interrupt handler is for a specific
	 * receive dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];

	/*
	 * The RCR ring lock must be held when packets
	 * are being processed and the hardware registers are
	 * being read or written to prevent race condition
	 * among the interrupt thread, the polling thread
	 * (will cause fatal errors such as rcrincon bit set)
	 * and the setting of the poll_flag.
	 */
	MUTEX_ENTER(&rcr_ring->lock);

	/*
	 * Get the control and status for this channel.
	 */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;

	if (!isLDOMguest(nxgep)) {
		if (!nxgep->rx_channel_started[channel]) {
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "<== nxge_rx_intr: channel is not started"));
			MUTEX_EXIT(&rcr_ring->lock);
			return (DDI_INTR_CLAIMED);
		}
	}

	ASSERT(rcr_ring->ldgp == ldgp);
	ASSERT(rcr_ring->ldvp == ldvp);

	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
	    channel,
	    cs.value,
	    cs.bits.hdw.rcrto,
	    cs.bits.hdw.rcrthres));

	mp = nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs);

	/* error events. */
	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
		(void) nxge_rx_err_evnts(nxgep, channel, cs);
	}

	/*
	 * Enable the mailbox update interrupt if we want
	 * to use mailbox. We probably don't need to use
	 * mailbox as it only saves us one pio read.
	 * Also write 1 to rcrthres and rcrto to clear
	 * these two edge triggered bits.
	 */
	cs.value &= RX_DMA_CTL_STAT_WR1C;
	cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
	    cs.value);

	/*
	 * If the polling mode is enabled, disable the interrupt.
	 */
	if (rcr_ring->poll_flag) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
		    "(disabling interrupts)", channel, ldgp, ldvp));
		/*
		 * Disarm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			ldgimgm_t mgm;
			mgm.value = 0;
			mgm.bits.ldw.arm = 0;
			NXGE_REG_WR64(handle,
			    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value);
		}
	} else {
		/*
		 * Rearm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				/* Guests rearm through the hypervisor. */
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t mgm;

				mgm.value = 0;
				mgm.bits.ldw.arm = 1;
				mgm.bits.ldw.timer = ldgp->ldg_timer;

				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_intr: rdc %d ldgp $%p "
		    "exiting ISR (and call mac_rx_ring)", channel, ldgp));
	}
	MUTEX_EXIT(&rcr_ring->lock);

	/* Deliver the packet chain with the ring lock dropped. */
	if (mp) {
		if (!isLDOMguest(nxgep))
			mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp,
			    rcr_ring->rcr_gen_num);
#if defined(sun4v)
		else {			/* isLDOMguest(nxgep) */
			nxge_hio_data_t *nhd = (nxge_hio_data_t *)
			    nxgep->nxge_hw_p->hio;
			nx_vio_fp_t *vio = &nhd->hio.vio;

			if (vio->cb.vio_net_rx_cb) {
				(*vio->cb.vio_net_rx_cb)
				    (nxgep->hio_vr->vhp, mp);
			}
		}
#endif
	}
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
	return (DDI_INTR_CLAIMED);
}

/*
 * Process the packets received in the specified logical device
19986f45ec7bSml29623 * and pass up a chain of message blocks to the upper layer. 1999da14cebeSEric Cheng * The RCR ring lock must be held before calling this function. 20006f45ec7bSml29623 */ 2001da14cebeSEric Cheng static mblk_t * 2002678453a8Sspeer nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 20036f45ec7bSml29623 { 20046f45ec7bSml29623 p_mblk_t mp; 20056f45ec7bSml29623 p_rx_rcr_ring_t rcrp; 20066f45ec7bSml29623 20076f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 2008678453a8Sspeer rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 2009678453a8Sspeer 2010da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2011da14cebeSEric Cheng "==> nxge_rx_pkts_vring: (calling nxge_rx_pkts)rdc %d " 2012da14cebeSEric Cheng "rcr_mac_handle $%p ", rcrp->rdc, rcrp->rcr_mac_handle)); 2013678453a8Sspeer if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 20146f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20156f45ec7bSml29623 "<== nxge_rx_pkts_vring: no mp")); 2016da14cebeSEric Cheng return (NULL); 20176f45ec7bSml29623 } 20186f45ec7bSml29623 20196f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 20206f45ec7bSml29623 mp)); 20216f45ec7bSml29623 20226f45ec7bSml29623 #ifdef NXGE_DEBUG 20236f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20246f45ec7bSml29623 "==> nxge_rx_pkts_vring:calling mac_rx " 20256f45ec7bSml29623 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 20266f45ec7bSml29623 "mac_handle $%p", 20276f45ec7bSml29623 mp->b_wptr - mp->b_rptr, 20286f45ec7bSml29623 mp, mp->b_cont, mp->b_next, 20296f45ec7bSml29623 rcrp, rcrp->rcr_mac_handle)); 20306f45ec7bSml29623 20316f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20326f45ec7bSml29623 "==> nxge_rx_pkts_vring: dump packets " 20336f45ec7bSml29623 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 20346f45ec7bSml29623 mp, 20356f45ec7bSml29623 mp->b_rptr, 20366f45ec7bSml29623 mp->b_wptr, 20376f45ec7bSml29623 nxge_dump_packet((char *)mp->b_rptr, 
20386f45ec7bSml29623 mp->b_wptr - mp->b_rptr))); 20396f45ec7bSml29623 if (mp->b_cont) { 20406f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20416f45ec7bSml29623 "==> nxge_rx_pkts_vring: dump b_cont packets " 20426f45ec7bSml29623 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 20436f45ec7bSml29623 mp->b_cont, 20446f45ec7bSml29623 mp->b_cont->b_rptr, 20456f45ec7bSml29623 mp->b_cont->b_wptr, 20466f45ec7bSml29623 nxge_dump_packet((char *)mp->b_cont->b_rptr, 20476f45ec7bSml29623 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 20486f45ec7bSml29623 } 20496f45ec7bSml29623 if (mp->b_next) { 20506f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20516f45ec7bSml29623 "==> nxge_rx_pkts_vring: dump next packets " 20526f45ec7bSml29623 "(b_rptr $%p): %s", 20536f45ec7bSml29623 mp->b_next->b_rptr, 20546f45ec7bSml29623 nxge_dump_packet((char *)mp->b_next->b_rptr, 20556f45ec7bSml29623 mp->b_next->b_wptr - mp->b_next->b_rptr))); 20566f45ec7bSml29623 } 20576f45ec7bSml29623 #endif 2058da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2059da14cebeSEric Cheng "<== nxge_rx_pkts_vring: returning rdc %d rcr_mac_handle $%p ", 2060da14cebeSEric Cheng rcrp->rdc, rcrp->rcr_mac_handle)); 20616f45ec7bSml29623 2062da14cebeSEric Cheng return (mp); 20636f45ec7bSml29623 } 20646f45ec7bSml29623 20656f45ec7bSml29623 20666f45ec7bSml29623 /* 20676f45ec7bSml29623 * This routine is the main packet receive processing function. 20686f45ec7bSml29623 * It gets the packet type, error code, and buffer related 20696f45ec7bSml29623 * information from the receive completion entry. 20706f45ec7bSml29623 * How many completion entries to process is based on the number of packets 20716f45ec7bSml29623 * queued by the hardware, a hardware maintained tail pointer 20726f45ec7bSml29623 * and a configurable receive packet count. 20736f45ec7bSml29623 * 20746f45ec7bSml29623 * A chain of message blocks will be created as result of processing 20756f45ec7bSml29623 * the completion entries. 
This chain of message blocks will be returned and 20766f45ec7bSml29623 * a hardware control status register will be updated with the number of 20776f45ec7bSml29623 * packets were removed from the hardware queue. 20786f45ec7bSml29623 * 2079da14cebeSEric Cheng * The RCR ring lock is held when entering this function. 20806f45ec7bSml29623 */ 2081678453a8Sspeer static mblk_t * 2082678453a8Sspeer nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 2083678453a8Sspeer int bytes_to_pickup) 20846f45ec7bSml29623 { 20856f45ec7bSml29623 npi_handle_t handle; 20866f45ec7bSml29623 uint8_t channel; 20876f45ec7bSml29623 uint32_t comp_rd_index; 20886f45ec7bSml29623 p_rcr_entry_t rcr_desc_rd_head_p; 20896f45ec7bSml29623 p_rcr_entry_t rcr_desc_rd_head_pp; 20906f45ec7bSml29623 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 20916f45ec7bSml29623 uint16_t qlen, nrcr_read, npkt_read; 20926f45ec7bSml29623 uint32_t qlen_hw; 20936f45ec7bSml29623 boolean_t multi; 20946f45ec7bSml29623 rcrcfig_b_t rcr_cfg_b; 2095678453a8Sspeer int totallen = 0; 20966f45ec7bSml29623 #if defined(_BIG_ENDIAN) 20976f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 20986f45ec7bSml29623 #endif 20996f45ec7bSml29623 2100da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: " 2101678453a8Sspeer "channel %d", rcr_p->rdc)); 21026f45ec7bSml29623 21036f45ec7bSml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 21046f45ec7bSml29623 return (NULL); 21056f45ec7bSml29623 } 21066f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 21076f45ec7bSml29623 channel = rcr_p->rdc; 21086f45ec7bSml29623 21096f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21106f45ec7bSml29623 "==> nxge_rx_pkts: START: rcr channel %d " 21116f45ec7bSml29623 "head_p $%p head_pp $%p index %d ", 21126f45ec7bSml29623 channel, rcr_p->rcr_desc_rd_head_p, 21136f45ec7bSml29623 rcr_p->rcr_desc_rd_head_pp, 21146f45ec7bSml29623 rcr_p->comp_rd_index)); 21156f45ec7bSml29623 21166f45ec7bSml29623 21176f45ec7bSml29623 #if 
!defined(_BIG_ENDIAN) 21186f45ec7bSml29623 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 21196f45ec7bSml29623 #else 21206f45ec7bSml29623 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 21216f45ec7bSml29623 if (rs != NPI_SUCCESS) { 2122678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 21236f45ec7bSml29623 "channel %d, get qlen failed 0x%08x", 2124678453a8Sspeer channel, rs)); 21256f45ec7bSml29623 return (NULL); 21266f45ec7bSml29623 } 21276f45ec7bSml29623 #endif 21286f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 21296f45ec7bSml29623 "qlen %d", channel, qlen)); 21306f45ec7bSml29623 21316f45ec7bSml29623 21326f45ec7bSml29623 21336f45ec7bSml29623 if (!qlen) { 2134da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 21356f45ec7bSml29623 "==> nxge_rx_pkts:rcr channel %d " 21366f45ec7bSml29623 "qlen %d (no pkts)", channel, qlen)); 21376f45ec7bSml29623 21386f45ec7bSml29623 return (NULL); 21396f45ec7bSml29623 } 21406f45ec7bSml29623 21416f45ec7bSml29623 comp_rd_index = rcr_p->comp_rd_index; 21426f45ec7bSml29623 21436f45ec7bSml29623 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 21446f45ec7bSml29623 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 21456f45ec7bSml29623 nrcr_read = npkt_read = 0; 21466f45ec7bSml29623 21476f45ec7bSml29623 /* 21486f45ec7bSml29623 * Number of packets queued 21496f45ec7bSml29623 * (The jumbo or multi packet will be counted as only one 21506f45ec7bSml29623 * packets and it may take up more than one completion entry). 21516f45ec7bSml29623 */ 21526f45ec7bSml29623 qlen_hw = (qlen < nxge_max_rx_pkts) ? 
21536f45ec7bSml29623 qlen : nxge_max_rx_pkts; 21546f45ec7bSml29623 head_mp = NULL; 21556f45ec7bSml29623 tail_mp = &head_mp; 21566f45ec7bSml29623 nmp = mp_cont = NULL; 21576f45ec7bSml29623 multi = B_FALSE; 21586f45ec7bSml29623 21596f45ec7bSml29623 while (qlen_hw) { 21606f45ec7bSml29623 21616f45ec7bSml29623 #ifdef NXGE_DEBUG 21626f45ec7bSml29623 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 21636f45ec7bSml29623 #endif 21646f45ec7bSml29623 /* 21656f45ec7bSml29623 * Process one completion ring entry. 21666f45ec7bSml29623 */ 21676f45ec7bSml29623 nxge_receive_packet(nxgep, 21686f45ec7bSml29623 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 21696f45ec7bSml29623 21706f45ec7bSml29623 /* 21716f45ec7bSml29623 * message chaining modes 21726f45ec7bSml29623 */ 21736f45ec7bSml29623 if (nmp) { 21746f45ec7bSml29623 nmp->b_next = NULL; 21756f45ec7bSml29623 if (!multi && !mp_cont) { /* frame fits a partition */ 21766f45ec7bSml29623 *tail_mp = nmp; 21776f45ec7bSml29623 tail_mp = &nmp->b_next; 2178678453a8Sspeer totallen += MBLKL(nmp); 21796f45ec7bSml29623 nmp = NULL; 21806f45ec7bSml29623 } else if (multi && !mp_cont) { /* first segment */ 21816f45ec7bSml29623 *tail_mp = nmp; 21826f45ec7bSml29623 tail_mp = &nmp->b_cont; 2183678453a8Sspeer totallen += MBLKL(nmp); 21846f45ec7bSml29623 } else if (multi && mp_cont) { /* mid of multi segs */ 21856f45ec7bSml29623 *tail_mp = mp_cont; 21866f45ec7bSml29623 tail_mp = &mp_cont->b_cont; 2187678453a8Sspeer totallen += MBLKL(mp_cont); 21886f45ec7bSml29623 } else if (!multi && mp_cont) { /* last segment */ 21896f45ec7bSml29623 *tail_mp = mp_cont; 21906f45ec7bSml29623 tail_mp = &nmp->b_next; 2191678453a8Sspeer totallen += MBLKL(mp_cont); 21926f45ec7bSml29623 nmp = NULL; 21936f45ec7bSml29623 } 21946f45ec7bSml29623 } 21956f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21966f45ec7bSml29623 "==> nxge_rx_pkts: loop: rcr channel %d " 21976f45ec7bSml29623 "before updating: multi %d " 21986f45ec7bSml29623 "nrcr_read %d " 21996f45ec7bSml29623 "npk read 
%d " 22006f45ec7bSml29623 "head_pp $%p index %d ", 22016f45ec7bSml29623 channel, 22026f45ec7bSml29623 multi, 22036f45ec7bSml29623 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 22046f45ec7bSml29623 comp_rd_index)); 22056f45ec7bSml29623 22066f45ec7bSml29623 if (!multi) { 22076f45ec7bSml29623 qlen_hw--; 22086f45ec7bSml29623 npkt_read++; 22096f45ec7bSml29623 } 22106f45ec7bSml29623 22116f45ec7bSml29623 /* 22126f45ec7bSml29623 * Update the next read entry. 22136f45ec7bSml29623 */ 22146f45ec7bSml29623 comp_rd_index = NEXT_ENTRY(comp_rd_index, 22156f45ec7bSml29623 rcr_p->comp_wrap_mask); 22166f45ec7bSml29623 22176f45ec7bSml29623 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 22186f45ec7bSml29623 rcr_p->rcr_desc_first_p, 22196f45ec7bSml29623 rcr_p->rcr_desc_last_p); 22206f45ec7bSml29623 22216f45ec7bSml29623 nrcr_read++; 22226f45ec7bSml29623 22236f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22246f45ec7bSml29623 "<== nxge_rx_pkts: (SAM, process one packet) " 22256f45ec7bSml29623 "nrcr_read %d", 22266f45ec7bSml29623 nrcr_read)); 22276f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22286f45ec7bSml29623 "==> nxge_rx_pkts: loop: rcr channel %d " 22296f45ec7bSml29623 "multi %d " 22306f45ec7bSml29623 "nrcr_read %d " 22316f45ec7bSml29623 "npk read %d " 22326f45ec7bSml29623 "head_pp $%p index %d ", 22336f45ec7bSml29623 channel, 22346f45ec7bSml29623 multi, 22356f45ec7bSml29623 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 22366f45ec7bSml29623 comp_rd_index)); 22376f45ec7bSml29623 2238678453a8Sspeer if ((bytes_to_pickup != -1) && 2239678453a8Sspeer (totallen >= bytes_to_pickup)) { 2240678453a8Sspeer break; 2241678453a8Sspeer } 2242da14cebeSEric Cheng 2243da14cebeSEric Cheng /* limit the number of packets for interrupt */ 2244da14cebeSEric Cheng if (!(rcr_p->poll_flag)) { 2245da14cebeSEric Cheng if (npkt_read == nxge_max_intr_pkts) { 2246da14cebeSEric Cheng break; 2247da14cebeSEric Cheng } 2248da14cebeSEric Cheng } 22496f45ec7bSml29623 } 22506f45ec7bSml29623 22516f45ec7bSml29623 
rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 22526f45ec7bSml29623 rcr_p->comp_rd_index = comp_rd_index; 22536f45ec7bSml29623 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 22546f45ec7bSml29623 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 22556f45ec7bSml29623 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2256*7b26d9ffSSantwona Behera 2257*7b26d9ffSSantwona Behera rcr_p->intr_timeout = (nxgep->intr_timeout < 2258*7b26d9ffSSantwona Behera NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 2259*7b26d9ffSSantwona Behera nxgep->intr_timeout; 2260*7b26d9ffSSantwona Behera 2261*7b26d9ffSSantwona Behera rcr_p->intr_threshold = (nxgep->intr_threshold < 2262*7b26d9ffSSantwona Behera NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 2263*7b26d9ffSSantwona Behera nxgep->intr_threshold; 2264*7b26d9ffSSantwona Behera 22656f45ec7bSml29623 rcr_cfg_b.value = 0x0ULL; 22666f45ec7bSml29623 rcr_cfg_b.bits.ldw.entout = 1; 22676f45ec7bSml29623 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 22686f45ec7bSml29623 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2269*7b26d9ffSSantwona Behera 22706f45ec7bSml29623 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 22716f45ec7bSml29623 channel, rcr_cfg_b.value); 22726f45ec7bSml29623 } 22736f45ec7bSml29623 22746f45ec7bSml29623 cs.bits.ldw.pktread = npkt_read; 22756f45ec7bSml29623 cs.bits.ldw.ptrread = nrcr_read; 22766f45ec7bSml29623 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 22776f45ec7bSml29623 channel, cs.value); 22786f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22796f45ec7bSml29623 "==> nxge_rx_pkts: EXIT: rcr channel %d " 22806f45ec7bSml29623 "head_pp $%p index %016llx ", 22816f45ec7bSml29623 channel, 22826f45ec7bSml29623 rcr_p->rcr_desc_rd_head_pp, 22836f45ec7bSml29623 rcr_p->comp_rd_index)); 22846f45ec7bSml29623 /* 22856f45ec7bSml29623 * Update RCR buffer pointer read and number of packets 22866f45ec7bSml29623 * read. 
22876f45ec7bSml29623 */ 22886f45ec7bSml29623 2289da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 2290da14cebeSEric Cheng "channel %d", rcr_p->rdc)); 2291da14cebeSEric Cheng 22926f45ec7bSml29623 return (head_mp); 22936f45ec7bSml29623 } 22946f45ec7bSml29623 22956f45ec7bSml29623 void 22966f45ec7bSml29623 nxge_receive_packet(p_nxge_t nxgep, 22976f45ec7bSml29623 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 22986f45ec7bSml29623 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 22996f45ec7bSml29623 { 23006f45ec7bSml29623 p_mblk_t nmp = NULL; 23016f45ec7bSml29623 uint64_t multi; 23026f45ec7bSml29623 uint64_t dcf_err; 23036f45ec7bSml29623 uint8_t channel; 23046f45ec7bSml29623 23056f45ec7bSml29623 boolean_t first_entry = B_TRUE; 23066f45ec7bSml29623 boolean_t is_tcp_udp = B_FALSE; 23076f45ec7bSml29623 boolean_t buffer_free = B_FALSE; 23086f45ec7bSml29623 boolean_t error_send_up = B_FALSE; 23096f45ec7bSml29623 uint8_t error_type; 23106f45ec7bSml29623 uint16_t l2_len; 23116f45ec7bSml29623 uint16_t skip_len; 23126f45ec7bSml29623 uint8_t pktbufsz_type; 23136f45ec7bSml29623 uint64_t rcr_entry; 23146f45ec7bSml29623 uint64_t *pkt_buf_addr_pp; 23156f45ec7bSml29623 uint64_t *pkt_buf_addr_p; 23166f45ec7bSml29623 uint32_t buf_offset; 23176f45ec7bSml29623 uint32_t bsize; 23186f45ec7bSml29623 uint32_t error_disp_cnt; 23196f45ec7bSml29623 uint32_t msg_index; 23206f45ec7bSml29623 p_rx_rbr_ring_t rx_rbr_p; 23216f45ec7bSml29623 p_rx_msg_t *rx_msg_ring_p; 23226f45ec7bSml29623 p_rx_msg_t rx_msg_p; 23236f45ec7bSml29623 uint16_t sw_offset_bytes = 0, hdr_size = 0; 23246f45ec7bSml29623 nxge_status_t status = NXGE_OK; 23256f45ec7bSml29623 boolean_t is_valid = B_FALSE; 23266f45ec7bSml29623 p_nxge_rx_ring_stats_t rdc_stats; 23276f45ec7bSml29623 uint32_t bytes_read; 23286f45ec7bSml29623 uint64_t pkt_type; 23296f45ec7bSml29623 uint64_t frag; 23304202ea4bSsbehera boolean_t pkt_too_long_err = B_FALSE; 23316f45ec7bSml29623 #ifdef NXGE_DEBUG 
23326f45ec7bSml29623 int dump_len; 23336f45ec7bSml29623 #endif 23346f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 23356f45ec7bSml29623 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 23366f45ec7bSml29623 23376f45ec7bSml29623 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 23386f45ec7bSml29623 23396f45ec7bSml29623 multi = (rcr_entry & RCR_MULTI_MASK); 23406f45ec7bSml29623 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 23416f45ec7bSml29623 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 23426f45ec7bSml29623 23436f45ec7bSml29623 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 23446f45ec7bSml29623 frag = (rcr_entry & RCR_FRAG_MASK); 23456f45ec7bSml29623 23466f45ec7bSml29623 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 23476f45ec7bSml29623 23486f45ec7bSml29623 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 23496f45ec7bSml29623 RCR_PKTBUFSZ_SHIFT); 2350adfcba55Sjoycey #if defined(__i386) 2351adfcba55Sjoycey pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2352adfcba55Sjoycey RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2353adfcba55Sjoycey #else 23546f45ec7bSml29623 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 23556f45ec7bSml29623 RCR_PKT_BUF_ADDR_SHIFT); 2356adfcba55Sjoycey #endif 23576f45ec7bSml29623 23586f45ec7bSml29623 channel = rcr_p->rdc; 23596f45ec7bSml29623 23606f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23616f45ec7bSml29623 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 23626f45ec7bSml29623 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 23636f45ec7bSml29623 "error_type 0x%x pkt_type 0x%x " 23646f45ec7bSml29623 "pktbufsz_type %d ", 23656f45ec7bSml29623 rcr_desc_rd_head_p, 23666f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len, 23676f45ec7bSml29623 multi, 23686f45ec7bSml29623 error_type, 23696f45ec7bSml29623 pkt_type, 23706f45ec7bSml29623 pktbufsz_type)); 23716f45ec7bSml29623 23726f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23736f45ec7bSml29623 "==> 
nxge_receive_packet: entryp $%p entry 0x%0llx " 23746f45ec7bSml29623 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 23756f45ec7bSml29623 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 23766f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len, 23776f45ec7bSml29623 multi, 23786f45ec7bSml29623 error_type, 23796f45ec7bSml29623 pkt_type)); 23806f45ec7bSml29623 23816f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23826f45ec7bSml29623 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 23836f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 23846f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 23856f45ec7bSml29623 23866f45ec7bSml29623 /* get the stats ptr */ 23876f45ec7bSml29623 rdc_stats = rcr_p->rdc_stats; 23886f45ec7bSml29623 23896f45ec7bSml29623 if (!l2_len) { 23906f45ec7bSml29623 23916f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23926f45ec7bSml29623 "<== nxge_receive_packet: failed: l2 length is 0.")); 23936f45ec7bSml29623 return; 23946f45ec7bSml29623 } 23956f45ec7bSml29623 23964202ea4bSsbehera /* 2397da14cebeSEric Cheng * Software workaround for BMAC hardware limitation that allows 23984202ea4bSsbehera * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 23994202ea4bSsbehera * instead of 0x2400 for jumbo. 24004202ea4bSsbehera */ 24014202ea4bSsbehera if (l2_len > nxgep->mac.maxframesize) { 24024202ea4bSsbehera pkt_too_long_err = B_TRUE; 24034202ea4bSsbehera } 24044202ea4bSsbehera 240556d930aeSspeer /* Hardware sends us 4 bytes of CRC as no stripping is done. 
*/ 240656d930aeSspeer l2_len -= ETHERFCSL; 240756d930aeSspeer 24086f45ec7bSml29623 /* shift 6 bits to get the full io address */ 2409adfcba55Sjoycey #if defined(__i386) 2410adfcba55Sjoycey pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2411adfcba55Sjoycey RCR_PKT_BUF_ADDR_SHIFT_FULL); 2412adfcba55Sjoycey #else 24136f45ec7bSml29623 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 24146f45ec7bSml29623 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2415adfcba55Sjoycey #endif 24166f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24176f45ec7bSml29623 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 24186f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 24196f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 24206f45ec7bSml29623 24216f45ec7bSml29623 rx_rbr_p = rcr_p->rx_rbr_p; 24226f45ec7bSml29623 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 24236f45ec7bSml29623 24246f45ec7bSml29623 if (first_entry) { 24256f45ec7bSml29623 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 24266f45ec7bSml29623 RXDMA_HDR_SIZE_DEFAULT); 24276f45ec7bSml29623 24286f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24296f45ec7bSml29623 "==> nxge_receive_packet: first entry 0x%016llx " 24306f45ec7bSml29623 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 24316f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len, 24326f45ec7bSml29623 hdr_size)); 24336f45ec7bSml29623 } 24346f45ec7bSml29623 24356f45ec7bSml29623 MUTEX_ENTER(&rx_rbr_p->lock); 24366f45ec7bSml29623 24376f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24386f45ec7bSml29623 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 24396f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 24406f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 24416f45ec7bSml29623 24426f45ec7bSml29623 /* 24436f45ec7bSml29623 * Packet buffer address in the completion entry points 24446f45ec7bSml29623 * to the starting buffer address (offset 0). 24456f45ec7bSml29623 * Use the starting buffer address to locate the corresponding 24466f45ec7bSml29623 * kernel address. 
24476f45ec7bSml29623 */ 24486f45ec7bSml29623 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 24496f45ec7bSml29623 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 24506f45ec7bSml29623 &buf_offset, 24516f45ec7bSml29623 &msg_index); 24526f45ec7bSml29623 24536f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24546f45ec7bSml29623 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 24556f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 24566f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 24576f45ec7bSml29623 24586f45ec7bSml29623 if (status != NXGE_OK) { 24596f45ec7bSml29623 MUTEX_EXIT(&rx_rbr_p->lock); 24606f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24616f45ec7bSml29623 "<== nxge_receive_packet: found vaddr failed %d", 24626f45ec7bSml29623 status)); 24636f45ec7bSml29623 return; 24646f45ec7bSml29623 } 24656f45ec7bSml29623 24666f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24676f45ec7bSml29623 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 24686f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 24696f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 24706f45ec7bSml29623 24716f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24726f45ec7bSml29623 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 24736f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 24746f45ec7bSml29623 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 24756f45ec7bSml29623 24766f45ec7bSml29623 rx_msg_p = rx_msg_ring_p[msg_index]; 24776f45ec7bSml29623 24786f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24796f45ec7bSml29623 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 24806f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 24816f45ec7bSml29623 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 24826f45ec7bSml29623 24836f45ec7bSml29623 switch (pktbufsz_type) { 24846f45ec7bSml29623 case RCR_PKTBUFSZ_0: 24856f45ec7bSml29623 bsize = rx_rbr_p->pkt_buf_size0_bytes; 24866f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24876f45ec7bSml29623 "==> nxge_receive_packet: 0 
buf %d", bsize)); 24886f45ec7bSml29623 break; 24896f45ec7bSml29623 case RCR_PKTBUFSZ_1: 24906f45ec7bSml29623 bsize = rx_rbr_p->pkt_buf_size1_bytes; 24916f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24926f45ec7bSml29623 "==> nxge_receive_packet: 1 buf %d", bsize)); 24936f45ec7bSml29623 break; 24946f45ec7bSml29623 case RCR_PKTBUFSZ_2: 24956f45ec7bSml29623 bsize = rx_rbr_p->pkt_buf_size2_bytes; 24966f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24976f45ec7bSml29623 "==> nxge_receive_packet: 2 buf %d", bsize)); 24986f45ec7bSml29623 break; 24996f45ec7bSml29623 case RCR_SINGLE_BLOCK: 25006f45ec7bSml29623 bsize = rx_msg_p->block_size; 25016f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25026f45ec7bSml29623 "==> nxge_receive_packet: single %d", bsize)); 25036f45ec7bSml29623 25046f45ec7bSml29623 break; 25056f45ec7bSml29623 default: 25066f45ec7bSml29623 MUTEX_EXIT(&rx_rbr_p->lock); 25076f45ec7bSml29623 return; 25086f45ec7bSml29623 } 25096f45ec7bSml29623 25106f45ec7bSml29623 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 25116f45ec7bSml29623 (buf_offset + sw_offset_bytes), 25126f45ec7bSml29623 (hdr_size + l2_len), 25136f45ec7bSml29623 DDI_DMA_SYNC_FORCPU); 25146f45ec7bSml29623 25156f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25166f45ec7bSml29623 "==> nxge_receive_packet: after first dump:usage count")); 25176f45ec7bSml29623 25186f45ec7bSml29623 if (rx_msg_p->cur_usage_cnt == 0) { 25196f45ec7bSml29623 if (rx_rbr_p->rbr_use_bcopy) { 25206f45ec7bSml29623 atomic_inc_32(&rx_rbr_p->rbr_consumed); 25216f45ec7bSml29623 if (rx_rbr_p->rbr_consumed < 25226f45ec7bSml29623 rx_rbr_p->rbr_threshold_hi) { 25236f45ec7bSml29623 if (rx_rbr_p->rbr_threshold_lo == 0 || 25246f45ec7bSml29623 ((rx_rbr_p->rbr_consumed >= 25256f45ec7bSml29623 rx_rbr_p->rbr_threshold_lo) && 25266f45ec7bSml29623 (rx_rbr_p->rbr_bufsize_type >= 25276f45ec7bSml29623 pktbufsz_type))) { 25286f45ec7bSml29623 rx_msg_p->rx_use_bcopy = B_TRUE; 25296f45ec7bSml29623 } 25306f45ec7bSml29623 } else { 25316f45ec7bSml29623 
rx_msg_p->rx_use_bcopy = B_TRUE; 25326f45ec7bSml29623 } 25336f45ec7bSml29623 } 25346f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25356f45ec7bSml29623 "==> nxge_receive_packet: buf %d (new block) ", 25366f45ec7bSml29623 bsize)); 25376f45ec7bSml29623 25386f45ec7bSml29623 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 25396f45ec7bSml29623 rx_msg_p->pkt_buf_size = bsize; 25406f45ec7bSml29623 rx_msg_p->cur_usage_cnt = 1; 25416f45ec7bSml29623 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 25426f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25436f45ec7bSml29623 "==> nxge_receive_packet: buf %d " 25446f45ec7bSml29623 "(single block) ", 25456f45ec7bSml29623 bsize)); 25466f45ec7bSml29623 /* 25476f45ec7bSml29623 * Buffer can be reused once the free function 25486f45ec7bSml29623 * is called. 25496f45ec7bSml29623 */ 25506f45ec7bSml29623 rx_msg_p->max_usage_cnt = 1; 25516f45ec7bSml29623 buffer_free = B_TRUE; 25526f45ec7bSml29623 } else { 25536f45ec7bSml29623 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 25546f45ec7bSml29623 if (rx_msg_p->max_usage_cnt == 1) { 25556f45ec7bSml29623 buffer_free = B_TRUE; 25566f45ec7bSml29623 } 25576f45ec7bSml29623 } 25586f45ec7bSml29623 } else { 25596f45ec7bSml29623 rx_msg_p->cur_usage_cnt++; 25606f45ec7bSml29623 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 25616f45ec7bSml29623 buffer_free = B_TRUE; 25626f45ec7bSml29623 } 25636f45ec7bSml29623 } 25646f45ec7bSml29623 25656f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 25666f45ec7bSml29623 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 25676f45ec7bSml29623 msg_index, l2_len, 25686f45ec7bSml29623 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 25696f45ec7bSml29623 25704202ea4bSsbehera if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 25716f45ec7bSml29623 rdc_stats->ierrors++; 25726f45ec7bSml29623 if (dcf_err) { 25736f45ec7bSml29623 rdc_stats->dcf_err++; 25746f45ec7bSml29623 #ifdef NXGE_DEBUG 25756f45ec7bSml29623 if (!rdc_stats->dcf_err) { 25766f45ec7bSml29623 
NXGE_DEBUG_MSG((nxgep, RX_CTL, 25776f45ec7bSml29623 "nxge_receive_packet: channel %d dcf_err rcr" 25786f45ec7bSml29623 " 0x%llx", channel, rcr_entry)); 25796f45ec7bSml29623 } 25806f45ec7bSml29623 #endif 25816f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 25826f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_DCF_ERR); 25834202ea4bSsbehera } else if (pkt_too_long_err) { 25844202ea4bSsbehera rdc_stats->pkt_too_long_err++; 25854202ea4bSsbehera NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 25864202ea4bSsbehera " channel %d packet length [%d] > " 25874202ea4bSsbehera "maxframesize [%d]", channel, l2_len + ETHERFCSL, 25884202ea4bSsbehera nxgep->mac.maxframesize)); 25896f45ec7bSml29623 } else { 25906f45ec7bSml29623 /* Update error stats */ 25916f45ec7bSml29623 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 25926f45ec7bSml29623 rdc_stats->errlog.compl_err_type = error_type; 25936f45ec7bSml29623 25946f45ec7bSml29623 switch (error_type) { 2595f6485eecSyc148097 /* 2596f6485eecSyc148097 * Do not send FMA ereport for RCR_L2_ERROR and 2597f6485eecSyc148097 * RCR_L4_CSUM_ERROR because most likely they indicate 2598f6485eecSyc148097 * back pressure rather than HW failures. 
2599f6485eecSyc148097 */ 26006f45ec7bSml29623 case RCR_L2_ERROR: 26016f45ec7bSml29623 rdc_stats->l2_err++; 26026f45ec7bSml29623 if (rdc_stats->l2_err < 260353f3d8ecSyc148097 error_disp_cnt) { 260453f3d8ecSyc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26056f45ec7bSml29623 " nxge_receive_packet:" 26066f45ec7bSml29623 " channel %d RCR L2_ERROR", 26076f45ec7bSml29623 channel)); 260853f3d8ecSyc148097 } 26096f45ec7bSml29623 break; 26106f45ec7bSml29623 case RCR_L4_CSUM_ERROR: 26116f45ec7bSml29623 error_send_up = B_TRUE; 26126f45ec7bSml29623 rdc_stats->l4_cksum_err++; 26136f45ec7bSml29623 if (rdc_stats->l4_cksum_err < 261453f3d8ecSyc148097 error_disp_cnt) { 261553f3d8ecSyc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26166f45ec7bSml29623 " nxge_receive_packet:" 26176f45ec7bSml29623 " channel %d" 261853f3d8ecSyc148097 " RCR L4_CSUM_ERROR", channel)); 261953f3d8ecSyc148097 } 26206f45ec7bSml29623 break; 2621f6485eecSyc148097 /* 2622f6485eecSyc148097 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2623f6485eecSyc148097 * RCR_ZCP_SOFT_ERROR because they reflect the same 2624f6485eecSyc148097 * FFLP and ZCP errors that have been reported by 2625f6485eecSyc148097 * nxge_fflp.c and nxge_zcp.c. 
2626f6485eecSyc148097 */ 26276f45ec7bSml29623 case RCR_FFLP_SOFT_ERROR: 26286f45ec7bSml29623 error_send_up = B_TRUE; 26296f45ec7bSml29623 rdc_stats->fflp_soft_err++; 26306f45ec7bSml29623 if (rdc_stats->fflp_soft_err < 263153f3d8ecSyc148097 error_disp_cnt) { 26326f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, 26336f45ec7bSml29623 NXGE_ERR_CTL, 26346f45ec7bSml29623 " nxge_receive_packet:" 26356f45ec7bSml29623 " channel %d" 263653f3d8ecSyc148097 " RCR FFLP_SOFT_ERROR", channel)); 263753f3d8ecSyc148097 } 26386f45ec7bSml29623 break; 26396f45ec7bSml29623 case RCR_ZCP_SOFT_ERROR: 26406f45ec7bSml29623 error_send_up = B_TRUE; 26416f45ec7bSml29623 rdc_stats->fflp_soft_err++; 26426f45ec7bSml29623 if (rdc_stats->zcp_soft_err < 26436f45ec7bSml29623 error_disp_cnt) 264453f3d8ecSyc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 264553f3d8ecSyc148097 " nxge_receive_packet: Channel %d" 264653f3d8ecSyc148097 " RCR ZCP_SOFT_ERROR", channel)); 26476f45ec7bSml29623 break; 26486f45ec7bSml29623 default: 264953f3d8ecSyc148097 rdc_stats->rcr_unknown_err++; 265053f3d8ecSyc148097 if (rdc_stats->rcr_unknown_err 265153f3d8ecSyc148097 < error_disp_cnt) { 26526f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 265353f3d8ecSyc148097 " nxge_receive_packet: Channel %d" 265453f3d8ecSyc148097 " RCR entry 0x%llx error 0x%x", 265553f3d8ecSyc148097 rcr_entry, channel, error_type)); 265653f3d8ecSyc148097 } 26576f45ec7bSml29623 break; 26586f45ec7bSml29623 } 26596f45ec7bSml29623 } 26606f45ec7bSml29623 26616f45ec7bSml29623 /* 26626f45ec7bSml29623 * Update and repost buffer block if max usage 26636f45ec7bSml29623 * count is reached. 
26646f45ec7bSml29623 */ 26656f45ec7bSml29623 if (error_send_up == B_FALSE) { 2666958cea9eSml29623 atomic_inc_32(&rx_msg_p->ref_cnt); 26676f45ec7bSml29623 if (buffer_free == B_TRUE) { 26686f45ec7bSml29623 rx_msg_p->free = B_TRUE; 26696f45ec7bSml29623 } 26706f45ec7bSml29623 26716f45ec7bSml29623 MUTEX_EXIT(&rx_rbr_p->lock); 26726f45ec7bSml29623 nxge_freeb(rx_msg_p); 26736f45ec7bSml29623 return; 26746f45ec7bSml29623 } 26756f45ec7bSml29623 } 26766f45ec7bSml29623 26776f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 26786f45ec7bSml29623 "==> nxge_receive_packet: DMA sync second ")); 26796f45ec7bSml29623 268053f3d8ecSyc148097 bytes_read = rcr_p->rcvd_pkt_bytes; 26816f45ec7bSml29623 skip_len = sw_offset_bytes + hdr_size; 26826f45ec7bSml29623 if (!rx_msg_p->rx_use_bcopy) { 2683958cea9eSml29623 /* 2684958cea9eSml29623 * For loaned up buffers, the driver reference count 2685958cea9eSml29623 * will be incremented first and then the free state. 2686958cea9eSml29623 */ 268753f3d8ecSyc148097 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 26886f45ec7bSml29623 if (first_entry) { 26896f45ec7bSml29623 nmp->b_rptr = &nmp->b_rptr[skip_len]; 269053f3d8ecSyc148097 if (l2_len < bsize - skip_len) { 26916f45ec7bSml29623 nmp->b_wptr = &nmp->b_rptr[l2_len]; 26926f45ec7bSml29623 } else { 269353f3d8ecSyc148097 nmp->b_wptr = &nmp->b_rptr[bsize 269453f3d8ecSyc148097 - skip_len]; 269553f3d8ecSyc148097 } 269653f3d8ecSyc148097 } else { 269753f3d8ecSyc148097 if (l2_len - bytes_read < bsize) { 26986f45ec7bSml29623 nmp->b_wptr = 26996f45ec7bSml29623 &nmp->b_rptr[l2_len - bytes_read]; 270053f3d8ecSyc148097 } else { 270153f3d8ecSyc148097 nmp->b_wptr = &nmp->b_rptr[bsize]; 27026f45ec7bSml29623 } 270353f3d8ecSyc148097 } 270453f3d8ecSyc148097 } 270553f3d8ecSyc148097 } else { 270653f3d8ecSyc148097 if (first_entry) { 270753f3d8ecSyc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 270853f3d8ecSyc148097 l2_len < bsize - skip_len ? 
270953f3d8ecSyc148097 l2_len : bsize - skip_len); 271053f3d8ecSyc148097 } else { 271153f3d8ecSyc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 271253f3d8ecSyc148097 l2_len - bytes_read < bsize ? 271353f3d8ecSyc148097 l2_len - bytes_read : bsize); 271453f3d8ecSyc148097 } 271553f3d8ecSyc148097 } 271653f3d8ecSyc148097 if (nmp != NULL) { 2717f720bc57Syc148097 if (first_entry) { 2718f720bc57Syc148097 /* 2719f720bc57Syc148097 * Jumbo packets may be received with more than one 2720f720bc57Syc148097 * buffer, increment ipackets for the first entry only. 2721f720bc57Syc148097 */ 2722f720bc57Syc148097 rdc_stats->ipackets++; 2723f720bc57Syc148097 2724f720bc57Syc148097 /* Update ibytes for kstat. */ 2725f720bc57Syc148097 rdc_stats->ibytes += skip_len 2726f720bc57Syc148097 + l2_len < bsize ? l2_len : bsize; 2727f720bc57Syc148097 /* 2728f720bc57Syc148097 * Update the number of bytes read so far for the 2729f720bc57Syc148097 * current frame. 2730f720bc57Syc148097 */ 273153f3d8ecSyc148097 bytes_read = nmp->b_wptr - nmp->b_rptr; 2732f720bc57Syc148097 } else { 2733f720bc57Syc148097 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 
2734f720bc57Syc148097 l2_len - bytes_read : bsize; 27356f45ec7bSml29623 bytes_read += nmp->b_wptr - nmp->b_rptr; 2736f720bc57Syc148097 } 273753f3d8ecSyc148097 27386f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 27396f45ec7bSml29623 "==> nxge_receive_packet after dupb: " 27406f45ec7bSml29623 "rbr consumed %d " 27416f45ec7bSml29623 "pktbufsz_type %d " 27426f45ec7bSml29623 "nmp $%p rptr $%p wptr $%p " 27436f45ec7bSml29623 "buf_offset %d bzise %d l2_len %d skip_len %d", 27446f45ec7bSml29623 rx_rbr_p->rbr_consumed, 27456f45ec7bSml29623 pktbufsz_type, 27466f45ec7bSml29623 nmp, nmp->b_rptr, nmp->b_wptr, 27476f45ec7bSml29623 buf_offset, bsize, l2_len, skip_len)); 27486f45ec7bSml29623 } else { 27496f45ec7bSml29623 cmn_err(CE_WARN, "!nxge_receive_packet: " 27506f45ec7bSml29623 "update stats (error)"); 27512e59129aSraghus atomic_inc_32(&rx_msg_p->ref_cnt); 27522e59129aSraghus if (buffer_free == B_TRUE) { 27532e59129aSraghus rx_msg_p->free = B_TRUE; 27542e59129aSraghus } 27552e59129aSraghus MUTEX_EXIT(&rx_rbr_p->lock); 27562e59129aSraghus nxge_freeb(rx_msg_p); 27572e59129aSraghus return; 27586f45ec7bSml29623 } 2759ee5416c9Syc148097 27606f45ec7bSml29623 if (buffer_free == B_TRUE) { 27616f45ec7bSml29623 rx_msg_p->free = B_TRUE; 27626f45ec7bSml29623 } 2763f720bc57Syc148097 27646f45ec7bSml29623 is_valid = (nmp != NULL); 276553f3d8ecSyc148097 276653f3d8ecSyc148097 rcr_p->rcvd_pkt_bytes = bytes_read; 276753f3d8ecSyc148097 27686f45ec7bSml29623 MUTEX_EXIT(&rx_rbr_p->lock); 27696f45ec7bSml29623 27706f45ec7bSml29623 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 27716f45ec7bSml29623 atomic_inc_32(&rx_msg_p->ref_cnt); 27726f45ec7bSml29623 nxge_freeb(rx_msg_p); 27736f45ec7bSml29623 } 27746f45ec7bSml29623 27756f45ec7bSml29623 if (is_valid) { 27766f45ec7bSml29623 nmp->b_cont = NULL; 27776f45ec7bSml29623 if (first_entry) { 27786f45ec7bSml29623 *mp = nmp; 27796f45ec7bSml29623 *mp_cont = NULL; 278053f3d8ecSyc148097 } else { 27816f45ec7bSml29623 *mp_cont = nmp; 27826f45ec7bSml29623 } 
278353f3d8ecSyc148097 } 27846f45ec7bSml29623 27856f45ec7bSml29623 /* 2786f720bc57Syc148097 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2787f720bc57Syc148097 * If a packet is not fragmented and no error bit is set, then 2788f720bc57Syc148097 * L4 checksum is OK. 27896f45ec7bSml29623 */ 2790f720bc57Syc148097 27916f45ec7bSml29623 if (is_valid && !multi) { 2792678453a8Sspeer /* 2793b4d05839Sml29623 * If the checksum flag nxge_chksum_offload 2794b4d05839Sml29623 * is 1, TCP and UDP packets can be sent 2795678453a8Sspeer * up with good checksum. If the checksum flag 2796b4d05839Sml29623 * is set to 0, checksum reporting will apply to 2797678453a8Sspeer * TCP packets only (workaround for a hardware bug). 2798b4d05839Sml29623 * If the checksum flag nxge_cksum_offload is 2799b4d05839Sml29623 * greater than 1, both TCP and UDP packets 2800b4d05839Sml29623 * will not be reported its hardware checksum results. 2801678453a8Sspeer */ 2802b4d05839Sml29623 if (nxge_cksum_offload == 1) { 28036f45ec7bSml29623 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 28046f45ec7bSml29623 pkt_type == RCR_PKT_IS_UDP) ? 28056f45ec7bSml29623 B_TRUE: B_FALSE); 2806b4d05839Sml29623 } else if (!nxge_cksum_offload) { 2807678453a8Sspeer /* TCP checksum only. */ 2808678453a8Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 
2809678453a8Sspeer B_TRUE: B_FALSE); 2810678453a8Sspeer } 28116f45ec7bSml29623 28126f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 28136f45ec7bSml29623 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 28146f45ec7bSml29623 is_valid, multi, is_tcp_udp, frag, error_type)); 28156f45ec7bSml29623 28166f45ec7bSml29623 if (is_tcp_udp && !frag && !error_type) { 28176f45ec7bSml29623 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 28186f45ec7bSml29623 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 28196f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 28206f45ec7bSml29623 "==> nxge_receive_packet: Full tcp/udp cksum " 28216f45ec7bSml29623 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 28226f45ec7bSml29623 "error %d", 28236f45ec7bSml29623 is_valid, multi, is_tcp_udp, frag, error_type)); 28246f45ec7bSml29623 } 28256f45ec7bSml29623 } 28266f45ec7bSml29623 28276f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 28286f45ec7bSml29623 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 28296f45ec7bSml29623 28306f45ec7bSml29623 *multi_p = (multi == RCR_MULTI_MASK); 28316f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 28326f45ec7bSml29623 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 28336f45ec7bSml29623 *multi_p, nmp, *mp, *mp_cont)); 28346f45ec7bSml29623 } 28356f45ec7bSml29623 2836da14cebeSEric Cheng /* 2837da14cebeSEric Cheng * Enable polling for a ring. Interrupt for the ring is disabled when 2838da14cebeSEric Cheng * the nxge interrupt comes (see nxge_rx_intr). 
2839da14cebeSEric Cheng */ 2840da14cebeSEric Cheng int 2841da14cebeSEric Cheng nxge_enable_poll(void *arg) 2842da14cebeSEric Cheng { 2843da14cebeSEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2844da14cebeSEric Cheng p_rx_rcr_ring_t ringp; 2845da14cebeSEric Cheng p_nxge_t nxgep; 2846da14cebeSEric Cheng p_nxge_ldg_t ldgp; 2847da14cebeSEric Cheng uint32_t channel; 2848da14cebeSEric Cheng 2849da14cebeSEric Cheng if (ring_handle == NULL) { 2850da14cebeSEric Cheng return (0); 2851da14cebeSEric Cheng } 2852da14cebeSEric Cheng 2853da14cebeSEric Cheng nxgep = ring_handle->nxgep; 2854da14cebeSEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2855da14cebeSEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2856da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2857da14cebeSEric Cheng "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2858da14cebeSEric Cheng ldgp = ringp->ldgp; 2859da14cebeSEric Cheng if (ldgp == NULL) { 2860da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2861da14cebeSEric Cheng "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2862da14cebeSEric Cheng ringp->rdc)); 2863da14cebeSEric Cheng return (0); 2864da14cebeSEric Cheng } 2865da14cebeSEric Cheng 2866da14cebeSEric Cheng MUTEX_ENTER(&ringp->lock); 2867da14cebeSEric Cheng /* enable polling */ 2868da14cebeSEric Cheng if (ringp->poll_flag == 0) { 2869da14cebeSEric Cheng ringp->poll_flag = 1; 2870da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2871da14cebeSEric Cheng "==> nxge_enable_poll: rdc %d set poll flag to 1", 2872da14cebeSEric Cheng ringp->rdc)); 2873da14cebeSEric Cheng } 2874da14cebeSEric Cheng 2875da14cebeSEric Cheng MUTEX_EXIT(&ringp->lock); 2876da14cebeSEric Cheng return (0); 2877da14cebeSEric Cheng } 2878da14cebeSEric Cheng /* 2879da14cebeSEric Cheng * Disable polling for a ring and enable its interrupt. 
2880da14cebeSEric Cheng */ 2881da14cebeSEric Cheng int 2882da14cebeSEric Cheng nxge_disable_poll(void *arg) 2883da14cebeSEric Cheng { 2884da14cebeSEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2885da14cebeSEric Cheng p_rx_rcr_ring_t ringp; 2886da14cebeSEric Cheng p_nxge_t nxgep; 2887da14cebeSEric Cheng uint32_t channel; 2888da14cebeSEric Cheng 2889da14cebeSEric Cheng if (ring_handle == NULL) { 2890da14cebeSEric Cheng return (0); 2891da14cebeSEric Cheng } 2892da14cebeSEric Cheng 2893da14cebeSEric Cheng nxgep = ring_handle->nxgep; 2894da14cebeSEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2895da14cebeSEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2896da14cebeSEric Cheng 2897da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2898da14cebeSEric Cheng "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc)); 2899da14cebeSEric Cheng 2900da14cebeSEric Cheng MUTEX_ENTER(&ringp->lock); 2901da14cebeSEric Cheng 2902da14cebeSEric Cheng /* disable polling: enable interrupt */ 2903da14cebeSEric Cheng if (ringp->poll_flag) { 2904da14cebeSEric Cheng npi_handle_t handle; 2905da14cebeSEric Cheng rx_dma_ctl_stat_t cs; 2906da14cebeSEric Cheng uint8_t channel; 2907da14cebeSEric Cheng p_nxge_ldg_t ldgp; 2908da14cebeSEric Cheng 2909da14cebeSEric Cheng /* 2910da14cebeSEric Cheng * Get the control and status for this channel. 2911da14cebeSEric Cheng */ 2912da14cebeSEric Cheng handle = NXGE_DEV_NPI_HANDLE(nxgep); 2913da14cebeSEric Cheng channel = ringp->rdc; 2914da14cebeSEric Cheng RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 2915da14cebeSEric Cheng channel, &cs.value); 2916da14cebeSEric Cheng 2917da14cebeSEric Cheng /* 2918da14cebeSEric Cheng * Enable mailbox update 2919da14cebeSEric Cheng * Since packets were not read and the hardware uses 2920da14cebeSEric Cheng * bits pktread and ptrread to update the queue 2921da14cebeSEric Cheng * length, we need to set both bits to 0. 
2922da14cebeSEric Cheng */ 2923da14cebeSEric Cheng cs.bits.ldw.pktread = 0; 2924da14cebeSEric Cheng cs.bits.ldw.ptrread = 0; 2925da14cebeSEric Cheng cs.bits.hdw.mex = 1; 2926da14cebeSEric Cheng RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 2927da14cebeSEric Cheng cs.value); 2928da14cebeSEric Cheng 2929da14cebeSEric Cheng /* 2930da14cebeSEric Cheng * Rearm this logical group if this is a single device 2931da14cebeSEric Cheng * group. 2932da14cebeSEric Cheng */ 2933da14cebeSEric Cheng ldgp = ringp->ldgp; 2934da14cebeSEric Cheng if (ldgp == NULL) { 2935da14cebeSEric Cheng ringp->poll_flag = 0; 2936da14cebeSEric Cheng MUTEX_EXIT(&ringp->lock); 2937da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2938da14cebeSEric Cheng "==> nxge_disable_poll: no ldgp rdc %d " 2939da14cebeSEric Cheng "(still set poll to 0", ringp->rdc)); 2940da14cebeSEric Cheng return (0); 2941da14cebeSEric Cheng } 2942da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2943da14cebeSEric Cheng "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 2944da14cebeSEric Cheng ringp->rdc, ldgp)); 2945da14cebeSEric Cheng if (ldgp->nldvs == 1) { 2946da14cebeSEric Cheng ldgimgm_t mgm; 2947da14cebeSEric Cheng mgm.value = 0; 2948da14cebeSEric Cheng mgm.bits.ldw.arm = 1; 2949da14cebeSEric Cheng mgm.bits.ldw.timer = ldgp->ldg_timer; 2950da14cebeSEric Cheng NXGE_REG_WR64(handle, 2951da14cebeSEric Cheng LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 2952da14cebeSEric Cheng } 2953da14cebeSEric Cheng ringp->poll_flag = 0; 2954da14cebeSEric Cheng } 2955da14cebeSEric Cheng 2956da14cebeSEric Cheng MUTEX_EXIT(&ringp->lock); 2957da14cebeSEric Cheng return (0); 2958da14cebeSEric Cheng } 2959da14cebeSEric Cheng 2960da14cebeSEric Cheng /* 2961da14cebeSEric Cheng * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
2962da14cebeSEric Cheng */ 2963da14cebeSEric Cheng mblk_t * 2964da14cebeSEric Cheng nxge_rx_poll(void *arg, int bytes_to_pickup) 2965da14cebeSEric Cheng { 2966da14cebeSEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2967da14cebeSEric Cheng p_rx_rcr_ring_t rcr_p; 2968da14cebeSEric Cheng p_nxge_t nxgep; 2969da14cebeSEric Cheng npi_handle_t handle; 2970da14cebeSEric Cheng rx_dma_ctl_stat_t cs; 2971da14cebeSEric Cheng mblk_t *mblk; 2972da14cebeSEric Cheng p_nxge_ldv_t ldvp; 2973da14cebeSEric Cheng uint32_t channel; 2974da14cebeSEric Cheng 2975da14cebeSEric Cheng nxgep = ring_handle->nxgep; 2976da14cebeSEric Cheng 2977da14cebeSEric Cheng /* 2978da14cebeSEric Cheng * Get the control and status for this channel. 2979da14cebeSEric Cheng */ 2980da14cebeSEric Cheng handle = NXGE_DEV_NPI_HANDLE(nxgep); 2981da14cebeSEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2982da14cebeSEric Cheng rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2983da14cebeSEric Cheng MUTEX_ENTER(&rcr_p->lock); 2984da14cebeSEric Cheng ASSERT(rcr_p->poll_flag == 1); 2985da14cebeSEric Cheng 2986da14cebeSEric Cheng RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 2987da14cebeSEric Cheng 2988da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2989da14cebeSEric Cheng "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 2990da14cebeSEric Cheng rcr_p->rdc, rcr_p->poll_flag)); 2991da14cebeSEric Cheng mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 2992da14cebeSEric Cheng 2993da14cebeSEric Cheng ldvp = rcr_p->ldvp; 2994da14cebeSEric Cheng /* error events. 
*/ 2995da14cebeSEric Cheng if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 2996da14cebeSEric Cheng (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 2997da14cebeSEric Cheng } 2998da14cebeSEric Cheng 2999da14cebeSEric Cheng MUTEX_EXIT(&rcr_p->lock); 3000da14cebeSEric Cheng 3001da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3002da14cebeSEric Cheng "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 3003da14cebeSEric Cheng return (mblk); 3004da14cebeSEric Cheng } 3005da14cebeSEric Cheng 3006da14cebeSEric Cheng 30076f45ec7bSml29623 /*ARGSUSED*/ 30086f45ec7bSml29623 static nxge_status_t 3009678453a8Sspeer nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 30106f45ec7bSml29623 { 30116f45ec7bSml29623 p_nxge_rx_ring_stats_t rdc_stats; 30126f45ec7bSml29623 npi_handle_t handle; 30136f45ec7bSml29623 npi_status_t rs; 30146f45ec7bSml29623 boolean_t rxchan_fatal = B_FALSE; 30156f45ec7bSml29623 boolean_t rxport_fatal = B_FALSE; 30166f45ec7bSml29623 uint8_t portn; 30176f45ec7bSml29623 nxge_status_t status = NXGE_OK; 30186f45ec7bSml29623 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 30196f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 30206f45ec7bSml29623 30216f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 30226f45ec7bSml29623 portn = nxgep->mac.portnum; 3023678453a8Sspeer rdc_stats = &nxgep->statsp->rdc_stats[channel]; 30246f45ec7bSml29623 30256f45ec7bSml29623 if (cs.bits.hdw.rbr_tmout) { 30266f45ec7bSml29623 rdc_stats->rx_rbr_tmout++; 30276f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30286f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 30296f45ec7bSml29623 rxchan_fatal = B_TRUE; 30306f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30316f45ec7bSml29623 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 30326f45ec7bSml29623 } 30336f45ec7bSml29623 if (cs.bits.hdw.rsp_cnt_err) { 30346f45ec7bSml29623 rdc_stats->rsp_cnt_err++; 30356f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 
	    NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "rsp_cnt_err", channel));
	}
	if (cs.bits.hdw.byte_en_bus) {
		rdc_stats->byte_en_bus++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: byte_en_bus", channel));
		rxchan_fatal = B_TRUE;
	}
	if (cs.bits.hdw.rsp_dat_err) {
		rdc_stats->rsp_dat_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rsp_dat_err", channel));
	}
	if (cs.bits.hdw.rcr_ack_err) {
		rdc_stats->rcr_ack_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rcr_ack_err", channel));
	}
	if (cs.bits.hdw.dc_fifo_err) {
		rdc_stats->dc_fifo_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
		/* This is not a fatal error! */
		/*
		 * NOTE(review): "not fatal" here means not fatal to this
		 * channel; rxport_fatal is set below, so the whole port
		 * goes through IPP recovery at the end of this function.
		 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "dc_fifo_err", channel));
		rxport_fatal = B_TRUE;
	}
	/*
	 * Shadow-RAM parity errors: fetch the hardware parity error log
	 * before deciding which of the two bits to account and report.
	 */
	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
		    &rdc_stats->errlog.pre_par,
		    &rdc_stats->errlog.sha_par))
		    != NPI_SUCCESS) {
			/* Cannot read the error log; give up immediately. */
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "rcr_sha_par: get perr", channel));
			return (NXGE_ERROR | rs);
		}
		if (cs.bits.hdw.rcr_sha_par) {
			rdc_stats->rcr_sha_par++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
			    NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
			rxchan_fatal = B_TRUE;
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rcr_sha_par", channel));
		}
		if (cs.bits.hdw.rbr_pre_par) {
			rdc_stats->rbr_pre_par++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
			    NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
			rxchan_fatal = B_TRUE;
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rbr_pre_par", channel));
		}
	}
	/*
	 * The Following 4 status bits are for information, the system
	 * is running fine. There is no need to send FMA ereports or
	 * log messages.
	 */
	if (cs.bits.hdw.port_drop_pkt) {
		rdc_stats->port_drop_pkt++;
	}
	if (cs.bits.hdw.wred_drop) {
		rdc_stats->wred_drop++;
	}
	if (cs.bits.hdw.rbr_pre_empty) {
		rdc_stats->rbr_pre_empty++;
	}
	if (cs.bits.hdw.rcr_shadow_full) {
		rdc_stats->rcr_shadow_full++;
	}
	if (cs.bits.hdw.config_err) {
		rdc_stats->config_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "config error", channel));
	}
	if (cs.bits.hdw.rcrincon) {
		rdc_stats->rcrincon++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCRINCON);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rcrincon error", channel));
	}
	if (cs.bits.hdw.rcrfull) {
		rdc_stats->rcrfull++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCRFULL);
		rxchan_fatal = B_TRUE;
		/*
		 * Only rcrfull messages are rate-limited (to
		 * error_disp_cnt = NXGE_ERROR_SHOW_MAX occurrences);
		 * the ereport and the stat are still recorded each time.
		 */
		if (rdc_stats->rcrfull < error_disp_cnt)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rcrfull error", channel));
	}
	if (cs.bits.hdw.rbr_empty) {
		/*
		 * This bit is for information, there is no need
		 * send FMA ereport or log a message.
		 */
		rdc_stats->rbr_empty++;
	}
	if (cs.bits.hdw.rbrfull) {
		rdc_stats->rbrfull++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBRFULL);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr_full error", channel));
	}
	if (cs.bits.hdw.rbrlogpage) {
		rdc_stats->rbrlogpage++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr logical page error", channel));
	}
	if (cs.bits.hdw.cfiglogpage) {
		rdc_stats->cfiglogpage++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: cfig logical page error", channel));
	}

	/*
	 * Port-fatal errors (e.g. dc_fifo_err) require IPP recovery for
	 * the whole port; a service domain attempts the recovery in
	 * place, while an LDOMs guest can only report failure.
	 */
	if (rxport_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rx_err_evnts: fatal error on Port #%d\n",
		    portn));
		if (isLDOMguest(nxgep)) {
			status = NXGE_ERROR;
		} else {
			status = nxge_ipp_fatal_err_recover(nxgep);
			if (status == NXGE_OK) {
				FM_SERVICE_RESTORED(nxgep);
			}
		}
	}

	/*
	 * Channel-fatal errors reset only the affected RXDMA channel;
	 * again, guests cannot drive the recovery themselves.
	 */
	if (rxchan_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rx_err_evnts: fatal error on Channel #%d\n",
		    channel));
		if (isLDOMguest(nxgep)) {
			status = NXGE_ERROR;
		} else {
			status = nxge_rxdma_fatal_err_recover(nxgep, channel);
			if (status == NXGE_OK) {
				FM_SERVICE_RESTORED(nxgep);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));

	return (status);
}

/*
 * nxge_rdc_hvio_setup
 *
 * This code appears to setup some Hypervisor variables.
 *
 * Arguments:
 * 	nxgep
 * 	channel
 *
 * Notes:
 *	What does NIU_LP_WORKAROUND mean?
3232678453a8Sspeer * 3233678453a8Sspeer * NPI/NXGE function calls: 3234678453a8Sspeer * na 3235678453a8Sspeer * 3236678453a8Sspeer * Context: 3237678453a8Sspeer * Any domain 3238678453a8Sspeer */ 32396f45ec7bSml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3240678453a8Sspeer static void 3241678453a8Sspeer nxge_rdc_hvio_setup( 3242678453a8Sspeer nxge_t *nxgep, int channel) 3243678453a8Sspeer { 3244678453a8Sspeer nxge_dma_common_t *dma_common; 3245678453a8Sspeer nxge_dma_common_t *dma_control; 3246678453a8Sspeer rx_rbr_ring_t *ring; 3247678453a8Sspeer 3248678453a8Sspeer ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3249678453a8Sspeer dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3250678453a8Sspeer 3251678453a8Sspeer ring->hv_set = B_FALSE; 3252678453a8Sspeer 3253678453a8Sspeer ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 3254678453a8Sspeer dma_common->orig_ioaddr_pp; 3255678453a8Sspeer ring->hv_rx_buf_ioaddr_size = (uint64_t) 3256678453a8Sspeer dma_common->orig_alength; 3257678453a8Sspeer 3258678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3259678453a8Sspeer "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 3260678453a8Sspeer channel, ring->hv_rx_buf_base_ioaddr_pp, 3261678453a8Sspeer dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 3262678453a8Sspeer dma_common->orig_alength, dma_common->orig_alength)); 3263678453a8Sspeer 3264678453a8Sspeer dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3265678453a8Sspeer 3266678453a8Sspeer ring->hv_rx_cntl_base_ioaddr_pp = 3267678453a8Sspeer (uint64_t)dma_control->orig_ioaddr_pp; 3268678453a8Sspeer ring->hv_rx_cntl_ioaddr_size = 3269678453a8Sspeer (uint64_t)dma_control->orig_alength; 3270678453a8Sspeer 3271678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3272678453a8Sspeer "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 3273678453a8Sspeer channel, ring->hv_rx_cntl_base_ioaddr_pp, 3274678453a8Sspeer 
dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 3275678453a8Sspeer dma_control->orig_alength, dma_control->orig_alength)); 3276678453a8Sspeer } 32776f45ec7bSml29623 #endif 32786f45ec7bSml29623 3279678453a8Sspeer /* 3280678453a8Sspeer * nxge_map_rxdma 3281678453a8Sspeer * 3282678453a8Sspeer * Map an RDC into our kernel space. 3283678453a8Sspeer * 3284678453a8Sspeer * Arguments: 3285678453a8Sspeer * nxgep 3286678453a8Sspeer * channel The channel to map. 3287678453a8Sspeer * 3288678453a8Sspeer * Notes: 3289678453a8Sspeer * 1. Allocate & initialise a memory pool, if necessary. 3290678453a8Sspeer * 2. Allocate however many receive buffers are required. 3291678453a8Sspeer * 3. Setup buffers, descriptors, and mailbox. 3292678453a8Sspeer * 3293678453a8Sspeer * NPI/NXGE function calls: 3294678453a8Sspeer * nxge_alloc_rx_mem_pool() 3295678453a8Sspeer * nxge_alloc_rbb() 3296678453a8Sspeer * nxge_map_rxdma_channel() 3297678453a8Sspeer * 3298678453a8Sspeer * Registers accessed: 3299678453a8Sspeer * 3300678453a8Sspeer * Context: 3301678453a8Sspeer * Any domain 3302678453a8Sspeer */ 3303678453a8Sspeer static nxge_status_t 3304678453a8Sspeer nxge_map_rxdma(p_nxge_t nxgep, int channel) 3305678453a8Sspeer { 3306678453a8Sspeer nxge_dma_common_t **data; 3307678453a8Sspeer nxge_dma_common_t **control; 3308678453a8Sspeer rx_rbr_ring_t **rbr_ring; 3309678453a8Sspeer rx_rcr_ring_t **rcr_ring; 3310678453a8Sspeer rx_mbox_t **mailbox; 3311678453a8Sspeer uint32_t chunks; 3312678453a8Sspeer 3313678453a8Sspeer nxge_status_t status; 3314678453a8Sspeer 33156f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 33166f45ec7bSml29623 3317678453a8Sspeer if (!nxgep->rx_buf_pool_p) { 3318678453a8Sspeer if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 33196f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33206f45ec7bSml29623 "<== nxge_map_rxdma: buf not allocated")); 33216f45ec7bSml29623 return (NXGE_ERROR); 33226f45ec7bSml29623 } 33236f45ec7bSml29623 } 33246f45ec7bSml29623 
3325678453a8Sspeer if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3326678453a8Sspeer return (NXGE_ERROR); 33276f45ec7bSml29623 33286f45ec7bSml29623 /* 33296f45ec7bSml29623 * Timeout should be set based on the system clock divider. 3330*7b26d9ffSSantwona Behera * A timeout value of 1 assumes that the 33316f45ec7bSml29623 * granularity (1000) is 3 microseconds running at 300MHz. 33326f45ec7bSml29623 */ 33336f45ec7bSml29623 3334*7b26d9ffSSantwona Behera nxgep->intr_threshold = nxge_rcr_threshold; 3335*7b26d9ffSSantwona Behera nxgep->intr_timeout = nxge_rcr_timeout; 33366f45ec7bSml29623 33376f45ec7bSml29623 /* 3338678453a8Sspeer * Map descriptors from the buffer polls for each dma channel. 33396f45ec7bSml29623 */ 3340678453a8Sspeer 33416f45ec7bSml29623 /* 33426f45ec7bSml29623 * Set up and prepare buffer blocks, descriptors 33436f45ec7bSml29623 * and mailbox. 33446f45ec7bSml29623 */ 3345678453a8Sspeer data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3346678453a8Sspeer rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3347678453a8Sspeer chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3348678453a8Sspeer 3349678453a8Sspeer control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3350678453a8Sspeer rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3351678453a8Sspeer 3352678453a8Sspeer mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3353678453a8Sspeer 3354678453a8Sspeer status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3355678453a8Sspeer chunks, control, rcr_ring, mailbox); 33566f45ec7bSml29623 if (status != NXGE_OK) { 3357678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3358678453a8Sspeer "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3359678453a8Sspeer "returned 0x%x", 3360678453a8Sspeer channel, status)); 3361678453a8Sspeer return (status); 33626f45ec7bSml29623 } 3363678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3364678453a8Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->index = 
(uint16_t)channel; 3365678453a8Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3366678453a8Sspeer &nxgep->statsp->rdc_stats[channel]; 33676f45ec7bSml29623 33686f45ec7bSml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3369678453a8Sspeer if (!isLDOMguest(nxgep)) 3370678453a8Sspeer nxge_rdc_hvio_setup(nxgep, channel); 3371678453a8Sspeer #endif 33726f45ec7bSml29623 33736f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3374678453a8Sspeer "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 33756f45ec7bSml29623 33766f45ec7bSml29623 return (status); 33776f45ec7bSml29623 } 33786f45ec7bSml29623 33796f45ec7bSml29623 static void 3380678453a8Sspeer nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 33816f45ec7bSml29623 { 3382678453a8Sspeer rx_rbr_ring_t *rbr_ring; 3383678453a8Sspeer rx_rcr_ring_t *rcr_ring; 3384678453a8Sspeer rx_mbox_t *mailbox; 33856f45ec7bSml29623 3386678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 33876f45ec7bSml29623 3388678453a8Sspeer if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3389678453a8Sspeer !nxgep->rx_mbox_areas_p) 33906f45ec7bSml29623 return; 33916f45ec7bSml29623 3392678453a8Sspeer rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3393678453a8Sspeer rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3394678453a8Sspeer mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3395678453a8Sspeer 3396678453a8Sspeer if (!rbr_ring || !rcr_ring || !mailbox) 33976f45ec7bSml29623 return; 33986f45ec7bSml29623 3399678453a8Sspeer (void) nxge_unmap_rxdma_channel( 3400678453a8Sspeer nxgep, channel, rbr_ring, rcr_ring, mailbox); 34016f45ec7bSml29623 3402678453a8Sspeer nxge_free_rxb(nxgep, channel); 34036f45ec7bSml29623 3404678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 34056f45ec7bSml29623 } 34066f45ec7bSml29623 34076f45ec7bSml29623 nxge_status_t 34086f45ec7bSml29623 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 34096f45ec7bSml29623 
p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 34106f45ec7bSml29623 uint32_t num_chunks, 34116f45ec7bSml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 34126f45ec7bSml29623 p_rx_mbox_t *rx_mbox_p) 34136f45ec7bSml29623 { 34146f45ec7bSml29623 int status = NXGE_OK; 34156f45ec7bSml29623 34166f45ec7bSml29623 /* 34176f45ec7bSml29623 * Set up and prepare buffer blocks, descriptors 34186f45ec7bSml29623 * and mailbox. 34196f45ec7bSml29623 */ 34206f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34216f45ec7bSml29623 "==> nxge_map_rxdma_channel (channel %d)", channel)); 34226f45ec7bSml29623 /* 34236f45ec7bSml29623 * Receive buffer blocks 34246f45ec7bSml29623 */ 34256f45ec7bSml29623 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 34266f45ec7bSml29623 dma_buf_p, rbr_p, num_chunks); 34276f45ec7bSml29623 if (status != NXGE_OK) { 34286f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34296f45ec7bSml29623 "==> nxge_map_rxdma_channel (channel %d): " 34306f45ec7bSml29623 "map buffer failed 0x%x", channel, status)); 34316f45ec7bSml29623 goto nxge_map_rxdma_channel_exit; 34326f45ec7bSml29623 } 34336f45ec7bSml29623 34346f45ec7bSml29623 /* 34356f45ec7bSml29623 * Receive block ring, completion ring and mailbox. 
34366f45ec7bSml29623 */ 34376f45ec7bSml29623 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 34386f45ec7bSml29623 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 34396f45ec7bSml29623 if (status != NXGE_OK) { 34406f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34416f45ec7bSml29623 "==> nxge_map_rxdma_channel (channel %d): " 34426f45ec7bSml29623 "map config failed 0x%x", channel, status)); 34436f45ec7bSml29623 goto nxge_map_rxdma_channel_fail2; 34446f45ec7bSml29623 } 34456f45ec7bSml29623 34466f45ec7bSml29623 goto nxge_map_rxdma_channel_exit; 34476f45ec7bSml29623 34486f45ec7bSml29623 nxge_map_rxdma_channel_fail3: 34496f45ec7bSml29623 /* Free rbr, rcr */ 34506f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34516f45ec7bSml29623 "==> nxge_map_rxdma_channel: free rbr/rcr " 34526f45ec7bSml29623 "(status 0x%x channel %d)", 34536f45ec7bSml29623 status, channel)); 34546f45ec7bSml29623 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 34556f45ec7bSml29623 *rcr_p, *rx_mbox_p); 34566f45ec7bSml29623 34576f45ec7bSml29623 nxge_map_rxdma_channel_fail2: 34586f45ec7bSml29623 /* Free buffer blocks */ 34596f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34606f45ec7bSml29623 "==> nxge_map_rxdma_channel: free rx buffers" 34616f45ec7bSml29623 "(nxgep 0x%x status 0x%x channel %d)", 34626f45ec7bSml29623 nxgep, status, channel)); 34636f45ec7bSml29623 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 34646f45ec7bSml29623 346556d930aeSspeer status = NXGE_ERROR; 346656d930aeSspeer 34676f45ec7bSml29623 nxge_map_rxdma_channel_exit: 34686f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34696f45ec7bSml29623 "<== nxge_map_rxdma_channel: " 34706f45ec7bSml29623 "(nxgep 0x%x status 0x%x channel %d)", 34716f45ec7bSml29623 nxgep, status, channel)); 34726f45ec7bSml29623 34736f45ec7bSml29623 return (status); 34746f45ec7bSml29623 } 34756f45ec7bSml29623 34766f45ec7bSml29623 /*ARGSUSED*/ 34776f45ec7bSml29623 static void 34786f45ec7bSml29623 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 
34796f45ec7bSml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 34806f45ec7bSml29623 { 34816f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34826f45ec7bSml29623 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 34836f45ec7bSml29623 34846f45ec7bSml29623 /* 34856f45ec7bSml29623 * unmap receive block ring, completion ring and mailbox. 34866f45ec7bSml29623 */ 34876f45ec7bSml29623 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 34886f45ec7bSml29623 rcr_p, rx_mbox_p); 34896f45ec7bSml29623 34906f45ec7bSml29623 /* unmap buffer blocks */ 34916f45ec7bSml29623 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 34926f45ec7bSml29623 34936f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 34946f45ec7bSml29623 } 34956f45ec7bSml29623 34966f45ec7bSml29623 /*ARGSUSED*/ 34976f45ec7bSml29623 static nxge_status_t 34986f45ec7bSml29623 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 34996f45ec7bSml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 35006f45ec7bSml29623 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 35016f45ec7bSml29623 { 35026f45ec7bSml29623 p_rx_rbr_ring_t rbrp; 35036f45ec7bSml29623 p_rx_rcr_ring_t rcrp; 35046f45ec7bSml29623 p_rx_mbox_t mboxp; 35056f45ec7bSml29623 p_nxge_dma_common_t cntl_dmap; 35066f45ec7bSml29623 p_nxge_dma_common_t dmap; 35076f45ec7bSml29623 p_rx_msg_t *rx_msg_ring; 35086f45ec7bSml29623 p_rx_msg_t rx_msg_p; 35096f45ec7bSml29623 p_rbr_cfig_a_t rcfga_p; 35106f45ec7bSml29623 p_rbr_cfig_b_t rcfgb_p; 35116f45ec7bSml29623 p_rcrcfig_a_t cfga_p; 35126f45ec7bSml29623 p_rcrcfig_b_t cfgb_p; 35136f45ec7bSml29623 p_rxdma_cfig1_t cfig1_p; 35146f45ec7bSml29623 p_rxdma_cfig2_t cfig2_p; 35156f45ec7bSml29623 p_rbr_kick_t kick_p; 35166f45ec7bSml29623 uint32_t dmaaddrp; 35176f45ec7bSml29623 uint32_t *rbr_vaddrp; 35186f45ec7bSml29623 uint32_t bkaddr; 35196f45ec7bSml29623 nxge_status_t status = NXGE_OK; 35206f45ec7bSml29623 int i; 35216f45ec7bSml29623 uint32_t 
nxge_port_rcr_size; 35226f45ec7bSml29623 35236f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35246f45ec7bSml29623 "==> nxge_map_rxdma_channel_cfg_ring")); 35256f45ec7bSml29623 35266f45ec7bSml29623 cntl_dmap = *dma_cntl_p; 35276f45ec7bSml29623 35286f45ec7bSml29623 /* Map in the receive block ring */ 35296f45ec7bSml29623 rbrp = *rbr_p; 35306f45ec7bSml29623 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 35316f45ec7bSml29623 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 35326f45ec7bSml29623 /* 35336f45ec7bSml29623 * Zero out buffer block ring descriptors. 35346f45ec7bSml29623 */ 35356f45ec7bSml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 35366f45ec7bSml29623 35376f45ec7bSml29623 rcfga_p = &(rbrp->rbr_cfga); 35386f45ec7bSml29623 rcfgb_p = &(rbrp->rbr_cfgb); 35396f45ec7bSml29623 kick_p = &(rbrp->rbr_kick); 35406f45ec7bSml29623 rcfga_p->value = 0; 35416f45ec7bSml29623 rcfgb_p->value = 0; 35426f45ec7bSml29623 kick_p->value = 0; 35436f45ec7bSml29623 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 35446f45ec7bSml29623 rcfga_p->value = (rbrp->rbr_addr & 35456f45ec7bSml29623 (RBR_CFIG_A_STDADDR_MASK | 35466f45ec7bSml29623 RBR_CFIG_A_STDADDR_BASE_MASK)); 35476f45ec7bSml29623 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 35486f45ec7bSml29623 35496f45ec7bSml29623 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 35506f45ec7bSml29623 rcfgb_p->bits.ldw.vld0 = 1; 35516f45ec7bSml29623 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 35526f45ec7bSml29623 rcfgb_p->bits.ldw.vld1 = 1; 35536f45ec7bSml29623 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 35546f45ec7bSml29623 rcfgb_p->bits.ldw.vld2 = 1; 35556f45ec7bSml29623 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 35566f45ec7bSml29623 35576f45ec7bSml29623 /* 35586f45ec7bSml29623 * For each buffer block, enter receive block address to the ring. 
35596f45ec7bSml29623 */ 35606f45ec7bSml29623 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 35616f45ec7bSml29623 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 35626f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35636f45ec7bSml29623 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 35646f45ec7bSml29623 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 35656f45ec7bSml29623 35666f45ec7bSml29623 rx_msg_ring = rbrp->rx_msg_ring; 35676f45ec7bSml29623 for (i = 0; i < rbrp->tnblocks; i++) { 35686f45ec7bSml29623 rx_msg_p = rx_msg_ring[i]; 35696f45ec7bSml29623 rx_msg_p->nxgep = nxgep; 35706f45ec7bSml29623 rx_msg_p->rx_rbr_p = rbrp; 35716f45ec7bSml29623 bkaddr = (uint32_t) 35726f45ec7bSml29623 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 35736f45ec7bSml29623 >> RBR_BKADDR_SHIFT)); 35746f45ec7bSml29623 rx_msg_p->free = B_FALSE; 35756f45ec7bSml29623 rx_msg_p->max_usage_cnt = 0xbaddcafe; 35766f45ec7bSml29623 35776f45ec7bSml29623 *rbr_vaddrp++ = bkaddr; 35786f45ec7bSml29623 } 35796f45ec7bSml29623 35806f45ec7bSml29623 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 35816f45ec7bSml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 35826f45ec7bSml29623 35836f45ec7bSml29623 rbrp->rbr_rd_index = 0; 35846f45ec7bSml29623 35856f45ec7bSml29623 rbrp->rbr_consumed = 0; 35866f45ec7bSml29623 rbrp->rbr_use_bcopy = B_TRUE; 35876f45ec7bSml29623 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 35886f45ec7bSml29623 /* 35896f45ec7bSml29623 * Do bcopy on packets greater than bcopy size once 35906f45ec7bSml29623 * the lo threshold is reached. 35916f45ec7bSml29623 * This lo threshold should be less than the hi threshold. 35926f45ec7bSml29623 * 35936f45ec7bSml29623 * Do bcopy on every packet once the hi threshold is reached. 
35946f45ec7bSml29623 */ 35956f45ec7bSml29623 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 35966f45ec7bSml29623 /* default it to use hi */ 35976f45ec7bSml29623 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 35986f45ec7bSml29623 } 35996f45ec7bSml29623 36006f45ec7bSml29623 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 36016f45ec7bSml29623 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 36026f45ec7bSml29623 } 36036f45ec7bSml29623 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 36046f45ec7bSml29623 36056f45ec7bSml29623 switch (nxge_rx_threshold_hi) { 36066f45ec7bSml29623 default: 36076f45ec7bSml29623 case NXGE_RX_COPY_NONE: 36086f45ec7bSml29623 /* Do not do bcopy at all */ 36096f45ec7bSml29623 rbrp->rbr_use_bcopy = B_FALSE; 36106f45ec7bSml29623 rbrp->rbr_threshold_hi = rbrp->rbb_max; 36116f45ec7bSml29623 break; 36126f45ec7bSml29623 36136f45ec7bSml29623 case NXGE_RX_COPY_1: 36146f45ec7bSml29623 case NXGE_RX_COPY_2: 36156f45ec7bSml29623 case NXGE_RX_COPY_3: 36166f45ec7bSml29623 case NXGE_RX_COPY_4: 36176f45ec7bSml29623 case NXGE_RX_COPY_5: 36186f45ec7bSml29623 case NXGE_RX_COPY_6: 36196f45ec7bSml29623 case NXGE_RX_COPY_7: 36206f45ec7bSml29623 rbrp->rbr_threshold_hi = 36216f45ec7bSml29623 rbrp->rbb_max * 36226f45ec7bSml29623 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 36236f45ec7bSml29623 break; 36246f45ec7bSml29623 36256f45ec7bSml29623 case NXGE_RX_COPY_ALL: 36266f45ec7bSml29623 rbrp->rbr_threshold_hi = 0; 36276f45ec7bSml29623 break; 36286f45ec7bSml29623 } 36296f45ec7bSml29623 36306f45ec7bSml29623 switch (nxge_rx_threshold_lo) { 36316f45ec7bSml29623 default: 36326f45ec7bSml29623 case NXGE_RX_COPY_NONE: 36336f45ec7bSml29623 /* Do not do bcopy at all */ 36346f45ec7bSml29623 if (rbrp->rbr_use_bcopy) { 36356f45ec7bSml29623 rbrp->rbr_use_bcopy = B_FALSE; 36366f45ec7bSml29623 } 36376f45ec7bSml29623 rbrp->rbr_threshold_lo = rbrp->rbb_max; 36386f45ec7bSml29623 break; 36396f45ec7bSml29623 36406f45ec7bSml29623 case NXGE_RX_COPY_1: 36416f45ec7bSml29623 case NXGE_RX_COPY_2: 
36426f45ec7bSml29623 case NXGE_RX_COPY_3: 36436f45ec7bSml29623 case NXGE_RX_COPY_4: 36446f45ec7bSml29623 case NXGE_RX_COPY_5: 36456f45ec7bSml29623 case NXGE_RX_COPY_6: 36466f45ec7bSml29623 case NXGE_RX_COPY_7: 36476f45ec7bSml29623 rbrp->rbr_threshold_lo = 36486f45ec7bSml29623 rbrp->rbb_max * 36496f45ec7bSml29623 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 36506f45ec7bSml29623 break; 36516f45ec7bSml29623 36526f45ec7bSml29623 case NXGE_RX_COPY_ALL: 36536f45ec7bSml29623 rbrp->rbr_threshold_lo = 0; 36546f45ec7bSml29623 break; 36556f45ec7bSml29623 } 36566f45ec7bSml29623 36576f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 36586f45ec7bSml29623 "nxge_map_rxdma_channel_cfg_ring: channel %d " 36596f45ec7bSml29623 "rbb_max %d " 36606f45ec7bSml29623 "rbrp->rbr_bufsize_type %d " 36616f45ec7bSml29623 "rbb_threshold_hi %d " 36626f45ec7bSml29623 "rbb_threshold_lo %d", 36636f45ec7bSml29623 dma_channel, 36646f45ec7bSml29623 rbrp->rbb_max, 36656f45ec7bSml29623 rbrp->rbr_bufsize_type, 36666f45ec7bSml29623 rbrp->rbr_threshold_hi, 36676f45ec7bSml29623 rbrp->rbr_threshold_lo)); 36686f45ec7bSml29623 36696f45ec7bSml29623 rbrp->page_valid.value = 0; 36706f45ec7bSml29623 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 36716f45ec7bSml29623 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 36726f45ec7bSml29623 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 36736f45ec7bSml29623 rbrp->page_hdl.value = 0; 36746f45ec7bSml29623 36756f45ec7bSml29623 rbrp->page_valid.bits.ldw.page0 = 1; 36766f45ec7bSml29623 rbrp->page_valid.bits.ldw.page1 = 1; 36776f45ec7bSml29623 36786f45ec7bSml29623 /* Map in the receive completion ring */ 36796f45ec7bSml29623 rcrp = (p_rx_rcr_ring_t) 36806f45ec7bSml29623 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 36816f45ec7bSml29623 rcrp->rdc = dma_channel; 36826f45ec7bSml29623 36836f45ec7bSml29623 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 36846f45ec7bSml29623 rcrp->comp_size = nxge_port_rcr_size; 36856f45ec7bSml29623 rcrp->comp_wrap_mask = 
nxge_port_rcr_size - 1; 36866f45ec7bSml29623 36876f45ec7bSml29623 rcrp->max_receive_pkts = nxge_max_rx_pkts; 36886f45ec7bSml29623 36896f45ec7bSml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 36906f45ec7bSml29623 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 36916f45ec7bSml29623 sizeof (rcr_entry_t)); 36926f45ec7bSml29623 rcrp->comp_rd_index = 0; 36936f45ec7bSml29623 rcrp->comp_wt_index = 0; 36946f45ec7bSml29623 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 36956f45ec7bSml29623 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3696adfcba55Sjoycey #if defined(__i386) 369752ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3698adfcba55Sjoycey (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3699adfcba55Sjoycey #else 370052ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 37016f45ec7bSml29623 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3702adfcba55Sjoycey #endif 37036f45ec7bSml29623 37046f45ec7bSml29623 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 37056f45ec7bSml29623 (nxge_port_rcr_size - 1); 37066f45ec7bSml29623 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 37076f45ec7bSml29623 (nxge_port_rcr_size - 1); 37086f45ec7bSml29623 37096f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37106f45ec7bSml29623 "==> nxge_map_rxdma_channel_cfg_ring: " 37116f45ec7bSml29623 "channel %d " 37126f45ec7bSml29623 "rbr_vaddrp $%p " 37136f45ec7bSml29623 "rcr_desc_rd_head_p $%p " 37146f45ec7bSml29623 "rcr_desc_rd_head_pp $%p " 37156f45ec7bSml29623 "rcr_desc_rd_last_p $%p " 37166f45ec7bSml29623 "rcr_desc_rd_last_pp $%p ", 37176f45ec7bSml29623 dma_channel, 37186f45ec7bSml29623 rbr_vaddrp, 37196f45ec7bSml29623 rcrp->rcr_desc_rd_head_p, 37206f45ec7bSml29623 rcrp->rcr_desc_rd_head_pp, 37216f45ec7bSml29623 rcrp->rcr_desc_last_p, 37226f45ec7bSml29623 rcrp->rcr_desc_last_pp)); 37236f45ec7bSml29623 37246f45ec7bSml29623 /* 37256f45ec7bSml29623 * Zero out buffer block ring descriptors. 
37266f45ec7bSml29623 */ 37276f45ec7bSml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3728*7b26d9ffSSantwona Behera 3729*7b26d9ffSSantwona Behera rcrp->intr_timeout = (nxgep->intr_timeout < 3730*7b26d9ffSSantwona Behera NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 3731*7b26d9ffSSantwona Behera nxgep->intr_timeout; 3732*7b26d9ffSSantwona Behera 3733*7b26d9ffSSantwona Behera rcrp->intr_threshold = (nxgep->intr_threshold < 3734*7b26d9ffSSantwona Behera NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 3735*7b26d9ffSSantwona Behera nxgep->intr_threshold; 3736*7b26d9ffSSantwona Behera 37376f45ec7bSml29623 rcrp->full_hdr_flag = B_FALSE; 37386f45ec7bSml29623 rcrp->sw_priv_hdr_len = 0; 37396f45ec7bSml29623 37406f45ec7bSml29623 cfga_p = &(rcrp->rcr_cfga); 37416f45ec7bSml29623 cfgb_p = &(rcrp->rcr_cfgb); 37426f45ec7bSml29623 cfga_p->value = 0; 37436f45ec7bSml29623 cfgb_p->value = 0; 37446f45ec7bSml29623 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 37456f45ec7bSml29623 cfga_p->value = (rcrp->rcr_addr & 37466f45ec7bSml29623 (RCRCFIG_A_STADDR_MASK | 37476f45ec7bSml29623 RCRCFIG_A_STADDR_BASE_MASK)); 37486f45ec7bSml29623 37496f45ec7bSml29623 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 37506f45ec7bSml29623 RCRCFIG_A_LEN_SHIF); 37516f45ec7bSml29623 37526f45ec7bSml29623 /* 37536f45ec7bSml29623 * Timeout should be set based on the system clock divider. 3754*7b26d9ffSSantwona Behera * A timeout value of 1 assumes that the 37556f45ec7bSml29623 * granularity (1000) is 3 microseconds running at 300MHz. 
37566f45ec7bSml29623 */ 37576f45ec7bSml29623 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 37586f45ec7bSml29623 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 37596f45ec7bSml29623 cfgb_p->bits.ldw.entout = 1; 37606f45ec7bSml29623 37616f45ec7bSml29623 /* Map in the mailbox */ 37626f45ec7bSml29623 mboxp = (p_rx_mbox_t) 37636f45ec7bSml29623 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 37646f45ec7bSml29623 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 37656f45ec7bSml29623 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 37666f45ec7bSml29623 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 37676f45ec7bSml29623 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 37686f45ec7bSml29623 cfig1_p->value = cfig2_p->value = 0; 37696f45ec7bSml29623 37706f45ec7bSml29623 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 37716f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37726f45ec7bSml29623 "==> nxge_map_rxdma_channel_cfg_ring: " 37736f45ec7bSml29623 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 37746f45ec7bSml29623 dma_channel, cfig1_p->value, cfig2_p->value, 37756f45ec7bSml29623 mboxp->mbox_addr)); 37766f45ec7bSml29623 37776f45ec7bSml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 37786f45ec7bSml29623 & 0xfff); 37796f45ec7bSml29623 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 37806f45ec7bSml29623 37816f45ec7bSml29623 37826f45ec7bSml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 37836f45ec7bSml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 37846f45ec7bSml29623 RXDMA_CFIG2_MBADDR_L_MASK); 37856f45ec7bSml29623 37866f45ec7bSml29623 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 37876f45ec7bSml29623 37886f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37896f45ec7bSml29623 "==> nxge_map_rxdma_channel_cfg_ring: " 37906f45ec7bSml29623 "channel %d damaddrp $%p " 37916f45ec7bSml29623 "cfg1 0x%016llx cfig2 0x%016llx", 37926f45ec7bSml29623 dma_channel, dmaaddrp, 37936f45ec7bSml29623 
cfig1_p->value, cfig2_p->value)); 37946f45ec7bSml29623 37956f45ec7bSml29623 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 37966f45ec7bSml29623 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 37976f45ec7bSml29623 37986f45ec7bSml29623 rbrp->rx_rcr_p = rcrp; 37996f45ec7bSml29623 rcrp->rx_rbr_p = rbrp; 38006f45ec7bSml29623 *rcr_p = rcrp; 38016f45ec7bSml29623 *rx_mbox_p = mboxp; 38026f45ec7bSml29623 38036f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38046f45ec7bSml29623 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 38056f45ec7bSml29623 38066f45ec7bSml29623 return (status); 38076f45ec7bSml29623 } 38086f45ec7bSml29623 38096f45ec7bSml29623 /*ARGSUSED*/ 38106f45ec7bSml29623 static void 38116f45ec7bSml29623 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 38126f45ec7bSml29623 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 38136f45ec7bSml29623 { 38146f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38156f45ec7bSml29623 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 38166f45ec7bSml29623 rcr_p->rdc)); 38176f45ec7bSml29623 38186f45ec7bSml29623 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 38196f45ec7bSml29623 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 38206f45ec7bSml29623 38216f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38226f45ec7bSml29623 "<== nxge_unmap_rxdma_channel_cfg_ring")); 38236f45ec7bSml29623 } 38246f45ec7bSml29623 38256f45ec7bSml29623 static nxge_status_t 38266f45ec7bSml29623 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 38276f45ec7bSml29623 p_nxge_dma_common_t *dma_buf_p, 38286f45ec7bSml29623 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 38296f45ec7bSml29623 { 38306f45ec7bSml29623 p_rx_rbr_ring_t rbrp; 38316f45ec7bSml29623 p_nxge_dma_common_t dma_bufp, tmp_bufp; 38326f45ec7bSml29623 p_rx_msg_t *rx_msg_ring; 38336f45ec7bSml29623 p_rx_msg_t rx_msg_p; 38346f45ec7bSml29623 p_mblk_t mblk_p; 38356f45ec7bSml29623 38366f45ec7bSml29623 rxring_info_t *ring_info; 38376f45ec7bSml29623 nxge_status_t status = NXGE_OK; 
38386f45ec7bSml29623 int i, j, index; 38396f45ec7bSml29623 uint32_t size, bsize, nblocks, nmsgs; 38406f45ec7bSml29623 38416f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38426f45ec7bSml29623 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 38436f45ec7bSml29623 channel)); 38446f45ec7bSml29623 38456f45ec7bSml29623 dma_bufp = tmp_bufp = *dma_buf_p; 38466f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38476f45ec7bSml29623 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 38486f45ec7bSml29623 "chunks bufp 0x%016llx", 38496f45ec7bSml29623 channel, num_chunks, dma_bufp)); 38506f45ec7bSml29623 38516f45ec7bSml29623 nmsgs = 0; 38526f45ec7bSml29623 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 38536f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38546f45ec7bSml29623 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 38556f45ec7bSml29623 "bufp 0x%016llx nblocks %d nmsgs %d", 38566f45ec7bSml29623 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 38576f45ec7bSml29623 nmsgs += tmp_bufp->nblocks; 38586f45ec7bSml29623 } 38596f45ec7bSml29623 if (!nmsgs) { 386056d930aeSspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 38616f45ec7bSml29623 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 38626f45ec7bSml29623 "no msg blocks", 38636f45ec7bSml29623 channel)); 38646f45ec7bSml29623 status = NXGE_ERROR; 38656f45ec7bSml29623 goto nxge_map_rxdma_channel_buf_ring_exit; 38666f45ec7bSml29623 } 38676f45ec7bSml29623 3868007969e0Stm144005 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 38696f45ec7bSml29623 38706f45ec7bSml29623 size = nmsgs * sizeof (p_rx_msg_t); 38716f45ec7bSml29623 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 38726f45ec7bSml29623 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 38736f45ec7bSml29623 KM_SLEEP); 38746f45ec7bSml29623 38756f45ec7bSml29623 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 38766f45ec7bSml29623 (void *)nxgep->interrupt_cookie); 38776f45ec7bSml29623 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 38786f45ec7bSml29623 (void 
*)nxgep->interrupt_cookie); 38796f45ec7bSml29623 rbrp->rdc = channel; 38806f45ec7bSml29623 rbrp->num_blocks = num_chunks; 38816f45ec7bSml29623 rbrp->tnblocks = nmsgs; 38826f45ec7bSml29623 rbrp->rbb_max = nmsgs; 38836f45ec7bSml29623 rbrp->rbr_max_size = nmsgs; 38846f45ec7bSml29623 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 38856f45ec7bSml29623 38866f45ec7bSml29623 /* 38876f45ec7bSml29623 * Buffer sizes suggested by NIU architect. 38886f45ec7bSml29623 * 256, 512 and 2K. 38896f45ec7bSml29623 */ 38906f45ec7bSml29623 38916f45ec7bSml29623 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 38926f45ec7bSml29623 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 38936f45ec7bSml29623 rbrp->npi_pkt_buf_size0 = SIZE_256B; 38946f45ec7bSml29623 38956f45ec7bSml29623 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 38966f45ec7bSml29623 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 38976f45ec7bSml29623 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 38986f45ec7bSml29623 38996f45ec7bSml29623 rbrp->block_size = nxgep->rx_default_block_size; 39006f45ec7bSml29623 39016f45ec7bSml29623 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 39026f45ec7bSml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 39036f45ec7bSml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 39046f45ec7bSml29623 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 39056f45ec7bSml29623 } else { 39066f45ec7bSml29623 if (rbrp->block_size >= 0x2000) { 39076f45ec7bSml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 39086f45ec7bSml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 39096f45ec7bSml29623 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 39106f45ec7bSml29623 } else { 39116f45ec7bSml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 39126f45ec7bSml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 39136f45ec7bSml29623 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 39146f45ec7bSml29623 } 39156f45ec7bSml29623 } 39166f45ec7bSml29623 39176f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39186f45ec7bSml29623 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 
39196f45ec7bSml29623 "actual rbr max %d rbb_max %d nmsgs %d " 39206f45ec7bSml29623 "rbrp->block_size %d default_block_size %d " 39216f45ec7bSml29623 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 39226f45ec7bSml29623 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 39236f45ec7bSml29623 rbrp->block_size, nxgep->rx_default_block_size, 39246f45ec7bSml29623 nxge_rbr_size, nxge_rbr_spare_size)); 39256f45ec7bSml29623 39266f45ec7bSml29623 /* Map in buffers from the buffer pool. */ 39276f45ec7bSml29623 index = 0; 39286f45ec7bSml29623 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 39296f45ec7bSml29623 bsize = dma_bufp->block_size; 39306f45ec7bSml29623 nblocks = dma_bufp->nblocks; 3931adfcba55Sjoycey #if defined(__i386) 3932adfcba55Sjoycey ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3933adfcba55Sjoycey #else 39346f45ec7bSml29623 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3935adfcba55Sjoycey #endif 39366f45ec7bSml29623 ring_info->buffer[i].buf_index = i; 39376f45ec7bSml29623 ring_info->buffer[i].buf_size = dma_bufp->alength; 39386f45ec7bSml29623 ring_info->buffer[i].start_index = index; 3939adfcba55Sjoycey #if defined(__i386) 3940adfcba55Sjoycey ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3941adfcba55Sjoycey #else 39426f45ec7bSml29623 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3943adfcba55Sjoycey #endif 39446f45ec7bSml29623 39456f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39466f45ec7bSml29623 " nxge_map_rxdma_channel_buf_ring: map channel %d " 39476f45ec7bSml29623 "chunk %d" 39486f45ec7bSml29623 " nblocks %d chunk_size %x block_size 0x%x " 39496f45ec7bSml29623 "dma_bufp $%p", channel, i, 39506f45ec7bSml29623 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 39516f45ec7bSml29623 dma_bufp)); 39526f45ec7bSml29623 39536f45ec7bSml29623 for (j = 0; j < nblocks; j++) { 39546f45ec7bSml29623 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 39556f45ec7bSml29623 dma_bufp)) == NULL) { 
395656d930aeSspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 395756d930aeSspeer "allocb failed (index %d i %d j %d)", 395856d930aeSspeer index, i, j)); 395956d930aeSspeer goto nxge_map_rxdma_channel_buf_ring_fail1; 39606f45ec7bSml29623 } 39616f45ec7bSml29623 rx_msg_ring[index] = rx_msg_p; 39626f45ec7bSml29623 rx_msg_p->block_index = index; 39636f45ec7bSml29623 rx_msg_p->shifted_addr = (uint32_t) 39646f45ec7bSml29623 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 39656f45ec7bSml29623 RBR_BKADDR_SHIFT)); 39666f45ec7bSml29623 39676f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 396856d930aeSspeer "index %d j %d rx_msg_p $%p mblk %p", 396956d930aeSspeer index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 39706f45ec7bSml29623 39716f45ec7bSml29623 mblk_p = rx_msg_p->rx_mblk_p; 39726f45ec7bSml29623 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3973007969e0Stm144005 3974007969e0Stm144005 rbrp->rbr_ref_cnt++; 39756f45ec7bSml29623 index++; 39766f45ec7bSml29623 rx_msg_p->buf_dma.dma_channel = channel; 39776f45ec7bSml29623 } 3978678453a8Sspeer 3979678453a8Sspeer rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3980678453a8Sspeer if (dma_bufp->contig_alloc_type) { 3981678453a8Sspeer rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3982678453a8Sspeer } 3983678453a8Sspeer 3984678453a8Sspeer if (dma_bufp->kmem_alloc_type) { 3985678453a8Sspeer rbrp->rbr_alloc_type = KMEM_ALLOC; 3986678453a8Sspeer } 3987678453a8Sspeer 3988678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3989678453a8Sspeer " nxge_map_rxdma_channel_buf_ring: map channel %d " 3990678453a8Sspeer "chunk %d" 3991678453a8Sspeer " nblocks %d chunk_size %x block_size 0x%x " 3992678453a8Sspeer "dma_bufp $%p", 3993678453a8Sspeer channel, i, 3994678453a8Sspeer dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3995678453a8Sspeer dma_bufp)); 39966f45ec7bSml29623 } 39976f45ec7bSml29623 if (i < rbrp->num_blocks) { 39986f45ec7bSml29623 goto nxge_map_rxdma_channel_buf_ring_fail1; 39996f45ec7bSml29623 } 40006f45ec7bSml29623 40016f45ec7bSml29623 
NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40026f45ec7bSml29623 "nxge_map_rxdma_channel_buf_ring: done buf init " 40036f45ec7bSml29623 "channel %d msg block entries %d", 40046f45ec7bSml29623 channel, index)); 40056f45ec7bSml29623 ring_info->block_size_mask = bsize - 1; 40066f45ec7bSml29623 rbrp->rx_msg_ring = rx_msg_ring; 40076f45ec7bSml29623 rbrp->dma_bufp = dma_buf_p; 40086f45ec7bSml29623 rbrp->ring_info = ring_info; 40096f45ec7bSml29623 40106f45ec7bSml29623 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 40116f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40126f45ec7bSml29623 " nxge_map_rxdma_channel_buf_ring: " 40136f45ec7bSml29623 "channel %d done buf info init", channel)); 40146f45ec7bSml29623 4015007969e0Stm144005 /* 4016007969e0Stm144005 * Finally, permit nxge_freeb() to call nxge_post_page(). 4017007969e0Stm144005 */ 4018007969e0Stm144005 rbrp->rbr_state = RBR_POSTING; 4019007969e0Stm144005 40206f45ec7bSml29623 *rbr_p = rbrp; 40216f45ec7bSml29623 goto nxge_map_rxdma_channel_buf_ring_exit; 40226f45ec7bSml29623 40236f45ec7bSml29623 nxge_map_rxdma_channel_buf_ring_fail1: 40246f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40256f45ec7bSml29623 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 40266f45ec7bSml29623 channel, status)); 40276f45ec7bSml29623 40286f45ec7bSml29623 index--; 40296f45ec7bSml29623 for (; index >= 0; index--) { 40306f45ec7bSml29623 rx_msg_p = rx_msg_ring[index]; 40316f45ec7bSml29623 if (rx_msg_p != NULL) { 40326f45ec7bSml29623 freeb(rx_msg_p->rx_mblk_p); 40336f45ec7bSml29623 rx_msg_ring[index] = NULL; 40346f45ec7bSml29623 } 40356f45ec7bSml29623 } 40366f45ec7bSml29623 nxge_map_rxdma_channel_buf_ring_fail: 40376f45ec7bSml29623 MUTEX_DESTROY(&rbrp->post_lock); 40386f45ec7bSml29623 MUTEX_DESTROY(&rbrp->lock); 40396f45ec7bSml29623 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 40406f45ec7bSml29623 KMEM_FREE(rx_msg_ring, size); 40416f45ec7bSml29623 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 40426f45ec7bSml29623 404356d930aeSspeer status = 
NXGE_ERROR; 404456d930aeSspeer 40456f45ec7bSml29623 nxge_map_rxdma_channel_buf_ring_exit: 40466f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40476f45ec7bSml29623 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 40486f45ec7bSml29623 40496f45ec7bSml29623 return (status); 40506f45ec7bSml29623 } 40516f45ec7bSml29623 40526f45ec7bSml29623 /*ARGSUSED*/ 40536f45ec7bSml29623 static void 40546f45ec7bSml29623 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 40556f45ec7bSml29623 p_rx_rbr_ring_t rbr_p) 40566f45ec7bSml29623 { 40576f45ec7bSml29623 p_rx_msg_t *rx_msg_ring; 40586f45ec7bSml29623 p_rx_msg_t rx_msg_p; 40596f45ec7bSml29623 rxring_info_t *ring_info; 40606f45ec7bSml29623 int i; 40616f45ec7bSml29623 uint32_t size; 40626f45ec7bSml29623 #ifdef NXGE_DEBUG 40636f45ec7bSml29623 int num_chunks; 40646f45ec7bSml29623 #endif 40656f45ec7bSml29623 40666f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40676f45ec7bSml29623 "==> nxge_unmap_rxdma_channel_buf_ring")); 40686f45ec7bSml29623 if (rbr_p == NULL) { 40696f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 40706f45ec7bSml29623 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 40716f45ec7bSml29623 return; 40726f45ec7bSml29623 } 40736f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40746f45ec7bSml29623 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 40756f45ec7bSml29623 rbr_p->rdc)); 40766f45ec7bSml29623 40776f45ec7bSml29623 rx_msg_ring = rbr_p->rx_msg_ring; 40786f45ec7bSml29623 ring_info = rbr_p->ring_info; 40796f45ec7bSml29623 40806f45ec7bSml29623 if (rx_msg_ring == NULL || ring_info == NULL) { 40816f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40826f45ec7bSml29623 "<== nxge_unmap_rxdma_channel_buf_ring: " 40836f45ec7bSml29623 "rx_msg_ring $%p ring_info $%p", 40846f45ec7bSml29623 rx_msg_p, ring_info)); 40856f45ec7bSml29623 return; 40866f45ec7bSml29623 } 40876f45ec7bSml29623 40886f45ec7bSml29623 #ifdef NXGE_DEBUG 40896f45ec7bSml29623 num_chunks = rbr_p->num_blocks; 40906f45ec7bSml29623 #endif 
40916f45ec7bSml29623 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 40926f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40936f45ec7bSml29623 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 40946f45ec7bSml29623 "tnblocks %d (max %d) size ptrs %d ", 40956f45ec7bSml29623 rbr_p->rdc, num_chunks, 40966f45ec7bSml29623 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 40976f45ec7bSml29623 40986f45ec7bSml29623 for (i = 0; i < rbr_p->tnblocks; i++) { 40996f45ec7bSml29623 rx_msg_p = rx_msg_ring[i]; 41006f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 41016f45ec7bSml29623 " nxge_unmap_rxdma_channel_buf_ring: " 41026f45ec7bSml29623 "rx_msg_p $%p", 41036f45ec7bSml29623 rx_msg_p)); 41046f45ec7bSml29623 if (rx_msg_p != NULL) { 41056f45ec7bSml29623 freeb(rx_msg_p->rx_mblk_p); 41066f45ec7bSml29623 rx_msg_ring[i] = NULL; 41076f45ec7bSml29623 } 41086f45ec7bSml29623 } 41096f45ec7bSml29623 4110007969e0Stm144005 /* 4111007969e0Stm144005 * We no longer may use the mutex <post_lock>. By setting 4112007969e0Stm144005 * <rbr_state> to anything but POSTING, we prevent 4113007969e0Stm144005 * nxge_post_page() from accessing a dead mutex. 4114007969e0Stm144005 */ 4115007969e0Stm144005 rbr_p->rbr_state = RBR_UNMAPPING; 41166f45ec7bSml29623 MUTEX_DESTROY(&rbr_p->post_lock); 4117007969e0Stm144005 41186f45ec7bSml29623 MUTEX_DESTROY(&rbr_p->lock); 4119007969e0Stm144005 4120007969e0Stm144005 if (rbr_p->rbr_ref_cnt == 0) { 4121678453a8Sspeer /* 4122678453a8Sspeer * This is the normal state of affairs. 
4123678453a8Sspeer * Need to free the following buffers: 4124678453a8Sspeer * - data buffers 4125678453a8Sspeer * - rx_msg ring 4126678453a8Sspeer * - ring_info 4127678453a8Sspeer * - rbr ring 4128678453a8Sspeer */ 4129678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 4130678453a8Sspeer "unmap_rxdma_buf_ring: No outstanding - freeing ")); 4131678453a8Sspeer nxge_rxdma_databuf_free(rbr_p); 4132678453a8Sspeer KMEM_FREE(ring_info, sizeof (rxring_info_t)); 4133678453a8Sspeer KMEM_FREE(rx_msg_ring, size); 4134007969e0Stm144005 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 4135007969e0Stm144005 } else { 4136007969e0Stm144005 /* 4137007969e0Stm144005 * Some of our buffers are still being used. 4138007969e0Stm144005 * Therefore, tell nxge_freeb() this ring is 4139007969e0Stm144005 * unmapped, so it may free <rbr_p> for us. 4140007969e0Stm144005 */ 4141007969e0Stm144005 rbr_p->rbr_state = RBR_UNMAPPED; 4142007969e0Stm144005 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4143007969e0Stm144005 "unmap_rxdma_buf_ring: %d %s outstanding.", 4144007969e0Stm144005 rbr_p->rbr_ref_cnt, 4145007969e0Stm144005 rbr_p->rbr_ref_cnt == 1 ? 
"msg" : "msgs")); 4146007969e0Stm144005 } 41476f45ec7bSml29623 41486f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 41496f45ec7bSml29623 "<== nxge_unmap_rxdma_channel_buf_ring")); 41506f45ec7bSml29623 } 41516f45ec7bSml29623 4152678453a8Sspeer /* 4153678453a8Sspeer * nxge_rxdma_hw_start_common 4154678453a8Sspeer * 4155678453a8Sspeer * Arguments: 4156678453a8Sspeer * nxgep 4157678453a8Sspeer * 4158678453a8Sspeer * Notes: 4159678453a8Sspeer * 4160678453a8Sspeer * NPI/NXGE function calls: 4161678453a8Sspeer * nxge_init_fzc_rx_common(); 4162678453a8Sspeer * nxge_init_fzc_rxdma_port(); 4163678453a8Sspeer * 4164678453a8Sspeer * Registers accessed: 4165678453a8Sspeer * 4166678453a8Sspeer * Context: 4167678453a8Sspeer * Service domain 4168678453a8Sspeer */ 41696f45ec7bSml29623 static nxge_status_t 41706f45ec7bSml29623 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 41716f45ec7bSml29623 { 41726f45ec7bSml29623 nxge_status_t status = NXGE_OK; 41736f45ec7bSml29623 41746f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 41756f45ec7bSml29623 41766f45ec7bSml29623 /* 41776f45ec7bSml29623 * Load the sharable parameters by writing to the 41786f45ec7bSml29623 * function zero control registers. These FZC registers 41796f45ec7bSml29623 * should be initialized only once for the entire chip. 41806f45ec7bSml29623 */ 41816f45ec7bSml29623 (void) nxge_init_fzc_rx_common(nxgep); 41826f45ec7bSml29623 41836f45ec7bSml29623 /* 41846f45ec7bSml29623 * Initialize the RXDMA port specific FZC control configurations. 41856f45ec7bSml29623 * These FZC registers are pertaining to each port. 
41866f45ec7bSml29623 */ 41876f45ec7bSml29623 (void) nxge_init_fzc_rxdma_port(nxgep); 41886f45ec7bSml29623 41896f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 41906f45ec7bSml29623 41916f45ec7bSml29623 return (status); 41926f45ec7bSml29623 } 41936f45ec7bSml29623 41946f45ec7bSml29623 static nxge_status_t 4195678453a8Sspeer nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 41966f45ec7bSml29623 { 41976f45ec7bSml29623 int i, ndmas; 41986f45ec7bSml29623 p_rx_rbr_rings_t rx_rbr_rings; 41996f45ec7bSml29623 p_rx_rbr_ring_t *rbr_rings; 42006f45ec7bSml29623 p_rx_rcr_rings_t rx_rcr_rings; 42016f45ec7bSml29623 p_rx_rcr_ring_t *rcr_rings; 42026f45ec7bSml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 42036f45ec7bSml29623 p_rx_mbox_t *rx_mbox_p; 42046f45ec7bSml29623 nxge_status_t status = NXGE_OK; 42056f45ec7bSml29623 42066f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 42076f45ec7bSml29623 42086f45ec7bSml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 42096f45ec7bSml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 42106f45ec7bSml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 42116f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 42126f45ec7bSml29623 "<== nxge_rxdma_hw_start: NULL ring pointers")); 42136f45ec7bSml29623 return (NXGE_ERROR); 42146f45ec7bSml29623 } 42156f45ec7bSml29623 ndmas = rx_rbr_rings->ndmas; 42166f45ec7bSml29623 if (ndmas == 0) { 42176f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 42186f45ec7bSml29623 "<== nxge_rxdma_hw_start: no dma channel allocated")); 42196f45ec7bSml29623 return (NXGE_ERROR); 42206f45ec7bSml29623 } 42216f45ec7bSml29623 42226f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 42236f45ec7bSml29623 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 42246f45ec7bSml29623 42256f45ec7bSml29623 rbr_rings = rx_rbr_rings->rbr_rings; 42266f45ec7bSml29623 rcr_rings = rx_rcr_rings->rcr_rings; 42276f45ec7bSml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 42286f45ec7bSml29623 if (rx_mbox_areas_p) { 
42296f45ec7bSml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 42306f45ec7bSml29623 } 42316f45ec7bSml29623 4232678453a8Sspeer i = channel; 42336f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 42346f45ec7bSml29623 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 42356f45ec7bSml29623 ndmas, channel)); 42366f45ec7bSml29623 status = nxge_rxdma_start_channel(nxgep, channel, 42376f45ec7bSml29623 (p_rx_rbr_ring_t)rbr_rings[i], 42386f45ec7bSml29623 (p_rx_rcr_ring_t)rcr_rings[i], 42396f45ec7bSml29623 (p_rx_mbox_t)rx_mbox_p[i]); 42406f45ec7bSml29623 if (status != NXGE_OK) { 4241678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4242678453a8Sspeer "==> nxge_rxdma_hw_start: disable " 4243678453a8Sspeer "(status 0x%x channel %d)", status, channel)); 4244678453a8Sspeer return (status); 42456f45ec7bSml29623 } 42466f45ec7bSml29623 42476f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 42486f45ec7bSml29623 "rx_rbr_rings 0x%016llx rings 0x%016llx", 42496f45ec7bSml29623 rx_rbr_rings, rx_rcr_rings)); 42506f45ec7bSml29623 42516f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 42526f45ec7bSml29623 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 42536f45ec7bSml29623 42546f45ec7bSml29623 return (status); 42556f45ec7bSml29623 } 42566f45ec7bSml29623 42576f45ec7bSml29623 static void 4258678453a8Sspeer nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 42596f45ec7bSml29623 { 42606f45ec7bSml29623 p_rx_rbr_rings_t rx_rbr_rings; 42616f45ec7bSml29623 p_rx_rcr_rings_t rx_rcr_rings; 42626f45ec7bSml29623 42636f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 42646f45ec7bSml29623 42656f45ec7bSml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 42666f45ec7bSml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 42676f45ec7bSml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 42686f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 42696f45ec7bSml29623 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 42706f45ec7bSml29623 return; 42716f45ec7bSml29623 } 
42726f45ec7bSml29623 42736f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4274678453a8Sspeer "==> nxge_rxdma_hw_stop(channel %d)", 4275678453a8Sspeer channel)); 42766f45ec7bSml29623 (void) nxge_rxdma_stop_channel(nxgep, channel); 42776f45ec7bSml29623 42786f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 42796f45ec7bSml29623 "rx_rbr_rings 0x%016llx rings 0x%016llx", 42806f45ec7bSml29623 rx_rbr_rings, rx_rcr_rings)); 42816f45ec7bSml29623 42826f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 42836f45ec7bSml29623 } 42846f45ec7bSml29623 42856f45ec7bSml29623 42866f45ec7bSml29623 static nxge_status_t 42876f45ec7bSml29623 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 42886f45ec7bSml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 42896f45ec7bSml29623 42906f45ec7bSml29623 { 42916f45ec7bSml29623 npi_handle_t handle; 42926f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 42936f45ec7bSml29623 rx_dma_ctl_stat_t cs; 42946f45ec7bSml29623 rx_dma_ent_msk_t ent_mask; 42956f45ec7bSml29623 nxge_status_t status = NXGE_OK; 42966f45ec7bSml29623 42976f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 42986f45ec7bSml29623 42996f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 43006f45ec7bSml29623 43016f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 43026f45ec7bSml29623 "npi handle addr $%p acc $%p", 43036f45ec7bSml29623 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 43046f45ec7bSml29623 4305678453a8Sspeer /* Reset RXDMA channel, but not if you're a guest. 
*/ 4306678453a8Sspeer if (!isLDOMguest(nxgep)) { 43076f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 43086f45ec7bSml29623 if (rs != NPI_SUCCESS) { 43096f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4310678453a8Sspeer "==> nxge_init_fzc_rdc: " 4311678453a8Sspeer "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4312678453a8Sspeer channel, rs)); 43136f45ec7bSml29623 return (NXGE_ERROR | rs); 43146f45ec7bSml29623 } 43156f45ec7bSml29623 43166f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 43176f45ec7bSml29623 "==> nxge_rxdma_start_channel: reset done: channel %d", 43186f45ec7bSml29623 channel)); 4319678453a8Sspeer } 4320678453a8Sspeer 4321678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4322678453a8Sspeer if (isLDOMguest(nxgep)) 4323678453a8Sspeer (void) nxge_rdc_lp_conf(nxgep, channel); 4324678453a8Sspeer #endif 43256f45ec7bSml29623 43266f45ec7bSml29623 /* 43276f45ec7bSml29623 * Initialize the RXDMA channel specific FZC control 43286f45ec7bSml29623 * configurations. These FZC registers are pertaining 43296f45ec7bSml29623 * to each RX channel (logical pages). 43306f45ec7bSml29623 */ 4331678453a8Sspeer if (!isLDOMguest(nxgep)) { 4332678453a8Sspeer status = nxge_init_fzc_rxdma_channel(nxgep, channel); 43336f45ec7bSml29623 if (status != NXGE_OK) { 43346f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43356f45ec7bSml29623 "==> nxge_rxdma_start_channel: " 43366f45ec7bSml29623 "init fzc rxdma failed (0x%08x channel %d)", 43376f45ec7bSml29623 status, channel)); 43386f45ec7bSml29623 return (status); 43396f45ec7bSml29623 } 43406f45ec7bSml29623 43416f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 43426f45ec7bSml29623 "==> nxge_rxdma_start_channel: fzc done")); 4343678453a8Sspeer } 43446f45ec7bSml29623 43456f45ec7bSml29623 /* Set up the interrupt event masks. 
*/ 43466f45ec7bSml29623 ent_mask.value = 0; 43476f45ec7bSml29623 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 43486f45ec7bSml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 43496f45ec7bSml29623 &ent_mask); 43506f45ec7bSml29623 if (rs != NPI_SUCCESS) { 43516f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43526f45ec7bSml29623 "==> nxge_rxdma_start_channel: " 4353678453a8Sspeer "init rxdma event masks failed " 4354678453a8Sspeer "(0x%08x channel %d)", 43556f45ec7bSml29623 status, channel)); 43566f45ec7bSml29623 return (NXGE_ERROR | rs); 43576f45ec7bSml29623 } 43586f45ec7bSml29623 4359678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4360678453a8Sspeer "==> nxge_rxdma_start_channel: " 43616f45ec7bSml29623 "event done: channel %d (mask 0x%016llx)", 43626f45ec7bSml29623 channel, ent_mask.value)); 43636f45ec7bSml29623 43646f45ec7bSml29623 /* Initialize the receive DMA control and status register */ 43656f45ec7bSml29623 cs.value = 0; 43666f45ec7bSml29623 cs.bits.hdw.mex = 1; 43676f45ec7bSml29623 cs.bits.hdw.rcrthres = 1; 43686f45ec7bSml29623 cs.bits.hdw.rcrto = 1; 43696f45ec7bSml29623 cs.bits.hdw.rbr_empty = 1; 43706f45ec7bSml29623 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 43716f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 43726f45ec7bSml29623 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 43736f45ec7bSml29623 if (status != NXGE_OK) { 43746f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43756f45ec7bSml29623 "==> nxge_rxdma_start_channel: " 43766f45ec7bSml29623 "init rxdma control register failed (0x%08x channel %d", 43776f45ec7bSml29623 status, channel)); 43786f45ec7bSml29623 return (status); 43796f45ec7bSml29623 } 43806f45ec7bSml29623 43816f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 43826f45ec7bSml29623 "control done - channel %d cs 0x%016llx", channel, cs.value)); 43836f45ec7bSml29623 43846f45ec7bSml29623 /* 43856f45ec7bSml29623 * Load 
RXDMA descriptors, buffers, mailbox, 43866f45ec7bSml29623 * initialise the receive DMA channels and 43876f45ec7bSml29623 * enable each DMA channel. 43886f45ec7bSml29623 */ 43896f45ec7bSml29623 status = nxge_enable_rxdma_channel(nxgep, 43906f45ec7bSml29623 channel, rbr_p, rcr_p, mbox_p); 43916f45ec7bSml29623 43926f45ec7bSml29623 if (status != NXGE_OK) { 43936f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43946f45ec7bSml29623 " nxge_rxdma_start_channel: " 4395678453a8Sspeer " enable rxdma failed (0x%08x channel %d)", 43966f45ec7bSml29623 status, channel)); 43976f45ec7bSml29623 return (status); 43986f45ec7bSml29623 } 43996f45ec7bSml29623 4400678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4401678453a8Sspeer "==> nxge_rxdma_start_channel: enabled channel %d")); 4402678453a8Sspeer 4403678453a8Sspeer if (isLDOMguest(nxgep)) { 4404678453a8Sspeer /* Add interrupt handler for this channel. */ 4405678453a8Sspeer if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 4406678453a8Sspeer != NXGE_OK) { 4407678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4408678453a8Sspeer " nxge_rxdma_start_channel: " 4409678453a8Sspeer " nxge_hio_intr_add failed (0x%08x channel %d)", 4410678453a8Sspeer status, channel)); 4411678453a8Sspeer } 4412678453a8Sspeer } 4413678453a8Sspeer 44146f45ec7bSml29623 ent_mask.value = 0; 44156f45ec7bSml29623 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 44166f45ec7bSml29623 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 44176f45ec7bSml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 44186f45ec7bSml29623 &ent_mask); 44196f45ec7bSml29623 if (rs != NPI_SUCCESS) { 44206f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 44216f45ec7bSml29623 "==> nxge_rxdma_start_channel: " 44226f45ec7bSml29623 "init rxdma event masks failed (0x%08x channel %d)", 44236f45ec7bSml29623 status, channel)); 44246f45ec7bSml29623 return (NXGE_ERROR | rs); 44256f45ec7bSml29623 } 44266f45ec7bSml29623 44276f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 
44286f45ec7bSml29623 "control done - channel %d cs 0x%016llx", channel, cs.value)); 44296f45ec7bSml29623 44306f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 44316f45ec7bSml29623 44326f45ec7bSml29623 return (NXGE_OK); 44336f45ec7bSml29623 } 44346f45ec7bSml29623 44356f45ec7bSml29623 static nxge_status_t 44366f45ec7bSml29623 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 44376f45ec7bSml29623 { 44386f45ec7bSml29623 npi_handle_t handle; 44396f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 44406f45ec7bSml29623 rx_dma_ctl_stat_t cs; 44416f45ec7bSml29623 rx_dma_ent_msk_t ent_mask; 44426f45ec7bSml29623 nxge_status_t status = NXGE_OK; 44436f45ec7bSml29623 44446f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 44456f45ec7bSml29623 44466f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 44476f45ec7bSml29623 44486f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 44496f45ec7bSml29623 "npi handle addr $%p acc $%p", 44506f45ec7bSml29623 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 44516f45ec7bSml29623 4452330cd344SMichael Speer if (!isLDOMguest(nxgep)) { 4453330cd344SMichael Speer /* 4454330cd344SMichael Speer * Stop RxMAC = A.9.2.6 4455330cd344SMichael Speer */ 4456330cd344SMichael Speer if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 4457330cd344SMichael Speer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4458330cd344SMichael Speer "nxge_rxdma_stop_channel: " 4459330cd344SMichael Speer "Failed to disable RxMAC")); 4460330cd344SMichael Speer } 4461330cd344SMichael Speer 4462330cd344SMichael Speer /* 4463330cd344SMichael Speer * Drain IPP Port = A.9.3.6 4464330cd344SMichael Speer */ 4465330cd344SMichael Speer (void) nxge_ipp_drain(nxgep); 4466330cd344SMichael Speer } 4467330cd344SMichael Speer 44686f45ec7bSml29623 /* Reset RXDMA channel */ 44696f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 44706f45ec7bSml29623 if (rs != NPI_SUCCESS) { 44716f45ec7bSml29623 
NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44726f45ec7bSml29623 " nxge_rxdma_stop_channel: " 44736f45ec7bSml29623 " reset rxdma failed (0x%08x channel %d)", 44746f45ec7bSml29623 rs, channel)); 44756f45ec7bSml29623 return (NXGE_ERROR | rs); 44766f45ec7bSml29623 } 44776f45ec7bSml29623 44786f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 44796f45ec7bSml29623 "==> nxge_rxdma_stop_channel: reset done")); 44806f45ec7bSml29623 44816f45ec7bSml29623 /* Set up the interrupt event masks. */ 44826f45ec7bSml29623 ent_mask.value = RX_DMA_ENT_MSK_ALL; 44836f45ec7bSml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 44846f45ec7bSml29623 &ent_mask); 44856f45ec7bSml29623 if (rs != NPI_SUCCESS) { 44866f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44876f45ec7bSml29623 "==> nxge_rxdma_stop_channel: " 44886f45ec7bSml29623 "set rxdma event masks failed (0x%08x channel %d)", 44896f45ec7bSml29623 rs, channel)); 44906f45ec7bSml29623 return (NXGE_ERROR | rs); 44916f45ec7bSml29623 } 44926f45ec7bSml29623 44936f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 44946f45ec7bSml29623 "==> nxge_rxdma_stop_channel: event done")); 44956f45ec7bSml29623 4496330cd344SMichael Speer /* 4497330cd344SMichael Speer * Initialize the receive DMA control and status register 4498330cd344SMichael Speer */ 44996f45ec7bSml29623 cs.value = 0; 4500330cd344SMichael Speer status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 45016f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 45026f45ec7bSml29623 " to default (all 0s) 0x%08x", cs.value)); 45036f45ec7bSml29623 if (status != NXGE_OK) { 45046f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45056f45ec7bSml29623 " nxge_rxdma_stop_channel: init rxdma" 45066f45ec7bSml29623 " control register failed (0x%08x channel %d", 45076f45ec7bSml29623 status, channel)); 45086f45ec7bSml29623 return (status); 45096f45ec7bSml29623 } 45106f45ec7bSml29623 45116f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 45126f45ec7bSml29623 "==> 
nxge_rxdma_stop_channel: control done")); 45136f45ec7bSml29623 4514330cd344SMichael Speer /* 4515330cd344SMichael Speer * Make sure channel is disabled. 4516330cd344SMichael Speer */ 45176f45ec7bSml29623 status = nxge_disable_rxdma_channel(nxgep, channel); 4518da14cebeSEric Cheng 45196f45ec7bSml29623 if (status != NXGE_OK) { 45206f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45216f45ec7bSml29623 " nxge_rxdma_stop_channel: " 45226f45ec7bSml29623 " init enable rxdma failed (0x%08x channel %d)", 45236f45ec7bSml29623 status, channel)); 45246f45ec7bSml29623 return (status); 45256f45ec7bSml29623 } 45266f45ec7bSml29623 4527330cd344SMichael Speer if (!isLDOMguest(nxgep)) { 4528330cd344SMichael Speer /* 4529330cd344SMichael Speer * Enable RxMAC = A.9.2.10 4530330cd344SMichael Speer */ 4531330cd344SMichael Speer if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 4532330cd344SMichael Speer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4533330cd344SMichael Speer "nxge_rxdma_stop_channel: Rx MAC still disabled")); 4534330cd344SMichael Speer } 4535330cd344SMichael Speer } 4536330cd344SMichael Speer 45376f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, 45386f45ec7bSml29623 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 45396f45ec7bSml29623 45406f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 45416f45ec7bSml29623 45426f45ec7bSml29623 return (NXGE_OK); 45436f45ec7bSml29623 } 45446f45ec7bSml29623 45456f45ec7bSml29623 nxge_status_t 45466f45ec7bSml29623 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 45476f45ec7bSml29623 { 45486f45ec7bSml29623 npi_handle_t handle; 45496f45ec7bSml29623 p_nxge_rdc_sys_stats_t statsp; 45506f45ec7bSml29623 rx_ctl_dat_fifo_stat_t stat; 45516f45ec7bSml29623 uint32_t zcp_err_status; 45526f45ec7bSml29623 uint32_t ipp_err_status; 45536f45ec7bSml29623 nxge_status_t status = NXGE_OK; 45546f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 45556f45ec7bSml29623 boolean_t my_err = B_FALSE; 45566f45ec7bSml29623 45576f45ec7bSml29623 handle = 
nxgep->npi_handle; 45586f45ec7bSml29623 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 45596f45ec7bSml29623 45606f45ec7bSml29623 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 45616f45ec7bSml29623 45626f45ec7bSml29623 if (rs != NPI_SUCCESS) 45636f45ec7bSml29623 return (NXGE_ERROR | rs); 45646f45ec7bSml29623 45656f45ec7bSml29623 if (stat.bits.ldw.id_mismatch) { 45666f45ec7bSml29623 statsp->id_mismatch++; 45676f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 45686f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 45696f45ec7bSml29623 /* Global fatal error encountered */ 45706f45ec7bSml29623 } 45716f45ec7bSml29623 45726f45ec7bSml29623 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 45736f45ec7bSml29623 switch (nxgep->mac.portnum) { 45746f45ec7bSml29623 case 0: 45756f45ec7bSml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 45766f45ec7bSml29623 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 45776f45ec7bSml29623 my_err = B_TRUE; 45786f45ec7bSml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 45796f45ec7bSml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 45806f45ec7bSml29623 } 45816f45ec7bSml29623 break; 45826f45ec7bSml29623 case 1: 45836f45ec7bSml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 45846f45ec7bSml29623 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 45856f45ec7bSml29623 my_err = B_TRUE; 45866f45ec7bSml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 45876f45ec7bSml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 45886f45ec7bSml29623 } 45896f45ec7bSml29623 break; 45906f45ec7bSml29623 case 2: 45916f45ec7bSml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 45926f45ec7bSml29623 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 45936f45ec7bSml29623 my_err = B_TRUE; 45946f45ec7bSml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 45956f45ec7bSml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 45966f45ec7bSml29623 } 45976f45ec7bSml29623 break; 45986f45ec7bSml29623 
case 3: 45996f45ec7bSml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 46006f45ec7bSml29623 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 46016f45ec7bSml29623 my_err = B_TRUE; 46026f45ec7bSml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 46036f45ec7bSml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 46046f45ec7bSml29623 } 46056f45ec7bSml29623 break; 46066f45ec7bSml29623 default: 46076f45ec7bSml29623 return (NXGE_ERROR); 46086f45ec7bSml29623 } 46096f45ec7bSml29623 } 46106f45ec7bSml29623 46116f45ec7bSml29623 if (my_err) { 46126f45ec7bSml29623 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 46136f45ec7bSml29623 zcp_err_status); 46146f45ec7bSml29623 if (status != NXGE_OK) 46156f45ec7bSml29623 return (status); 46166f45ec7bSml29623 } 46176f45ec7bSml29623 46186f45ec7bSml29623 return (NXGE_OK); 46196f45ec7bSml29623 } 46206f45ec7bSml29623 46216f45ec7bSml29623 static nxge_status_t 46226f45ec7bSml29623 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 46236f45ec7bSml29623 uint32_t zcp_status) 46246f45ec7bSml29623 { 46256f45ec7bSml29623 boolean_t rxport_fatal = B_FALSE; 46266f45ec7bSml29623 p_nxge_rdc_sys_stats_t statsp; 46276f45ec7bSml29623 nxge_status_t status = NXGE_OK; 46286f45ec7bSml29623 uint8_t portn; 46296f45ec7bSml29623 46306f45ec7bSml29623 portn = nxgep->mac.portnum; 46316f45ec7bSml29623 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 46326f45ec7bSml29623 46336f45ec7bSml29623 if (ipp_status & (0x1 << portn)) { 46346f45ec7bSml29623 statsp->ipp_eop_err++; 46356f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 46366f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 46376f45ec7bSml29623 rxport_fatal = B_TRUE; 46386f45ec7bSml29623 } 46396f45ec7bSml29623 46406f45ec7bSml29623 if (zcp_status & (0x1 << portn)) { 46416f45ec7bSml29623 statsp->zcp_eop_err++; 46426f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 46436f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 46446f45ec7bSml29623 rxport_fatal = 
B_TRUE; 46456f45ec7bSml29623 } 46466f45ec7bSml29623 46476f45ec7bSml29623 if (rxport_fatal) { 46486f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 46496f45ec7bSml29623 " nxge_rxdma_handle_port_error: " 46506f45ec7bSml29623 " fatal error on Port #%d\n", 46516f45ec7bSml29623 portn)); 46526f45ec7bSml29623 status = nxge_rx_port_fatal_err_recover(nxgep); 46536f45ec7bSml29623 if (status == NXGE_OK) { 46546f45ec7bSml29623 FM_SERVICE_RESTORED(nxgep); 46556f45ec7bSml29623 } 46566f45ec7bSml29623 } 46576f45ec7bSml29623 46586f45ec7bSml29623 return (status); 46596f45ec7bSml29623 } 46606f45ec7bSml29623 46616f45ec7bSml29623 static nxge_status_t 46626f45ec7bSml29623 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 46636f45ec7bSml29623 { 46646f45ec7bSml29623 npi_handle_t handle; 46656f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 46666f45ec7bSml29623 nxge_status_t status = NXGE_OK; 46676f45ec7bSml29623 p_rx_rbr_ring_t rbrp; 46686f45ec7bSml29623 p_rx_rcr_ring_t rcrp; 46696f45ec7bSml29623 p_rx_mbox_t mboxp; 46706f45ec7bSml29623 rx_dma_ent_msk_t ent_mask; 46716f45ec7bSml29623 p_nxge_dma_common_t dmap; 46726f45ec7bSml29623 int ring_idx; 46736f45ec7bSml29623 uint32_t ref_cnt; 46746f45ec7bSml29623 p_rx_msg_t rx_msg_p; 46756f45ec7bSml29623 int i; 46766f45ec7bSml29623 uint32_t nxge_port_rcr_size; 46776f45ec7bSml29623 46786f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 46796f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 46806f45ec7bSml29623 "Recovering from RxDMAChannel#%d error...", channel)); 46816f45ec7bSml29623 46826f45ec7bSml29623 /* 46836f45ec7bSml29623 * Stop the dma channel waits for the stop done. 46846f45ec7bSml29623 * If the stop done bit is not set, then create 46856f45ec7bSml29623 * an error. 
46866f45ec7bSml29623 */ 46876f45ec7bSml29623 46886f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 46896f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 46906f45ec7bSml29623 46916f45ec7bSml29623 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 46926f45ec7bSml29623 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 46936f45ec7bSml29623 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 46946f45ec7bSml29623 46956f45ec7bSml29623 MUTEX_ENTER(&rcrp->lock); 46966f45ec7bSml29623 MUTEX_ENTER(&rbrp->lock); 46976f45ec7bSml29623 MUTEX_ENTER(&rbrp->post_lock); 46986f45ec7bSml29623 46996f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 47006f45ec7bSml29623 47016f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 47026f45ec7bSml29623 if (rs != NPI_SUCCESS) { 47036f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 47046f45ec7bSml29623 "nxge_disable_rxdma_channel:failed")); 47056f45ec7bSml29623 goto fail; 47066f45ec7bSml29623 } 47076f45ec7bSml29623 47086f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 47096f45ec7bSml29623 47106f45ec7bSml29623 /* Disable interrupt */ 47116f45ec7bSml29623 ent_mask.value = RX_DMA_ENT_MSK_ALL; 47126f45ec7bSml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 47136f45ec7bSml29623 if (rs != NPI_SUCCESS) { 47146f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 47156f45ec7bSml29623 "nxge_rxdma_stop_channel: " 47166f45ec7bSml29623 "set rxdma event masks failed (channel %d)", 47176f45ec7bSml29623 channel)); 47186f45ec7bSml29623 } 47196f45ec7bSml29623 47206f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 47216f45ec7bSml29623 47226f45ec7bSml29623 /* Reset RXDMA channel */ 47236f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 47246f45ec7bSml29623 if (rs != NPI_SUCCESS) { 47256f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 47266f45ec7bSml29623 
"nxge_rxdma_fatal_err_recover: " 47276f45ec7bSml29623 " reset rxdma failed (channel %d)", channel)); 47286f45ec7bSml29623 goto fail; 47296f45ec7bSml29623 } 47306f45ec7bSml29623 47316f45ec7bSml29623 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 47326f45ec7bSml29623 47336f45ec7bSml29623 mboxp = 47346f45ec7bSml29623 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 47356f45ec7bSml29623 47366f45ec7bSml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 47376f45ec7bSml29623 rbrp->rbr_rd_index = 0; 47386f45ec7bSml29623 47396f45ec7bSml29623 rcrp->comp_rd_index = 0; 47406f45ec7bSml29623 rcrp->comp_wt_index = 0; 47416f45ec7bSml29623 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 47426f45ec7bSml29623 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4743adfcba55Sjoycey #if defined(__i386) 474452ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4745adfcba55Sjoycey (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4746adfcba55Sjoycey #else 474752ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 47486f45ec7bSml29623 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4749adfcba55Sjoycey #endif 47506f45ec7bSml29623 47516f45ec7bSml29623 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 47526f45ec7bSml29623 (nxge_port_rcr_size - 1); 47536f45ec7bSml29623 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 47546f45ec7bSml29623 (nxge_port_rcr_size - 1); 47556f45ec7bSml29623 47566f45ec7bSml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 47576f45ec7bSml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 47586f45ec7bSml29623 47596f45ec7bSml29623 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 47606f45ec7bSml29623 47616f45ec7bSml29623 for (i = 0; i < rbrp->rbr_max_size; i++) { 47626f45ec7bSml29623 rx_msg_p = rbrp->rx_msg_ring[i]; 47636f45ec7bSml29623 ref_cnt = rx_msg_p->ref_cnt; 47646f45ec7bSml29623 if (ref_cnt != 1) { 47656f45ec7bSml29623 if (rx_msg_p->cur_usage_cnt != 47666f45ec7bSml29623 rx_msg_p->max_usage_cnt) 
{ 47676f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 47686f45ec7bSml29623 "buf[%d]: cur_usage_cnt = %d " 47696f45ec7bSml29623 "max_usage_cnt = %d\n", i, 47706f45ec7bSml29623 rx_msg_p->cur_usage_cnt, 47716f45ec7bSml29623 rx_msg_p->max_usage_cnt)); 47726f45ec7bSml29623 } else { 47736f45ec7bSml29623 /* Buffer can be re-posted */ 47746f45ec7bSml29623 rx_msg_p->free = B_TRUE; 47756f45ec7bSml29623 rx_msg_p->cur_usage_cnt = 0; 47766f45ec7bSml29623 rx_msg_p->max_usage_cnt = 0xbaddcafe; 47776f45ec7bSml29623 rx_msg_p->pkt_buf_size = 0; 47786f45ec7bSml29623 } 47796f45ec7bSml29623 } 47806f45ec7bSml29623 } 47816f45ec7bSml29623 47826f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 47836f45ec7bSml29623 47846f45ec7bSml29623 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 47856f45ec7bSml29623 if (status != NXGE_OK) { 47866f45ec7bSml29623 goto fail; 47876f45ec7bSml29623 } 47886f45ec7bSml29623 47896f45ec7bSml29623 MUTEX_EXIT(&rbrp->post_lock); 47906f45ec7bSml29623 MUTEX_EXIT(&rbrp->lock); 47916f45ec7bSml29623 MUTEX_EXIT(&rcrp->lock); 47926f45ec7bSml29623 47936f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 47946f45ec7bSml29623 "Recovery Successful, RxDMAChannel#%d Restored", 47956f45ec7bSml29623 channel)); 47966f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 47976f45ec7bSml29623 47986f45ec7bSml29623 return (NXGE_OK); 47996f45ec7bSml29623 fail: 48006f45ec7bSml29623 MUTEX_EXIT(&rbrp->post_lock); 48016f45ec7bSml29623 MUTEX_EXIT(&rbrp->lock); 48026f45ec7bSml29623 MUTEX_EXIT(&rcrp->lock); 48036f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 48046f45ec7bSml29623 48056f45ec7bSml29623 return (NXGE_ERROR | rs); 48066f45ec7bSml29623 } 48076f45ec7bSml29623 48086f45ec7bSml29623 nxge_status_t 48096f45ec7bSml29623 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 48106f45ec7bSml29623 { 4811678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 48126f45ec7bSml29623 nxge_status_t 
status = NXGE_OK; 4813678453a8Sspeer int rdc; 48146f45ec7bSml29623 48156f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 48166f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48176f45ec7bSml29623 "Recovering from RxPort error...")); 4818678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 48196f45ec7bSml29623 48206f45ec7bSml29623 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 48216f45ec7bSml29623 goto fail; 48226f45ec7bSml29623 48236f45ec7bSml29623 NXGE_DELAY(1000); 48246f45ec7bSml29623 4825678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 48266f45ec7bSml29623 4827678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4828678453a8Sspeer if ((1 << rdc) & set->owned.map) { 4829678453a8Sspeer if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 4830678453a8Sspeer != NXGE_OK) { 48316f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4832678453a8Sspeer "Could not recover channel %d", rdc)); 4833678453a8Sspeer } 48346f45ec7bSml29623 } 48356f45ec7bSml29623 } 48366f45ec7bSml29623 4837678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 48386f45ec7bSml29623 48396f45ec7bSml29623 /* Reset IPP */ 48406f45ec7bSml29623 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 48416f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48426f45ec7bSml29623 "nxge_rx_port_fatal_err_recover: " 48436f45ec7bSml29623 "Failed to reset IPP")); 48446f45ec7bSml29623 goto fail; 48456f45ec7bSml29623 } 48466f45ec7bSml29623 48476f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 48486f45ec7bSml29623 48496f45ec7bSml29623 /* Reset RxMAC */ 48506f45ec7bSml29623 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 48516f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48526f45ec7bSml29623 "nxge_rx_port_fatal_err_recover: " 48536f45ec7bSml29623 "Failed to reset RxMAC")); 48546f45ec7bSml29623 goto fail; 48556f45ec7bSml29623 } 48566f45ec7bSml29623 48576f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 
"Re-initialize IPP...")); 48586f45ec7bSml29623 48596f45ec7bSml29623 /* Re-Initialize IPP */ 48606f45ec7bSml29623 if (nxge_ipp_init(nxgep) != NXGE_OK) { 48616f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48626f45ec7bSml29623 "nxge_rx_port_fatal_err_recover: " 48636f45ec7bSml29623 "Failed to init IPP")); 48646f45ec7bSml29623 goto fail; 48656f45ec7bSml29623 } 48666f45ec7bSml29623 48676f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 48686f45ec7bSml29623 48696f45ec7bSml29623 /* Re-Initialize RxMAC */ 48706f45ec7bSml29623 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 48716f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48726f45ec7bSml29623 "nxge_rx_port_fatal_err_recover: " 48736f45ec7bSml29623 "Failed to reset RxMAC")); 48746f45ec7bSml29623 goto fail; 48756f45ec7bSml29623 } 48766f45ec7bSml29623 48776f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 48786f45ec7bSml29623 48796f45ec7bSml29623 /* Re-enable RxMAC */ 48806f45ec7bSml29623 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 48816f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48826f45ec7bSml29623 "nxge_rx_port_fatal_err_recover: " 48836f45ec7bSml29623 "Failed to enable RxMAC")); 48846f45ec7bSml29623 goto fail; 48856f45ec7bSml29623 } 48866f45ec7bSml29623 48876f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48886f45ec7bSml29623 "Recovery Successful, RxPort Restored")); 48896f45ec7bSml29623 48906f45ec7bSml29623 return (NXGE_OK); 48916f45ec7bSml29623 fail: 48926f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 48936f45ec7bSml29623 return (status); 48946f45ec7bSml29623 } 48956f45ec7bSml29623 48966f45ec7bSml29623 void 48976f45ec7bSml29623 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 48986f45ec7bSml29623 { 48996f45ec7bSml29623 rx_dma_ctl_stat_t cs; 49006f45ec7bSml29623 rx_ctl_dat_fifo_stat_t cdfs; 49016f45ec7bSml29623 49026f45ec7bSml29623 switch (err_id) { 49036f45ec7bSml29623 case 
NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 49046f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 49056f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 49066f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 49076f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 49086f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 49096f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 49106f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 49116f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RCRINCON: 49126f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RCRFULL: 49136f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RBRFULL: 49146f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 49156f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 49166f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 49176f45ec7bSml29623 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 49186f45ec7bSml29623 chan, &cs.value); 49196f45ec7bSml29623 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 49206f45ec7bSml29623 cs.bits.hdw.rcr_ack_err = 1; 49216f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 49226f45ec7bSml29623 cs.bits.hdw.dc_fifo_err = 1; 49236f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 49246f45ec7bSml29623 cs.bits.hdw.rcr_sha_par = 1; 49256f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 49266f45ec7bSml29623 cs.bits.hdw.rbr_pre_par = 1; 49276f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 49286f45ec7bSml29623 cs.bits.hdw.rbr_tmout = 1; 49296f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 49306f45ec7bSml29623 cs.bits.hdw.rsp_cnt_err = 1; 49316f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 49326f45ec7bSml29623 cs.bits.hdw.byte_en_bus = 1; 49336f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 49346f45ec7bSml29623 cs.bits.hdw.rsp_dat_err = 1; 49356f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 49366f45ec7bSml29623 
cs.bits.hdw.config_err = 1; 49376f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 49386f45ec7bSml29623 cs.bits.hdw.rcrincon = 1; 49396f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 49406f45ec7bSml29623 cs.bits.hdw.rcrfull = 1; 49416f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 49426f45ec7bSml29623 cs.bits.hdw.rbrfull = 1; 49436f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 49446f45ec7bSml29623 cs.bits.hdw.rbrlogpage = 1; 49456f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 49466f45ec7bSml29623 cs.bits.hdw.cfiglogpage = 1; 4947adfcba55Sjoycey #if defined(__i386) 4948adfcba55Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4949adfcba55Sjoycey cs.value); 4950adfcba55Sjoycey #else 49516f45ec7bSml29623 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 49526f45ec7bSml29623 cs.value); 4953adfcba55Sjoycey #endif 49546f45ec7bSml29623 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 49556f45ec7bSml29623 chan, cs.value); 49566f45ec7bSml29623 break; 49576f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 49586f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 49596f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 49606f45ec7bSml29623 cdfs.value = 0; 49616f45ec7bSml29623 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 49626f45ec7bSml29623 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 49636f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 49646f45ec7bSml29623 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 49656f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 49666f45ec7bSml29623 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4967adfcba55Sjoycey #if defined(__i386) 4968adfcba55Sjoycey cmn_err(CE_NOTE, 4969adfcba55Sjoycey "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4970adfcba55Sjoycey cdfs.value); 4971adfcba55Sjoycey #else 49726f45ec7bSml29623 
cmn_err(CE_NOTE, 49736f45ec7bSml29623 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 49746f45ec7bSml29623 cdfs.value); 4975adfcba55Sjoycey #endif 4976678453a8Sspeer NXGE_REG_WR64(nxgep->npi_handle, 4977678453a8Sspeer RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 49786f45ec7bSml29623 break; 49796f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 49806f45ec7bSml29623 break; 498153f3d8ecSyc148097 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 49826f45ec7bSml29623 break; 49836f45ec7bSml29623 } 49846f45ec7bSml29623 } 4985678453a8Sspeer 4986678453a8Sspeer static void 4987678453a8Sspeer nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4988678453a8Sspeer { 4989678453a8Sspeer rxring_info_t *ring_info; 4990678453a8Sspeer int index; 4991678453a8Sspeer uint32_t chunk_size; 4992678453a8Sspeer uint64_t kaddr; 4993678453a8Sspeer uint_t num_blocks; 4994678453a8Sspeer 4995678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4996678453a8Sspeer 4997678453a8Sspeer if (rbr_p == NULL) { 4998678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4999678453a8Sspeer "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 5000678453a8Sspeer return; 5001678453a8Sspeer } 5002678453a8Sspeer 5003678453a8Sspeer if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 5004678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5005678453a8Sspeer "==> nxge_rxdma_databuf_free: DDI")); 5006678453a8Sspeer return; 5007678453a8Sspeer } 5008678453a8Sspeer 5009678453a8Sspeer ring_info = rbr_p->ring_info; 5010678453a8Sspeer if (ring_info == NULL) { 5011678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5012678453a8Sspeer "==> nxge_rxdma_databuf_free: NULL ring info")); 5013678453a8Sspeer return; 5014678453a8Sspeer } 5015678453a8Sspeer num_blocks = rbr_p->num_blocks; 5016678453a8Sspeer for (index = 0; index < num_blocks; index++) { 5017678453a8Sspeer kaddr = ring_info->buffer[index].kaddr; 5018678453a8Sspeer chunk_size = ring_info->buffer[index].buf_size; 5019678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 
5020678453a8Sspeer "==> nxge_rxdma_databuf_free: free chunk %d " 5021678453a8Sspeer "kaddrp $%p chunk size %d", 5022678453a8Sspeer index, kaddr, chunk_size)); 5023678453a8Sspeer if (kaddr == NULL) continue; 5024678453a8Sspeer nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 5025678453a8Sspeer ring_info->buffer[index].kaddr = NULL; 5026678453a8Sspeer } 5027678453a8Sspeer 5028678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 5029678453a8Sspeer } 5030678453a8Sspeer 5031678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 5032678453a8Sspeer extern void contig_mem_free(void *, size_t); 5033678453a8Sspeer #endif 5034678453a8Sspeer 5035678453a8Sspeer void 5036678453a8Sspeer nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 5037678453a8Sspeer { 5038678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 5039678453a8Sspeer 5040678453a8Sspeer if (kaddr == NULL || !buf_size) { 5041678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5042678453a8Sspeer "==> nxge_free_buf: invalid kaddr $%p size to free %d", 5043678453a8Sspeer kaddr, buf_size)); 5044678453a8Sspeer return; 5045678453a8Sspeer } 5046678453a8Sspeer 5047678453a8Sspeer switch (alloc_type) { 5048678453a8Sspeer case KMEM_ALLOC: 5049678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 5050678453a8Sspeer "==> nxge_free_buf: freeing kmem $%p size %d", 5051678453a8Sspeer kaddr, buf_size)); 5052678453a8Sspeer #if defined(__i386) 5053678453a8Sspeer KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 5054678453a8Sspeer #else 5055678453a8Sspeer KMEM_FREE((void *)kaddr, buf_size); 5056678453a8Sspeer #endif 5057678453a8Sspeer break; 5058678453a8Sspeer 5059678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 5060678453a8Sspeer case CONTIG_MEM_ALLOC: 5061678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 5062678453a8Sspeer "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 5063678453a8Sspeer kaddr, buf_size)); 5064678453a8Sspeer 
contig_mem_free((void *)kaddr, buf_size); 5065678453a8Sspeer break; 5066678453a8Sspeer #endif 5067678453a8Sspeer 5068678453a8Sspeer default: 5069678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5070678453a8Sspeer "<== nxge_free_buf: unsupported alloc type %d", 5071678453a8Sspeer alloc_type)); 5072678453a8Sspeer return; 5073678453a8Sspeer } 5074678453a8Sspeer 5075678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 5076678453a8Sspeer } 5077