/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef DEV_NETMAP
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"

extern int fl_pad;	/* XXXNM */

/*
 * 0 = normal netmap rx
 * 1 = black hole
 * 2 = supermassive black hole (buffer packing enabled)
 */
int black_hole = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RWTUN, &black_hole, 0,
    "Sink incoming packets.");

int rx_ndesc = 256;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN,
    &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated.");

int rx_nframes = 64;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_nframes, CTLFLAG_RWTUN,
    &rx_nframes, 0, "max # of frames received before waking up netmap rx.");

int holdoff_tmr_idx = 2;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
    &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues.");
/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int nm_cong_drop = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_cong_drop, CTLFLAG_RWTUN,
    &nm_cong_drop, 0,
    "Congestion control for netmap rx queues (0 = backpressure, 1 = drop)");

int starve_fl = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, starve_fl, CTLFLAG_RWTUN,
    &starve_fl, 0, "Don't ring fl db for netmap rx queues.");

/*
 * Try to process tx credits in bulk.  This may cause a delay in the return of
 * tx credits and is suitable for bursty or non-stop tx only.
 */
int lazy_tx_credit_flush = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, lazy_tx_credit_flush, CTLFLAG_RWTUN,
    &lazy_tx_credit_flush, 0, "lazy credit flush for netmap tx queues.");

/*
 * Split the netmap rx queues into two groups that populate separate halves of
 * the RSS indirection table.  This allows filters with hashmask to steer to a
 * particular group of queues.
 */
static int nm_split_rss = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_split_rss, CTLFLAG_RWTUN,
    &nm_split_rss, 0, "Split the netmap rx queues into two groups.");

/*
 * netmap(4) says "netmap does not use features such as checksum offloading,
 * TCP segmentation offloading, encryption, VLAN encapsulation/decapsulation,
 * etc." but this knob can be used to get the hardware to checksum all tx
 * traffic anyway.
 */
static int nm_txcsum = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_txcsum, CTLFLAG_RWTUN,
    &nm_txcsum, 0, "Enable transmit checksum offloading.");
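/*
 * Illustrative note (added, not in the original source): all of the knobs
 * above are CTLFLAG_RWTUN, so they can be set as loader tunables or adjusted
 * at runtime with sysctl(8), e.g.:
 *
 *	# /boot/loader.conf
 *	hw.cxgbe.nm_black_hole="1"
 *
 *	# sysctl hw.cxgbe.nm_cong_drop=0
 */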
static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
{
	int rc, cntxt_id, i;
	__be32 v;
	struct adapter *sc = vi->adapter;
	struct sge_params *sp = &sc->params.sge;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_iq_cmd c;

	MPASS(na != NULL);
	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));
	MPASS(!forwarding_intr_to_fwq(sc));
	KASSERT(nm_rxq->intr_idx < sc->intr_count,
	    ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx));
	v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(vi->viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(vi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
	if (cong >= 0) {
		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
		    V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF |
		    F_FW_IQ_CMD_FL0CONGEN);
	}
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
	    (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
	    (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
	    X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) |
	    V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
	    X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
	c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) |
	    sc->chip_params->sge_fl_db;

	if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id);
		if (cong == 0)
			val = 1 << 19;
		else {
			val = 2 << 19;
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc);
		}
	}

	t4_write_reg(sc, sc->sge_gts_reg,
	    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));

	return (rc);
}

static int
free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = vi->adapter;
	int rc;

	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
	    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
		    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
	nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID;
	return (rc);
}
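/*
 * Illustrative note (added): in alloc_nm_txq_hwq() below, the ring size given
 * to the firmware (EQSIZE) is len / EQ_ESIZE, which includes the status page
 * that sits past the last real descriptor.  E.g. assuming 1024 tx descriptors
 * and a 64B status page with EQ_ESIZE == 64, EQSIZE is 1024 + 1 = 1025.
 */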
static int
alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = vi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_eq_eth_cmd c;

	MPASS(na != NULL);
	MPASS(nm_txq->desc != NULL);

	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	bzero(nm_txq->desc, len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
	    V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
	c.dcaen_to_eqsize =
	    htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
	    X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
	c.eqaddr = htobe64(nm_txq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(vi->dev,
		    "failed to create netmap egress queue: %d\n", rc);
		return (rc);
	}

	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

	nm_txq->pidx = nm_txq->cidx = 0;
	MPASS(nm_txq->sidx == na->num_tx_desc);
	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

	nm_txq->doorbells = sc->doorbells;
	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
		else {
			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
			nm_txq->udb_qid = 0;
		}
		nm_txq->udb = (volatile void *)udb;
	}

	return (rc);
}

static int
free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = vi->adapter;
	int rc;

	rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
		    nm_txq->cntxt_id, rc);
	nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;
	return (rc);
}
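/*
 * Illustrative note (added): cxgbe_netmap_simple_rss() below round-robins the
 * absolute ids of all active netmap rx queues across the VI's entire RSS
 * indirection table.  E.g. assuming vi->rss_size == 128 and 3 active queues
 * q0..q2, the table is filled q0, q1, q2, q0, q1, q2, ...
 */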
static int
cxgbe_netmap_simple_rss(struct adapter *sc, struct vi_info *vi,
    struct ifnet *ifp, struct netmap_adapter *na)
{
	struct netmap_kring *kring;
	struct sge_nm_rxq *nm_rxq;
	int rc, i, j, nm_state, defq;
	uint16_t *rss;

	/*
	 * Check if there's at least one active (or about to go active) netmap
	 * rx queue.
	 */
	defq = -1;
	for_each_nm_rxq(vi, j, nm_rxq) {
		nm_state = atomic_load_int(&nm_rxq->nm_state);
		kring = na->rx_rings[nm_rxq->nid];
		if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) ||
		    (nm_state == NM_OFF && nm_kring_pending_on(kring))) {
			MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
			if (defq == -1) {
				defq = nm_rxq->iq_abs_id;
				break;
			}
		}
	}

	if (defq == -1) {
		/* No active netmap queues.  Switch back to NIC queues. */
		rss = vi->rss;
		defq = vi->rss[0];
	} else {
		for (i = 0; i < vi->rss_size;) {
			for_each_nm_rxq(vi, j, nm_rxq) {
				nm_state = atomic_load_int(&nm_rxq->nm_state);
				kring = na->rx_rings[nm_rxq->nid];
				if ((nm_state != NM_OFF &&
				    !nm_kring_pending_off(kring)) ||
				    (nm_state == NM_OFF &&
				    nm_kring_pending_on(kring))) {
					MPASS(nm_rxq->iq_cntxt_id !=
					    INVALID_NM_RXQ_CNTXT_ID);
					vi->nm_rss[i++] = nm_rxq->iq_abs_id;
					if (i == vi->rss_size)
						break;
				}
			}
		}
		rss = vi->nm_rss;
	}

	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
	    vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap rss_config failed: %d\n", rc);

	rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0);
	if (rc != 0) {
		if_printf(ifp, "netmap defaultq config failed: %d\n", rc);
	}

	return (rc);
}
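/*
 * Illustrative note (added): with nm_split_rss enabled, the queues are split
 * at index (nnmrxq + 1) / 2.  E.g. assuming vi->nnmrxq == 5, queues 0-2 form
 * the first group (first half of the indirection table) and queues 3-4 the
 * second group (second half).
 */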
/*
 * An odd number of rx queues works best for split RSS mode as the first queue
 * can be dedicated for non-RSS traffic and the rest divided into two equal
 * halves.
 */
static int
cxgbe_netmap_split_rss(struct adapter *sc, struct vi_info *vi,
    struct ifnet *ifp, struct netmap_adapter *na)
{
	struct netmap_kring *kring;
	struct sge_nm_rxq *nm_rxq;
	int rc, i, j, nm_state, defq;
	int nactive[2] = {0, 0};
	int dq[2] = {-1, -1};
	bool dq_norss;		/* default queue should not be in RSS table. */

	MPASS(nm_split_rss != 0);
	MPASS(vi->nnmrxq > 1);

	for_each_nm_rxq(vi, i, nm_rxq) {
		j = i / ((vi->nnmrxq + 1) / 2);
		nm_state = atomic_load_int(&nm_rxq->nm_state);
		kring = na->rx_rings[nm_rxq->nid];
		if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) ||
		    (nm_state == NM_OFF && nm_kring_pending_on(kring))) {
			MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
			nactive[j]++;
			if (dq[j] == -1) {
				dq[j] = nm_rxq->iq_abs_id;
				break;
			}
		}
	}

	if (nactive[0] == 0 || nactive[1] == 0)
		return (cxgbe_netmap_simple_rss(sc, vi, ifp, na));

	MPASS(dq[0] != -1 && dq[1] != -1);
	if (nactive[0] > nactive[1]) {
		defq = dq[0];
		dq_norss = true;
	} else if (nactive[0] < nactive[1]) {
		defq = dq[1];
		dq_norss = true;
	} else {
		defq = dq[0];
		dq_norss = false;
	}

	i = 0;
	nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq];
	while (i < vi->rss_size / 2) {
		for (j = 0; j < (vi->nnmrxq + 1) / 2; j++) {
			nm_state = atomic_load_int(&nm_rxq[j].nm_state);
			kring = na->rx_rings[nm_rxq[j].nid];
			if ((nm_state == NM_OFF &&
			    !nm_kring_pending_on(kring)) ||
			    (nm_state == NM_ON &&
			    nm_kring_pending_off(kring))) {
				continue;
			}
			MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
			if (dq_norss && defq == nm_rxq[j].iq_abs_id)
				continue;
			vi->nm_rss[i++] = nm_rxq[j].iq_abs_id;
			if (i == vi->rss_size / 2)
				break;
		}
	}
	while (i < vi->rss_size) {
		for (j = (vi->nnmrxq + 1) / 2; j < vi->nnmrxq; j++) {
			nm_state = atomic_load_int(&nm_rxq[j].nm_state);
			kring = na->rx_rings[nm_rxq[j].nid];
			if ((nm_state == NM_OFF &&
			    !nm_kring_pending_on(kring)) ||
			    (nm_state == NM_ON &&
			    nm_kring_pending_off(kring))) {
				continue;
			}
			MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
			if (dq_norss && defq == nm_rxq[j].iq_abs_id)
				continue;
			vi->nm_rss[i++] = nm_rxq[j].iq_abs_id;
			if (i == vi->rss_size)
				break;
		}
	}

	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
	    vi->nm_rss, vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap split_rss_config failed: %d\n", rc);

	rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0);
	if (rc != 0)
		if_printf(ifp, "netmap defaultq config failed: %d\n", rc);

	return (rc);
}

static inline int
cxgbe_netmap_rss(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{

	if (nm_split_rss == 0 || vi->nnmrxq == 1)
		return (cxgbe_netmap_simple_rss(sc, vi, ifp, na));
	else
		return (cxgbe_netmap_split_rss(sc, vi, ifp, na));
}
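/*
 * Illustrative note (added): cxgbe_netmap_on() below refuses to enable netmap
 * unless NETMAP_BUF_SIZE(na) exactly matches one of the hardware rx buffer
 * sizes already programmed into the SGE (rxb->size1/size2).  E.g. with the
 * default 2048-byte netmap buffers, a matching 2048-byte hw buffer size must
 * exist or the routine fails with ENXIO.
 */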
static int
cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_slot *slot;
	struct netmap_kring *kring;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
	int i, j, hwidx;
	struct rx_buf_info *rxb;

	ASSERT_SYNCHRONIZED_OP(sc);
	MPASS(vi->nnmrxq > 0);
	MPASS(vi->nnmtxq > 0);

	if ((vi->flags & VI_INIT_DONE) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (EAGAIN);

	rxb = &sc->sge.rx_buf_info[0];
	for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
		if (rxb->size1 == NETMAP_BUF_SIZE(na)) {
			hwidx = rxb->hwidx1;
			break;
		}
		if (rxb->size2 == NETMAP_BUF_SIZE(na)) {
			hwidx = rxb->hwidx2;
			break;
		}
	}
	if (i >= SW_ZONE_SIZES) {
		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
		    NETMAP_BUF_SIZE(na));
		return (ENXIO);
	}

	/* Must set caps before calling netmap_reset */
	nm_set_native_flags(na);

	for_each_nm_rxq(vi, i, nm_rxq) {
		kring = na->rx_rings[nm_rxq->nid];
		if (!nm_kring_pending_on(kring) ||
		    nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID)
			continue;

		alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop));
		nm_rxq->fl_hwidx = hwidx;
		slot = netmap_reset(na, NR_RX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */

		/* We deal with 8 bufs at a time */
		MPASS((na->num_rx_desc & 7) == 0);
		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
		for (j = 0; j < nm_rxq->fl_sidx; j++) {
			uint64_t ba;

			PNMB(na, &slot[j], &ba);
			MPASS(ba != 0);
			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
		}
		j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
		MPASS((j & 7) == 0);
		j /= 8;	/* driver pidx to hardware pidx */
		wmb();
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(j));

		(void) atomic_cmpset_int(&nm_rxq->nm_state, NM_OFF, NM_ON);
	}

	for_each_nm_txq(vi, i, nm_txq) {
		kring = na->tx_rings[nm_txq->nid];
		if (!nm_kring_pending_on(kring) ||
		    nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID)
			continue;

		alloc_nm_txq_hwq(vi, nm_txq);
		slot = netmap_reset(na, NR_TX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
	}

	if (vi->nm_rss == NULL) {
		vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
		    M_ZERO | M_WAITOK);
	}

	return (cxgbe_netmap_rss(sc, vi, ifp, na));
}

static int
cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_kring *kring;
	int rc, i, nm_state, nactive;
	struct sge_nm_txq *nm_txq;
	struct sge_nm_rxq *nm_rxq;

	ASSERT_SYNCHRONIZED_OP(sc);
	MPASS(vi->nnmrxq > 0);
	MPASS(vi->nnmtxq > 0);

	if (!nm_netmap_on(na))
		return (0);

	if ((vi->flags & VI_INIT_DONE) == 0)
		return (0);

	/* First remove the queues that are stopping from the RSS table. */
	rc = cxgbe_netmap_rss(sc, vi, ifp, na);
	if (rc != 0)
		return (rc);	/* error message logged already. */

	for_each_nm_txq(vi, i, nm_txq) {
		struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];

		kring = na->tx_rings[nm_txq->nid];
		if (!nm_kring_pending_off(kring) ||
		    nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID)
			continue;

		/* Wait for hw pidx to catch up ... */
		while (be16toh(spg->pidx) != nm_txq->pidx)
			pause("nmpidx", 1);

		/* ... and then for the cidx. */
		while (spg->pidx != spg->cidx)
			pause("nmcidx", 1);

		free_nm_txq_hwq(vi, nm_txq);

		/* XXX: netmap, not the driver, should do this. */
		kring->rhead = kring->rcur = kring->nr_hwcur = 0;
		kring->rtail = kring->nr_hwtail = kring->nkr_num_slots - 1;
	}
	nactive = 0;
	for_each_nm_rxq(vi, i, nm_rxq) {
		nm_state = atomic_load_int(&nm_rxq->nm_state);
		kring = na->rx_rings[nm_rxq->nid];
		if (nm_state != NM_OFF && !nm_kring_pending_off(kring))
			nactive++;
		if (nm_state == NM_OFF || !nm_kring_pending_off(kring))
			continue;

		MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
		while (!atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_OFF))
			pause("nmst", 1);

		free_nm_rxq_hwq(vi, nm_rxq);

		/* XXX: netmap, not the driver, should do this. */
		kring->rhead = kring->rcur = kring->nr_hwcur = 0;
		kring->rtail = kring->nr_hwtail = 0;
	}
	netmap_krings_mode_commit(na, 0);
	if (nactive == 0)
		nm_clear_native_flags(na);

	return (rc);
}

static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
	if (rc != 0)
		return (rc);
	if (on)
		rc = cxgbe_netmap_on(sc, vi, ifp, na);
	else
		rc = cxgbe_netmap_off(sc, vi, ifp, na);
	end_synchronized_op(sc, 0);

	return (rc);
}

/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

	return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

/*
 * Space (in descriptors) needed for a type1 WR (TX_PKTS or TX_PKTS2) that
 * carries n packets
 */
static inline int
npkt_to_ndesc(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return ((n + 2) / 2);
}

/*
 * Space (in 16B units) needed for a type1 WR (TX_PKTS or TX_PKTS2) that
 * carries n packets
 */
static inline int
npkt_to_len16(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return (n * 2 + 1);
}
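/*
 * Worked example (added; assumes a 16B WR header plus a 16B cpl_tx_pkt_core
 * and a 16B one-entry ulptx_sgl per packet): a type1 WR carrying n packets
 * takes 16 + 32n bytes, i.e. npkt_to_len16(n) = 2n + 1 16B units.  For n = 3
 * that is 112B, which fits in npkt_to_ndesc(3) = 2 of the 64B descriptors.
 * With SGE_MAX_WR_NDESC descriptors per WR, the largest WR carries
 * MAX_NPKT_IN_TYPE1_WR = 2 * SGE_MAX_WR_NDESC - 1 packets.
 */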
#define NMIDXDIFF(q, idx)	IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)

static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
	int n;
	u_int db = nm_txq->doorbells;

	MPASS(nm_txq->pidx != nm_txq->dbidx);

	n = NMIDXDIFF(nm_txq, dbidx);
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(nm_txq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

		dst = (volatile void *)((uintptr_t)nm_txq->udb +
		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
		src = (void *)&nm_txq->desc[nm_txq->dbidx];
		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
		break;
	}
	nm_txq->dbidx = nm_txq->pidx;
}
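/*
 * Illustrative note (added): the transmit routine below requests an egress
 * update roughly every sidx / 2 descriptors (both EQUIQ and EQUEQ) or every
 * 64 descriptors (EQUEQ only), and rings the doorbell once at least
 * 2 * SGE_MAX_WR_NDESC descriptors have been written, so neither credits nor
 * doorbells are requested for every single WR.
 */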
/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way.  No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot;
	const u_int lim = kring->nkr_num_slots - 1;
	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
	uint16_t len;
	uint64_t ba;
	struct cpl_tx_pkt_core *cpl;
	struct ulptx_sgl *usgl;
	int i, n;

	while (npkt) {
		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
		len = 0;

		wr = (void *)&nm_txq->desc[nm_txq->pidx];
		wr->op_pkd = nm_txq->op_pkd;
		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
		wr->npkt = n;
		wr->r3 = 0;
		wr->type = 1;
		cpl = (void *)(wr + 1);

		for (i = 0; i < n; i++) {
			slot = &ring->slot[kring->nr_hwcur];
			PNMB(kring->na, slot, &ba);
			MPASS(ba != 0);

			cpl->ctrl0 = nm_txq->cpl_ctrl0;
			cpl->pack = 0;
			cpl->len = htobe16(slot->len);
			cpl->ctrl1 = nm_txcsum ? 0 :
			    htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

			usgl = (void *)(cpl + 1);
			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			    V_ULPTX_NSGE(1));
			usgl->len0 = htobe32(slot->len);
			usgl->addr0 = htobe64(ba);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			cpl = (void *)(usgl + 1);
			MPASS(slot->len + len <= UINT16_MAX);
			len += slot->len;
			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
		}
		wr->plen = htobe16(len);

		npkt -= n;
		nm_txq->pidx += npkt_to_ndesc(n);
		MPASS(nm_txq->pidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
			/*
			 * This routine doesn't know how to write WRs that wrap
			 * around.  Make sure it wasn't asked to.
			 */
			MPASS(npkt == 0);
			nm_txq->pidx = 0;
		}

		if (npkt == 0 && npkt_remaining == 0) {
			/* All done. */
			if (lazy_tx_credit_flush == 0) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
				    F_FW_WR_EQUIQ);
				nm_txq->equeqidx = nm_txq->pidx;
				nm_txq->equiqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
			return;
		}

		if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
			    F_FW_WR_EQUIQ);
			nm_txq->equeqidx = nm_txq->pidx;
			nm_txq->equiqidx = nm_txq->pidx;
		} else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			nm_txq->equeqidx = nm_txq->pidx;
		}
		if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
			ring_nm_txq_db(sc, nm_txq);
	}

	/* Will get called again. */
	MPASS(npkt_remaining);
}

/* How many contiguous free descriptors starting at pidx */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

	if (nm_txq->cidx > nm_txq->pidx)
		return (nm_txq->cidx - nm_txq->pidx - 1);
	else if (nm_txq->cidx > 0)
		return (nm_txq->sidx - nm_txq->pidx);
	else
		return (nm_txq->sidx - nm_txq->pidx - 1);
}

static int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
	uint16_t hw_cidx = spg->cidx;	/* snapshot */
	struct fw_eth_tx_pkts_wr *wr;
	int n = 0;

	hw_cidx = be16toh(hw_cidx);

	while (nm_txq->cidx != hw_cidx) {
		wr = (void *)&nm_txq->desc[nm_txq->cidx];

		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)) ||
		    wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)));
		MPASS(wr->type == 1);
		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

		n += wr->npkt;
		nm_txq->cidx += npkt_to_ndesc(wr->npkt);

		/*
		 * We never sent a WR that wrapped around so the credits coming
		 * back, WR by WR, should never cause the cidx to wrap around
		 * either.
		 */
		MPASS(nm_txq->cidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->cidx == nm_txq->sidx))
			nm_txq->cidx = 0;
	}

	return (n);
}
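/*
 * Illustrative note (added): in the txsync routine below, the descriptor
 * demand for npkt_remaining packets is computed per full-size WR.  E.g.
 * assuming MAX_NPKT_IN_TYPE1_WR = 15 and SGE_MAX_WR_NDESC = 8, 20 packets
 * need 1 full WR (8 descriptors) plus npkt_to_ndesc(5) = 3 descriptors,
 * 11 in total.
 */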
static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->adapter;
	struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq +
	    kring->ring_id];
	const u_int head = kring->rhead;
	u_int reclaimed = 0;
	int n, d, npkt_remaining, ndesc_remaining;

	/*
	 * Tx was at kring->nr_hwcur last time around and now we need to
	 * advance to kring->rhead.  Note that the driver's pidx moves
	 * independently of netmap's kring->nr_hwcur (pidx counts descriptors
	 * and the relation between descriptors and frames isn't 1:1).
	 */

	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	while (npkt_remaining) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		ndesc_remaining = contiguous_ndesc_available(nm_txq);
		/* Can't run out of descriptors with packets still remaining */
		MPASS(ndesc_remaining > 0);

		/* # of desc needed to tx all remaining packets */
		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
			d += npkt_to_ndesc(npkt_remaining %
			    MAX_NPKT_IN_TYPE1_WR);

		if (d <= ndesc_remaining)
			n = npkt_remaining;
		else {
			/* Can't send all, calculate how many can be sent */
			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
			    MAX_NPKT_IN_TYPE1_WR;
			if (ndesc_remaining % SGE_MAX_WR_NDESC)
				n += ndesc_to_npkt(ndesc_remaining %
				    SGE_MAX_WR_NDESC);
		}

		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
		npkt_remaining -= n;
		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining);
	}
	MPASS(npkt_remaining == 0);
	MPASS(kring->nr_hwcur == head);
	MPASS(nm_txq->dbidx == nm_txq->pidx);

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		kring->nr_hwtail += reclaimed;
		if (kring->nr_hwtail >= kring->nkr_num_slots)
			kring->nr_hwtail -= kring->nkr_num_slots;
	}

	return (0);
}
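/*
 * Illustrative note (added): the rxsync routine below returns buffers to the
 * freelist only in multiples of 8 (n &= ~7U), matching the 8-buffers-at-a-time
 * convention used everywhere else in this file.  E.g. if userspace releases
 * 13 slots, only 8 are given back to the hardware and 5 are carried over to
 * a later call.
 */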
static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->adapter;
	struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq +
	    kring->ring_id];
	u_int const head = kring->rhead;
	u_int n;
	int force_update = (flags & NAF_FORCE_READ) ||
	    kring->nr_kflags & NKR_PENDINTR;

	if (black_hole)
		return (0);	/* No updates ever. */

	if (netmap_no_pendintr || force_update) {
		kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	if (nm_rxq->fl_db_saved > 0 && starve_fl == 0) {
		wmb();
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(nm_rxq->fl_db_saved));
		nm_rxq->fl_db_saved = 0;
	}

	/* Userspace done with buffers from kring->nr_hwcur to head */
	n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	n &= ~7U;
	if (n > 0) {
		u_int fl_pidx = nm_rxq->fl_pidx;
		struct netmap_slot *slot = &ring->slot[fl_pidx];
		uint64_t ba;
		int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

		/*
		 * We always deal with 8 buffers at a time.  We must have
		 * stopped at an 8B boundary (fl_pidx) last time around and we
		 * must have a multiple of 8B buffers to give to the freelist.
		 */
		MPASS((fl_pidx & 7) == 0);
		MPASS((n & 7) == 0);

		IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
		IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx2);

		while (n > 0) {
			for (i = 0; i < 8; i++, fl_pidx++, slot++) {
				PNMB(na, slot, &ba);
				MPASS(ba != 0);
				nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
				slot->flags &= ~NS_BUF_CHANGED;
				MPASS(fl_pidx <= nm_rxq->fl_sidx2);
			}
			n -= 8;
			if (fl_pidx == nm_rxq->fl_sidx2) {
				fl_pidx = 0;
				slot = &ring->slot[0];
			}
			if (++dbinc == 8 && n >= 32) {
				wmb();
				if (starve_fl)
					nm_rxq->fl_db_saved += dbinc;
				else {
					t4_write_reg(sc, sc->sge_kdoorbell_reg,
					    nm_rxq->fl_db_val | V_PIDX(dbinc));
				}
				dbinc = 0;
			}
		}
		MPASS(nm_rxq->fl_pidx == fl_pidx);

		if (dbinc > 0) {
			wmb();
			if (starve_fl)
				nm_rxq->fl_db_saved += dbinc;
			else {
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(dbinc));
			}
		}
	}

	return (0);
}
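/*
 * Illustrative note (added): the ring sizes advertised to netmap in
 * cxgbe_nm_attach() below exclude the status page.  E.g. assuming
 * vi->qsize_txq = 1024 and a 64B status page (spg_len / EQ_ESIZE = 1),
 * netmap sees 1023 tx descriptors; the rx ring is vi->qsize_rxq rounded
 * down to a multiple of 8.
 */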
void
cxgbe_nm_attach(struct vi_info *vi)
{
	struct port_info *pi;
	struct adapter *sc;
	struct netmap_adapter na;

	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	pi = vi->pi;
	sc = pi->adapter;

	bzero(&na, sizeof(na));

	na.ifp = vi->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;

	/* Netmap doesn't know about the space reserved for the status page. */
	na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE;

	/*
	 * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
	 * num_rx_desc is based on the number of buffers that can be held in
	 * the freelist, and not the number of entries in the iq.  (These two
	 * are not exactly the same due to the space taken up by the status
	 * page).
	 */
	na.num_rx_desc = rounddown(vi->qsize_rxq, 8);
	na.nm_txsync = cxgbe_netmap_txsync;
	na.nm_rxsync = cxgbe_netmap_rxsync;
	na.nm_register = cxgbe_netmap_reg;
	na.num_tx_rings = vi->nnmtxq;
	na.num_rx_rings = vi->nnmrxq;
	na.rx_buf_maxsize = MAX_MTU;
	netmap_attach(&na);	/* This adds IFCAP_NETMAP to if_capabilities */
}

void
cxgbe_nm_detach(struct vi_info *vi)
{

	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	netmap_detach(vi->ifp);
}

static inline const void *
unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl)
{

	MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL);

	/* data[0] is RSS header */
	return (&cpl->data[1]);
}

static void
handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp,
    const struct cpl_sge_egr_update *egr)
{
	uint32_t oq;
	struct sge_nm_txq *nm_txq;

	oq = be32toh(egr->opcode_qid);
	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
	nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

	netmap_tx_irq(ifp, nm_txq->nid);
}
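/*
 * Illustrative note (added): service_nm_rxq() below batches its work using
 * two of the tunables defined at the top of this file: it wakes up netmap
 * after rx_nframes frames (default 64) and sends the hardware a cidx update
 * after rx_ndesc descriptors (default 256), rather than doing either per
 * frame.
 */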
void
service_nm_rxq(struct sge_nm_rxq *nm_rxq)
{
	struct vi_info *vi = nm_rxq->vi;
	struct adapter *sc = vi->adapter;
	struct ifnet *ifp = vi->ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = na->rx_rings[nm_rxq->nid];
	struct netmap_ring *ring = kring->ring;
	struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
	const void *cpl;
	uint32_t lq;
	u_int work = 0;
	uint8_t opcode;
	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
	u_int fl_credits = fl_cidx & 7;
	u_int ndesc = 0;	/* desc processed since last cidx update */
	u_int nframes = 0;	/* frames processed since last netmap wakeup */

	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

		rmb();

		lq = be32toh(d->rsp.pldbuflen_qid);
		opcode = d->rss.opcode;
		cpl = &d->cpl[0];

		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
		case X_RSPD_TYPE_FLBUF:

			/* fall through */

		case X_RSPD_TYPE_CPL:
			MPASS(opcode < NUM_CPL_CMDS);

			switch (opcode) {
			case CPL_FW4_MSG:
			case CPL_FW6_MSG:
				cpl = unwrap_nm_fw6_msg(cpl);
				/* fall through */
			case CPL_SGE_EGR_UPDATE:
				handle_nm_sge_egr_update(sc, ifp, cpl);
				break;
			case CPL_RX_PKT:
				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
				    sc->params.sge.fl_pktshift;
				ring->slot[fl_cidx].flags = 0;
				nframes++;
				if (!(lq & F_RSPD_NEWBUF)) {
					MPASS(black_hole == 2);
					break;
				}
				fl_credits++;
				if (__predict_false(++fl_cidx ==
				    nm_rxq->fl_sidx))
					fl_cidx = 0;
				break;
			default:
				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
				    __func__, opcode, nm_rxq);
			}
			break;

		case X_RSPD_TYPE_INTR:
			/* Not equipped to handle forwarded interrupts. */
			panic("%s: netmap queue received interrupt for iq %u\n",
			    __func__, lq);

		default:
			panic("%s: illegal response type %d on nm_rxq %p",
			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
		}

		d++;
		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
			nm_rxq->iq_cidx = 0;
			d = &nm_rxq->iq_desc[0];
			nm_rxq->iq_gen ^= F_RSPD_GEN;
		}

		if (__predict_false(nframes == rx_nframes) && !black_hole) {
			atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
			netmap_rx_irq(ifp, nm_rxq->nid, &work);
			nframes = 0;
		}

		if (__predict_false(++ndesc == rx_ndesc)) {
			if (black_hole && fl_credits >= 8) {
				fl_credits /= 8;
				IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
				    nm_rxq->fl_sidx);
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(fl_credits));
				fl_credits = fl_cidx & 7;
			}
			t4_write_reg(sc, sc->sge_gts_reg,
			    V_CIDXINC(ndesc) |
			    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndesc = 0;
		}
	}

	atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
	if (black_hole) {
		fl_credits /= 8;
		IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(fl_credits));
	} else if (nframes > 0)
		netmap_rx_irq(ifp, nm_rxq->nid, &work);

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) |
	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}
#endif