1 /*- 2 * Copyright (c) 2014 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 #include "opt_inet6.h" 33 34 #ifdef DEV_NETMAP 35 #include <sys/param.h> 36 #include <sys/bus.h> 37 #include <sys/eventhandler.h> 38 #include <sys/lock.h> 39 #include <sys/mbuf.h> 40 #include <sys/module.h> 41 #include <sys/selinfo.h> 42 #include <sys/socket.h> 43 #include <sys/sockio.h> 44 #include <machine/bus.h> 45 #include <net/ethernet.h> 46 #include <net/if.h> 47 #include <net/if_media.h> 48 #include <net/if_var.h> 49 #include <net/if_clone.h> 50 #include <net/if_types.h> 51 #include <net/netmap.h> 52 #include <dev/netmap/netmap_kern.h> 53 54 #include "common/common.h" 55 #include "common/t4_regs.h" 56 #include "common/t4_regs_values.h" 57 58 extern int fl_pad; /* XXXNM */ 59 60 /* 61 * 0 = normal netmap rx 62 * 1 = black hole 63 * 2 = supermassive black hole (buffer packing enabled) 64 */ 65 int black_hole = 0; 66 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RWTUN, &black_hole, 0, 67 "Sink incoming packets."); 68 69 int rx_ndesc = 256; 70 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN, 71 &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated."); 72 73 int rx_nframes = 64; 74 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_nframes, CTLFLAG_RWTUN, 75 &rx_nframes, 0, "max # of frames received before waking up netmap rx."); 76 77 int holdoff_tmr_idx = 2; 78 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN, 79 &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues."); 80 81 /* 82 * Congestion drops. 83 * -1: no congestion feedback (not recommended). 84 * 0: backpressure the channel instead of dropping packets right away. 85 * 1: no backpressure, drop packets for the congested queue immediately. 86 */ 87 static int nm_cong_drop = 1; 88 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_cong_drop, CTLFLAG_RWTUN, 89 &nm_cong_drop, 0, 90 "Congestion control for netmap rx queues (0 = backpressure, 1 = drop"); 91 92 int starve_fl = 0; 93 SYSCTL_INT(_hw_cxgbe, OID_AUTO, starve_fl, CTLFLAG_RWTUN, 94 &starve_fl, 0, "Don't ring fl db for netmap rx queues."); 95 96 /* 97 * Try to process tx credits in bulk. This may cause a delay in the return of 98 * tx credits and is suitable for bursty or non-stop tx only. 99 */ 100 int lazy_tx_credit_flush = 1; 101 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lazy_tx_credit_flush, CTLFLAG_RWTUN, 102 &lazy_tx_credit_flush, 0, "lazy credit flush for netmap tx queues."); 103 104 /* 105 * Split the netmap rx queues into two groups that populate separate halves of 106 * the RSS indirection table. This allows filters with hashmask to steer to a 107 * particular group of queues. 108 */ 109 static int nm_split_rss = 0; 110 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_split_rss, CTLFLAG_RWTUN, 111 &nm_split_rss, 0, "Split the netmap rx queues into two groups."); 112 113 /* 114 * netmap(4) says "netmap does not use features such as checksum offloading, TCP 115 * segmentation offloading, encryption, VLAN encapsulation/decapsulation, etc." 116 * but this knob can be used to get the hardware to checksum all tx traffic 117 * anyway. 118 */ 119 static int nm_txcsum = 0; 120 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_txcsum, CTLFLAG_RWTUN, 121 &nm_txcsum, 0, "Enable transmit checksum offloading."); 122 123 static int free_nm_rxq_hwq(struct vi_info *, struct sge_nm_rxq *); 124 static int free_nm_txq_hwq(struct vi_info *, struct sge_nm_txq *); 125 126 int 127 alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx, 128 int idx) 129 { 130 int rc; 131 struct sysctl_oid *oid; 132 struct sysctl_oid_list *children; 133 struct sysctl_ctx_list *ctx; 134 char name[16]; 135 size_t len; 136 struct adapter *sc = vi->adapter; 137 struct netmap_adapter *na = NA(vi->ifp); 138 139 MPASS(na != NULL); 140 141 len = vi->qsize_rxq * IQ_ESIZE; 142 rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, 143 &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); 144 if (rc != 0) 145 return (rc); 146 147 len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len; 148 rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, 149 &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); 150 if (rc != 0) 151 return (rc); 152 153 nm_rxq->vi = vi; 154 nm_rxq->nid = idx; 155 nm_rxq->iq_cidx = 0; 156 nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE; 157 nm_rxq->iq_gen = F_RSPD_GEN; 158 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 159 nm_rxq->fl_sidx = na->num_rx_desc; 160 nm_rxq->fl_sidx2 = nm_rxq->fl_sidx; /* copy for rxsync cacheline */ 161 nm_rxq->intr_idx = intr_idx; 162 nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; 163 164 ctx = &vi->ctx; 165 children = SYSCTL_CHILDREN(vi->nm_rxq_oid); 166 167 snprintf(name, sizeof(name), "%d", idx); 168 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, 169 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue"); 170 children = SYSCTL_CHILDREN(oid); 171 172 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, 173 &nm_rxq->iq_abs_id, 0, "absolute id of the queue"); 174 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 175 &nm_rxq->iq_cntxt_id, 0, "SGE context id of the queue"); 176 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 177 &nm_rxq->iq_cidx, 0, "consumer index"); 178 179 children = SYSCTL_CHILDREN(oid); 180 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", 181 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist"); 182 children = SYSCTL_CHILDREN(oid); 183 184 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 185 &nm_rxq->fl_cntxt_id, 0, "SGE context id of the freelist"); 186 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 187 &nm_rxq->fl_cidx, 0, "consumer index"); 188 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 189 &nm_rxq->fl_pidx, 0, "producer index"); 190 191 return (rc); 192 } 193 194 int 195 free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 196 { 197 struct adapter *sc = vi->adapter; 198 199 if (!(vi->flags & VI_INIT_DONE)) 200 return (0); 201 202 if (nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID) 203 free_nm_rxq_hwq(vi, nm_rxq); 204 MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID); 205 206 free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, 207 nm_rxq->iq_desc); 208 free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, 209 nm_rxq->fl_desc); 210 211 return (0); 212 } 213 214 int 215 alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx) 216 { 217 int rc; 218 size_t len; 219 struct port_info *pi = vi->pi; 220 struct adapter *sc = pi->adapter; 221 struct netmap_adapter *na = NA(vi->ifp); 222 char name[16]; 223 struct sysctl_oid *oid; 224 struct sysctl_oid_list *children = SYSCTL_CHILDREN(vi->nm_txq_oid); 225 226 len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 227 rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, 228 &nm_txq->ba, (void **)&nm_txq->desc); 229 if (rc) 230 return (rc); 231 232 nm_txq->pidx = nm_txq->cidx = 0; 233 nm_txq->sidx = na->num_tx_desc; 234 nm_txq->nid = idx; 235 nm_txq->iqidx = iqidx; 236 nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 237 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | 238 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 239 if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0)) 240 nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); 241 else 242 nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 243 nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; 244 245 snprintf(name, sizeof(name), "%d", idx); 246 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, 247 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queue"); 248 children = SYSCTL_CHILDREN(oid); 249 250 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 251 &nm_txq->cntxt_id, 0, "SGE context id of the queue"); 252 SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 253 &nm_txq->cidx, 0, "consumer index"); 254 SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 255 &nm_txq->pidx, 0, "producer index"); 256 257 return (rc); 258 } 259 260 int 261 free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 262 { 263 struct adapter *sc = vi->adapter; 264 265 if (!(vi->flags & VI_INIT_DONE)) 266 return (0); 267 268 if (nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID) 269 free_nm_txq_hwq(vi, nm_txq); 270 MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID); 271 272 free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, 273 nm_txq->desc); 274 275 return (0); 276 } 277 278 static int 279 alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong) 280 { 281 int rc, cntxt_id, i; 282 __be32 v; 283 struct adapter *sc = vi->adapter; 284 struct sge_params *sp = &sc->params.sge; 285 struct netmap_adapter *na = NA(vi->ifp); 286 struct fw_iq_cmd c; 287 288 MPASS(na != NULL); 289 MPASS(nm_rxq->iq_desc != NULL); 290 MPASS(nm_rxq->fl_desc != NULL); 291 292 bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE); 293 bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len); 294 295 bzero(&c, sizeof(c)); 296 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 297 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 298 V_FW_IQ_CMD_VFN(0)); 299 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_IQSTART | FW_LEN16(c)); 300 if (nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID) 301 c.alloc_to_len16 |= htobe32(F_FW_IQ_CMD_ALLOC); 302 else { 303 c.iqid = htobe16(nm_rxq->iq_cntxt_id); 304 c.fl0id = htobe16(nm_rxq->fl_cntxt_id); 305 c.fl1id = htobe16(0xffff); 306 c.physiqid = htobe16(nm_rxq->iq_abs_id); 307 } 308 MPASS(!forwarding_intr_to_fwq(sc)); 309 KASSERT(nm_rxq->intr_idx < sc->intr_count, 310 ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx)); 311 v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx); 312 c.type_to_iqandstindex = htobe32(v | 313 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 314 V_FW_IQ_CMD_VIID(vi->viid) | 315 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 316 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) | 317 F_FW_IQ_CMD_IQGTSMODE | 318 V_FW_IQ_CMD_IQINTCNTTHRESH(0) | 319 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 320 c.iqsize = htobe16(vi->qsize_rxq); 321 c.iqaddr = htobe64(nm_rxq->iq_ba); 322 if (cong >= 0) { 323 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN | 324 V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF | 325 F_FW_IQ_CMD_FL0CONGEN); 326 } 327 c.iqns_to_fl0congen |= 328 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 329 V_FW_IQ_CMD_IQTYPE(FW_IQ_IQTYPE_NIC) | 330 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 331 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 332 (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0)); 333 c.fl0dcaen_to_fl0cidxfthresh = 334 htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? 335 X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) | 336 V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 337 X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); 338 c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE); 339 c.fl0addr = htobe64(nm_rxq->fl_ba); 340 341 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 342 if (rc != 0) { 343 device_printf(sc->dev, 344 "failed to create netmap ingress queue: %d\n", rc); 345 return (rc); 346 } 347 348 nm_rxq->iq_cidx = 0; 349 MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE); 350 nm_rxq->iq_gen = F_RSPD_GEN; 351 nm_rxq->iq_cntxt_id = be16toh(c.iqid); 352 nm_rxq->iq_abs_id = be16toh(c.physiqid); 353 cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start; 354 if (cntxt_id >= sc->sge.iqmap_sz) { 355 panic ("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)", 356 __func__, cntxt_id, sc->sge.iqmap_sz - 1); 357 } 358 sc->sge.iqmap[cntxt_id] = (void *)nm_rxq; 359 360 nm_rxq->fl_cntxt_id = be16toh(c.fl0id); 361 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 362 nm_rxq->fl_db_saved = 0; 363 /* matches the X_FETCHBURSTMAX_512B or X_FETCHBURSTMAX_256B above. */ 364 nm_rxq->fl_db_threshold = chip_id(sc) <= CHELSIO_T5 ? 8 : 4; 365 MPASS(nm_rxq->fl_sidx == na->num_rx_desc); 366 cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start; 367 if (cntxt_id >= sc->sge.eqmap_sz) { 368 panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)", 369 __func__, cntxt_id, sc->sge.eqmap_sz - 1); 370 } 371 sc->sge.eqmap[cntxt_id] = (void *)nm_rxq; 372 373 nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) | 374 sc->chip_params->sge_fl_db; 375 376 if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) { 377 uint32_t param, val; 378 379 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 380 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 381 V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id); 382 if (cong == 0) 383 val = 1 << 19; 384 else { 385 val = 2 << 19; 386 for (i = 0; i < 4; i++) { 387 if (cong & (1 << i)) 388 val |= 1 << (i << 2); 389 } 390 } 391 392 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 393 if (rc != 0) { 394 /* report error but carry on */ 395 device_printf(sc->dev, 396 "failed to set congestion manager context for " 397 "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc); 398 } 399 } 400 401 t4_write_reg(sc, sc->sge_gts_reg, 402 V_INGRESSQID(nm_rxq->iq_cntxt_id) | 403 V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); 404 405 return (rc); 406 } 407 408 static int 409 free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 410 { 411 struct adapter *sc = vi->adapter; 412 int rc; 413 414 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 415 nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff); 416 if (rc != 0) 417 device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n", 418 __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc); 419 nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; 420 return (rc); 421 } 422 423 static int 424 alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 425 { 426 int rc, cntxt_id; 427 size_t len; 428 struct adapter *sc = vi->adapter; 429 struct netmap_adapter *na = NA(vi->ifp); 430 struct fw_eq_eth_cmd c; 431 432 MPASS(na != NULL); 433 MPASS(nm_txq->desc != NULL); 434 435 len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 436 bzero(nm_txq->desc, len); 437 438 bzero(&c, sizeof(c)); 439 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 440 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 441 V_FW_EQ_ETH_CMD_VFN(0)); 442 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 443 if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID) 444 c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC); 445 else 446 c.eqid_pkd = htobe32(V_FW_EQ_ETH_CMD_EQID(nm_txq->cntxt_id)); 447 c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 448 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); 449 c.fetchszm_to_iqid = 450 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 451 V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 452 V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id)); 453 c.dcaen_to_eqsize = 454 htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 455 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 456 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 457 V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE)); 458 c.eqaddr = htobe64(nm_txq->ba); 459 460 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 461 if (rc != 0) { 462 device_printf(vi->dev, 463 "failed to create netmap egress queue: %d\n", rc); 464 return (rc); 465 } 466 467 nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 468 cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start; 469 if (cntxt_id >= sc->sge.eqmap_sz) 470 panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__, 471 cntxt_id, sc->sge.eqmap_sz - 1); 472 sc->sge.eqmap[cntxt_id] = (void *)nm_txq; 473 474 nm_txq->pidx = nm_txq->cidx = 0; 475 MPASS(nm_txq->sidx == na->num_tx_desc); 476 nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0; 477 478 nm_txq->doorbells = sc->doorbells; 479 if (isset(&nm_txq->doorbells, DOORBELL_UDB) || 480 isset(&nm_txq->doorbells, DOORBELL_UDBWC) || 481 isset(&nm_txq->doorbells, DOORBELL_WCWR)) { 482 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 483 uint32_t mask = (1 << s_qpp) - 1; 484 volatile uint8_t *udb; 485 486 udb = sc->udbs_base + UDBS_DB_OFFSET; 487 udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT; 488 nm_txq->udb_qid = nm_txq->cntxt_id & mask; 489 if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 490 clrbit(&nm_txq->doorbells, DOORBELL_WCWR); 491 else { 492 udb += nm_txq->udb_qid << UDBS_SEG_SHIFT; 493 nm_txq->udb_qid = 0; 494 } 495 nm_txq->udb = (volatile void *)udb; 496 } 497 498 if (sc->params.fw_vers < FW_VERSION32(1, 25, 1, 0)) { 499 uint32_t param, val; 500 501 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 502 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) | 503 V_FW_PARAMS_PARAM_YZ(nm_txq->cntxt_id); 504 val = 0xff; 505 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 506 if (rc != 0) { 507 device_printf(vi->dev, 508 "failed to bind netmap txq %d to class 0xff: %d\n", 509 nm_txq->cntxt_id, rc); 510 rc = 0; 511 } 512 } 513 514 return (rc); 515 } 516 517 static int 518 free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 519 { 520 struct adapter *sc = vi->adapter; 521 int rc; 522 523 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id); 524 if (rc != 0) 525 device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__, 526 nm_txq->cntxt_id, rc); 527 nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; 528 return (rc); 529 } 530 531 static int 532 cxgbe_netmap_simple_rss(struct adapter *sc, struct vi_info *vi, 533 struct ifnet *ifp, struct netmap_adapter *na) 534 { 535 struct netmap_kring *kring; 536 struct sge_nm_rxq *nm_rxq; 537 int rc, i, j, nm_state, defq; 538 uint16_t *rss; 539 540 /* 541 * Check if there's at least one active (or about to go active) netmap 542 * rx queue. 543 */ 544 defq = -1; 545 for_each_nm_rxq(vi, j, nm_rxq) { 546 nm_state = atomic_load_int(&nm_rxq->nm_state); 547 kring = na->rx_rings[nm_rxq->nid]; 548 if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) || 549 (nm_state == NM_OFF && nm_kring_pending_on(kring))) { 550 MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 551 if (defq == -1) { 552 defq = nm_rxq->iq_abs_id; 553 break; 554 } 555 } 556 } 557 558 if (defq == -1) { 559 /* No active netmap queues. Switch back to NIC queues. */ 560 rss = vi->rss; 561 defq = vi->rss[0]; 562 } else { 563 for (i = 0; i < vi->rss_size;) { 564 for_each_nm_rxq(vi, j, nm_rxq) { 565 nm_state = atomic_load_int(&nm_rxq->nm_state); 566 kring = na->rx_rings[nm_rxq->nid]; 567 if ((nm_state != NM_OFF && 568 !nm_kring_pending_off(kring)) || 569 (nm_state == NM_OFF && 570 nm_kring_pending_on(kring))) { 571 MPASS(nm_rxq->iq_cntxt_id != 572 INVALID_NM_RXQ_CNTXT_ID); 573 vi->nm_rss[i++] = nm_rxq->iq_abs_id; 574 if (i == vi->rss_size) 575 break; 576 } 577 } 578 } 579 rss = vi->nm_rss; 580 } 581 582 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 583 vi->rss_size); 584 if (rc != 0) 585 if_printf(ifp, "netmap rss_config failed: %d\n", rc); 586 587 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0); 588 if (rc != 0) { 589 if_printf(ifp, "netmap defaultq config failed: %d\n", rc); 590 } 591 592 return (rc); 593 } 594 595 /* 596 * Odd number of rx queues work best for split RSS mode as the first queue can 597 * be dedicated for non-RSS traffic and the rest divided into two equal halves. 598 */ 599 static int 600 cxgbe_netmap_split_rss(struct adapter *sc, struct vi_info *vi, 601 struct ifnet *ifp, struct netmap_adapter *na) 602 { 603 struct netmap_kring *kring; 604 struct sge_nm_rxq *nm_rxq; 605 int rc, i, j, nm_state, defq; 606 int nactive[2] = {0, 0}; 607 int dq[2] = {-1, -1}; 608 bool dq_norss; /* default queue should not be in RSS table. */ 609 610 MPASS(nm_split_rss != 0); 611 MPASS(vi->nnmrxq > 1); 612 613 for_each_nm_rxq(vi, i, nm_rxq) { 614 j = i / ((vi->nnmrxq + 1) / 2); 615 nm_state = atomic_load_int(&nm_rxq->nm_state); 616 kring = na->rx_rings[nm_rxq->nid]; 617 if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) || 618 (nm_state == NM_OFF && nm_kring_pending_on(kring))) { 619 MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 620 nactive[j]++; 621 if (dq[j] == -1) { 622 dq[j] = nm_rxq->iq_abs_id; 623 break; 624 } 625 } 626 } 627 628 if (nactive[0] == 0 || nactive[1] == 0) 629 return (cxgbe_netmap_simple_rss(sc, vi, ifp, na)); 630 631 MPASS(dq[0] != -1 && dq[1] != -1); 632 if (nactive[0] > nactive[1]) { 633 defq = dq[0]; 634 dq_norss = true; 635 } else if (nactive[0] < nactive[1]) { 636 defq = dq[1]; 637 dq_norss = true; 638 } else { 639 defq = dq[0]; 640 dq_norss = false; 641 } 642 643 i = 0; 644 nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq]; 645 while (i < vi->rss_size / 2) { 646 for (j = 0; j < (vi->nnmrxq + 1) / 2; j++) { 647 nm_state = atomic_load_int(&nm_rxq[j].nm_state); 648 kring = na->rx_rings[nm_rxq[j].nid]; 649 if ((nm_state == NM_OFF && 650 !nm_kring_pending_on(kring)) || 651 (nm_state == NM_ON && 652 nm_kring_pending_off(kring))) { 653 continue; 654 } 655 MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 656 if (dq_norss && defq == nm_rxq[j].iq_abs_id) 657 continue; 658 vi->nm_rss[i++] = nm_rxq[j].iq_abs_id; 659 if (i == vi->rss_size / 2) 660 break; 661 } 662 } 663 while (i < vi->rss_size) { 664 for (j = (vi->nnmrxq + 1) / 2; j < vi->nnmrxq; j++) { 665 nm_state = atomic_load_int(&nm_rxq[j].nm_state); 666 kring = na->rx_rings[nm_rxq[j].nid]; 667 if ((nm_state == NM_OFF && 668 !nm_kring_pending_on(kring)) || 669 (nm_state == NM_ON && 670 nm_kring_pending_off(kring))) { 671 continue; 672 } 673 MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 674 if (dq_norss && defq == nm_rxq[j].iq_abs_id) 675 continue; 676 vi->nm_rss[i++] = nm_rxq[j].iq_abs_id; 677 if (i == vi->rss_size) 678 break; 679 } 680 } 681 682 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, 683 vi->nm_rss, vi->rss_size); 684 if (rc != 0) 685 if_printf(ifp, "netmap split_rss_config failed: %d\n", rc); 686 687 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0); 688 if (rc != 0) 689 if_printf(ifp, "netmap defaultq config failed: %d\n", rc); 690 691 return (rc); 692 } 693 694 static inline int 695 cxgbe_netmap_rss(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp, 696 struct netmap_adapter *na) 697 { 698 699 if (nm_split_rss == 0 || vi->nnmrxq == 1) 700 return (cxgbe_netmap_simple_rss(sc, vi, ifp, na)); 701 else 702 return (cxgbe_netmap_split_rss(sc, vi, ifp, na)); 703 } 704 705 static int 706 cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp, 707 struct netmap_adapter *na) 708 { 709 struct netmap_slot *slot; 710 struct netmap_kring *kring; 711 struct sge_nm_rxq *nm_rxq; 712 struct sge_nm_txq *nm_txq; 713 int i, j, hwidx; 714 struct rx_buf_info *rxb; 715 716 ASSERT_SYNCHRONIZED_OP(sc); 717 MPASS(vi->nnmrxq > 0); 718 MPASS(vi->nnmtxq > 0); 719 720 if ((vi->flags & VI_INIT_DONE) == 0 || 721 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 722 if_printf(ifp, "cannot enable netmap operation because " 723 "interface is not UP.\n"); 724 return (EAGAIN); 725 } 726 727 rxb = &sc->sge.rx_buf_info[0]; 728 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 729 if (rxb->size1 == NETMAP_BUF_SIZE(na)) { 730 hwidx = rxb->hwidx1; 731 break; 732 } 733 if (rxb->size2 == NETMAP_BUF_SIZE(na)) { 734 hwidx = rxb->hwidx2; 735 break; 736 } 737 } 738 if (i >= SW_ZONE_SIZES) { 739 if_printf(ifp, "no hwidx for netmap buffer size %d.\n", 740 NETMAP_BUF_SIZE(na)); 741 return (ENXIO); 742 } 743 744 /* Must set caps before calling netmap_reset */ 745 nm_set_native_flags(na); 746 747 for_each_nm_rxq(vi, i, nm_rxq) { 748 kring = na->rx_rings[nm_rxq->nid]; 749 if (!nm_kring_pending_on(kring)) 750 continue; 751 752 alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop)); 753 nm_rxq->fl_hwidx = hwidx; 754 slot = netmap_reset(na, NR_RX, i, 0); 755 MPASS(slot != NULL); /* XXXNM: error check, not assert */ 756 757 /* We deal with 8 bufs at a time */ 758 MPASS((na->num_rx_desc & 7) == 0); 759 MPASS(na->num_rx_desc == nm_rxq->fl_sidx); 760 for (j = 0; j < nm_rxq->fl_sidx; j++) { 761 uint64_t ba; 762 763 PNMB(na, &slot[j], &ba); 764 MPASS(ba != 0); 765 nm_rxq->fl_desc[j] = htobe64(ba | hwidx); 766 } 767 j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8; 768 MPASS((j & 7) == 0); 769 j /= 8; /* driver pidx to hardware pidx */ 770 wmb(); 771 t4_write_reg(sc, sc->sge_kdoorbell_reg, 772 nm_rxq->fl_db_val | V_PIDX(j)); 773 774 (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_OFF, NM_ON); 775 } 776 777 for_each_nm_txq(vi, i, nm_txq) { 778 kring = na->tx_rings[nm_txq->nid]; 779 if (!nm_kring_pending_on(kring)) 780 continue; 781 782 alloc_nm_txq_hwq(vi, nm_txq); 783 slot = netmap_reset(na, NR_TX, i, 0); 784 MPASS(slot != NULL); /* XXXNM: error check, not assert */ 785 } 786 787 if (vi->nm_rss == NULL) { 788 vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE, 789 M_ZERO | M_WAITOK); 790 } 791 792 return (cxgbe_netmap_rss(sc, vi, ifp, na)); 793 } 794 795 static int 796 cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp, 797 struct netmap_adapter *na) 798 { 799 struct netmap_kring *kring; 800 int rc, i, nm_state, nactive; 801 struct sge_nm_txq *nm_txq; 802 struct sge_nm_rxq *nm_rxq; 803 804 ASSERT_SYNCHRONIZED_OP(sc); 805 MPASS(vi->nnmrxq > 0); 806 MPASS(vi->nnmtxq > 0); 807 808 if (!nm_netmap_on(na)) 809 return (0); 810 811 if ((vi->flags & VI_INIT_DONE) == 0) 812 return (0); 813 814 /* First remove the queues that are stopping from the RSS table. */ 815 rc = cxgbe_netmap_rss(sc, vi, ifp, na); 816 if (rc != 0) 817 return (rc); /* error message logged already. */ 818 819 for_each_nm_txq(vi, i, nm_txq) { 820 kring = na->tx_rings[nm_txq->nid]; 821 if (!nm_kring_pending_off(kring)) 822 continue; 823 MPASS(nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID); 824 825 rc = -t4_eth_eq_stop(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id); 826 if (rc != 0) { 827 device_printf(vi->dev, 828 "failed to stop nm_txq[%d]: %d.\n", i, rc); 829 return (rc); 830 } 831 832 /* XXX: netmap, not the driver, should do this. */ 833 kring->rhead = kring->rcur = kring->nr_hwcur = 0; 834 kring->rtail = kring->nr_hwtail = kring->nkr_num_slots - 1; 835 } 836 nactive = 0; 837 for_each_nm_rxq(vi, i, nm_rxq) { 838 nm_state = atomic_load_int(&nm_rxq->nm_state); 839 kring = na->rx_rings[nm_rxq->nid]; 840 if (nm_state != NM_OFF && !nm_kring_pending_off(kring)) 841 nactive++; 842 if (!nm_kring_pending_off(kring)) 843 continue; 844 MPASS(nm_state != NM_OFF); 845 MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 846 847 rc = -t4_iq_stop(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 848 nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff); 849 if (rc != 0) { 850 device_printf(vi->dev, 851 "failed to stop nm_rxq[%d]: %d.\n", i, rc); 852 return (rc); 853 } 854 855 while (!atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_OFF)) 856 pause("nmst", 1); 857 858 /* XXX: netmap, not the driver, should do this. */ 859 kring->rhead = kring->rcur = kring->nr_hwcur = 0; 860 kring->rtail = kring->nr_hwtail = 0; 861 } 862 netmap_krings_mode_commit(na, 0); 863 if (nactive == 0) 864 nm_clear_native_flags(na); 865 866 return (rc); 867 } 868 869 static int 870 cxgbe_netmap_reg(struct netmap_adapter *na, int on) 871 { 872 struct ifnet *ifp = na->ifp; 873 struct vi_info *vi = ifp->if_softc; 874 struct adapter *sc = vi->adapter; 875 int rc; 876 877 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg"); 878 if (rc != 0) 879 return (rc); 880 if (on) 881 rc = cxgbe_netmap_on(sc, vi, ifp, na); 882 else 883 rc = cxgbe_netmap_off(sc, vi, ifp, na); 884 end_synchronized_op(sc, 0); 885 886 return (rc); 887 } 888 889 /* How many packets can a single type1 WR carry in n descriptors */ 890 static inline int 891 ndesc_to_npkt(const int n) 892 { 893 894 MPASS(n > 0 && n <= SGE_MAX_WR_NDESC); 895 896 return (n * 2 - 1); 897 } 898 #define MAX_NPKT_IN_TYPE1_WR (ndesc_to_npkt(SGE_MAX_WR_NDESC)) 899 900 /* 901 * Space (in descriptors) needed for a type1 WR (TX_PKTS or TX_PKTS2) that 902 * carries n packets 903 */ 904 static inline int 905 npkt_to_ndesc(const int n) 906 { 907 908 MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR); 909 910 return ((n + 2) / 2); 911 } 912 913 /* 914 * Space (in 16B units) needed for a type1 WR (TX_PKTS or TX_PKTS2) that 915 * carries n packets 916 */ 917 static inline int 918 npkt_to_len16(const int n) 919 { 920 921 MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR); 922 923 return (n * 2 + 1); 924 } 925 926 #define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx) 927 928 static void 929 ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq) 930 { 931 int n; 932 u_int db = nm_txq->doorbells; 933 934 MPASS(nm_txq->pidx != nm_txq->dbidx); 935 936 n = NMIDXDIFF(nm_txq, dbidx); 937 if (n > 1) 938 clrbit(&db, DOORBELL_WCWR); 939 wmb(); 940 941 switch (ffs(db) - 1) { 942 case DOORBELL_UDB: 943 *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n)); 944 break; 945 946 case DOORBELL_WCWR: { 947 volatile uint64_t *dst, *src; 948 949 /* 950 * Queues whose 128B doorbell segment fits in the page do not 951 * use relative qid (udb_qid is always 0). Only queues with 952 * doorbell segments can do WCWR. 953 */ 954 KASSERT(nm_txq->udb_qid == 0 && n == 1, 955 ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p", 956 __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq)); 957 958 dst = (volatile void *)((uintptr_t)nm_txq->udb + 959 UDBS_WR_OFFSET - UDBS_DB_OFFSET); 960 src = (void *)&nm_txq->desc[nm_txq->dbidx]; 961 while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1]) 962 *dst++ = *src++; 963 wmb(); 964 break; 965 } 966 967 case DOORBELL_UDBWC: 968 *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n)); 969 wmb(); 970 break; 971 972 case DOORBELL_KDB: 973 t4_write_reg(sc, sc->sge_kdoorbell_reg, 974 V_QID(nm_txq->cntxt_id) | V_PIDX(n)); 975 break; 976 } 977 nm_txq->dbidx = nm_txq->pidx; 978 } 979 980 /* 981 * Write work requests to send 'npkt' frames and ring the doorbell to send them 982 * on their way. No need to check for wraparound. 983 */ 984 static void 985 cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq, 986 struct netmap_kring *kring, int npkt, int npkt_remaining) 987 { 988 struct netmap_ring *ring = kring->ring; 989 struct netmap_slot *slot; 990 const u_int lim = kring->nkr_num_slots - 1; 991 struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx]; 992 uint16_t len; 993 uint64_t ba; 994 struct cpl_tx_pkt_core *cpl; 995 struct ulptx_sgl *usgl; 996 int i, n; 997 998 while (npkt) { 999 n = min(npkt, MAX_NPKT_IN_TYPE1_WR); 1000 len = 0; 1001 1002 wr = (void *)&nm_txq->desc[nm_txq->pidx]; 1003 wr->op_pkd = nm_txq->op_pkd; 1004 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n))); 1005 wr->npkt = n; 1006 wr->r3 = 0; 1007 wr->type = 1; 1008 cpl = (void *)(wr + 1); 1009 1010 for (i = 0; i < n; i++) { 1011 slot = &ring->slot[kring->nr_hwcur]; 1012 PNMB(kring->na, slot, &ba); 1013 MPASS(ba != 0); 1014 1015 cpl->ctrl0 = nm_txq->cpl_ctrl0; 1016 cpl->pack = 0; 1017 cpl->len = htobe16(slot->len); 1018 cpl->ctrl1 = nm_txcsum ? 0 : 1019 htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS); 1020 1021 usgl = (void *)(cpl + 1); 1022 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 1023 V_ULPTX_NSGE(1)); 1024 usgl->len0 = htobe32(slot->len); 1025 usgl->addr0 = htobe64(ba); 1026 1027 slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); 1028 cpl = (void *)(usgl + 1); 1029 MPASS(slot->len + len <= UINT16_MAX); 1030 len += slot->len; 1031 kring->nr_hwcur = nm_next(kring->nr_hwcur, lim); 1032 } 1033 wr->plen = htobe16(len); 1034 1035 npkt -= n; 1036 nm_txq->pidx += npkt_to_ndesc(n); 1037 MPASS(nm_txq->pidx <= nm_txq->sidx); 1038 if (__predict_false(nm_txq->pidx == nm_txq->sidx)) { 1039 /* 1040 * This routine doesn't know how to write WRs that wrap 1041 * around. Make sure it wasn't asked to. 1042 */ 1043 MPASS(npkt == 0); 1044 nm_txq->pidx = 0; 1045 } 1046 1047 if (npkt == 0 && npkt_remaining == 0) { 1048 /* All done. */ 1049 if (lazy_tx_credit_flush == 0) { 1050 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | 1051 F_FW_WR_EQUIQ); 1052 nm_txq->equeqidx = nm_txq->pidx; 1053 nm_txq->equiqidx = nm_txq->pidx; 1054 } 1055 ring_nm_txq_db(sc, nm_txq); 1056 return; 1057 } 1058 1059 if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) { 1060 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | 1061 F_FW_WR_EQUIQ); 1062 nm_txq->equeqidx = nm_txq->pidx; 1063 nm_txq->equiqidx = nm_txq->pidx; 1064 } else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) { 1065 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 1066 nm_txq->equeqidx = nm_txq->pidx; 1067 } 1068 if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC) 1069 ring_nm_txq_db(sc, nm_txq); 1070 } 1071 1072 /* Will get called again. */ 1073 MPASS(npkt_remaining); 1074 } 1075 1076 /* How many contiguous free descriptors starting at pidx */ 1077 static inline int 1078 contiguous_ndesc_available(struct sge_nm_txq *nm_txq) 1079 { 1080 1081 if (nm_txq->cidx > nm_txq->pidx) 1082 return (nm_txq->cidx - nm_txq->pidx - 1); 1083 else if (nm_txq->cidx > 0) 1084 return (nm_txq->sidx - nm_txq->pidx); 1085 else 1086 return (nm_txq->sidx - nm_txq->pidx - 1); 1087 } 1088 1089 static int 1090 reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq) 1091 { 1092 struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx]; 1093 uint16_t hw_cidx = spg->cidx; /* snapshot */ 1094 struct fw_eth_tx_pkts_wr *wr; 1095 int n = 0; 1096 1097 hw_cidx = be16toh(hw_cidx); 1098 1099 while (nm_txq->cidx != hw_cidx) { 1100 wr = (void *)&nm_txq->desc[nm_txq->cidx]; 1101 1102 MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)) || 1103 wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR))); 1104 MPASS(wr->type == 1); 1105 MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR); 1106 1107 n += wr->npkt; 1108 nm_txq->cidx += npkt_to_ndesc(wr->npkt); 1109 1110 /* 1111 * We never sent a WR that wrapped around so the credits coming 1112 * back, WR by WR, should never cause the cidx to wrap around 1113 * either. 1114 */ 1115 MPASS(nm_txq->cidx <= nm_txq->sidx); 1116 if (__predict_false(nm_txq->cidx == nm_txq->sidx)) 1117 nm_txq->cidx = 0; 1118 } 1119 1120 return (n); 1121 } 1122 1123 static int 1124 cxgbe_netmap_txsync(struct netmap_kring *kring, int flags) 1125 { 1126 struct netmap_adapter *na = kring->na; 1127 struct ifnet *ifp = na->ifp; 1128 struct vi_info *vi = ifp->if_softc; 1129 struct adapter *sc = vi->adapter; 1130 struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id]; 1131 const u_int head = kring->rhead; 1132 u_int reclaimed = 0; 1133 int n, d, npkt_remaining, ndesc_remaining; 1134 1135 /* 1136 * Tx was at kring->nr_hwcur last time around and now we need to advance 1137 * to kring->rhead. Note that the driver's pidx moves independent of 1138 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation 1139 * between descriptors and frames isn't 1:1). 1140 */ 1141 1142 npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur : 1143 kring->nkr_num_slots - kring->nr_hwcur + head; 1144 while (npkt_remaining) { 1145 reclaimed += reclaim_nm_tx_desc(nm_txq); 1146 ndesc_remaining = contiguous_ndesc_available(nm_txq); 1147 /* Can't run out of descriptors with packets still remaining */ 1148 MPASS(ndesc_remaining > 0); 1149 1150 /* # of desc needed to tx all remaining packets */ 1151 d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC; 1152 if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR) 1153 d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR); 1154 1155 if (d <= ndesc_remaining) 1156 n = npkt_remaining; 1157 else { 1158 /* Can't send all, calculate how many can be sent */ 1159 n = (ndesc_remaining / SGE_MAX_WR_NDESC) * 1160 MAX_NPKT_IN_TYPE1_WR; 1161 if (ndesc_remaining % SGE_MAX_WR_NDESC) 1162 n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC); 1163 } 1164 1165 /* Send n packets and update nm_txq->pidx and kring->nr_hwcur */ 1166 npkt_remaining -= n; 1167 cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining); 1168 } 1169 MPASS(npkt_remaining == 0); 1170 MPASS(kring->nr_hwcur == head); 1171 MPASS(nm_txq->dbidx == nm_txq->pidx); 1172 1173 /* 1174 * Second part: reclaim buffers for completed transmissions. 1175 */ 1176 if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) { 1177 reclaimed += reclaim_nm_tx_desc(nm_txq); 1178 kring->nr_hwtail += reclaimed; 1179 if (kring->nr_hwtail >= kring->nkr_num_slots) 1180 kring->nr_hwtail -= kring->nkr_num_slots; 1181 } 1182 1183 return (0); 1184 } 1185 1186 static int 1187 cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags) 1188 { 1189 struct netmap_adapter *na = kring->na; 1190 struct netmap_ring *ring = kring->ring; 1191 struct ifnet *ifp = na->ifp; 1192 struct vi_info *vi = ifp->if_softc; 1193 struct adapter *sc = vi->adapter; 1194 struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id]; 1195 u_int const head = kring->rhead; 1196 u_int n; 1197 int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; 1198 1199 if (black_hole) 1200 return (0); /* No updates ever. */ 1201 1202 if (netmap_no_pendintr || force_update) { 1203 kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx); 1204 kring->nr_kflags &= ~NKR_PENDINTR; 1205 } 1206 1207 if (nm_rxq->fl_db_saved > 0 && starve_fl == 0) { 1208 wmb(); 1209 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1210 nm_rxq->fl_db_val | V_PIDX(nm_rxq->fl_db_saved)); 1211 nm_rxq->fl_db_saved = 0; 1212 } 1213 1214 /* Userspace done with buffers from kring->nr_hwcur to head */ 1215 n = head >= kring->nr_hwcur ? head - kring->nr_hwcur : 1216 kring->nkr_num_slots - kring->nr_hwcur + head; 1217 n &= ~7U; 1218 if (n > 0) { 1219 u_int fl_pidx = nm_rxq->fl_pidx; 1220 struct netmap_slot *slot = &ring->slot[fl_pidx]; 1221 uint64_t ba; 1222 int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx; 1223 1224 /* 1225 * We always deal with 8 buffers at a time. We must have 1226 * stopped at an 8B boundary (fl_pidx) last time around and we 1227 * must have a multiple of 8B buffers to give to the freelist. 1228 */ 1229 MPASS((fl_pidx & 7) == 0); 1230 MPASS((n & 7) == 0); 1231 1232 IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots); 1233 IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx2); 1234 1235 while (n > 0) { 1236 for (i = 0; i < 8; i++, fl_pidx++, slot++) { 1237 PNMB(na, slot, &ba); 1238 MPASS(ba != 0); 1239 nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx); 1240 slot->flags &= ~NS_BUF_CHANGED; 1241 MPASS(fl_pidx <= nm_rxq->fl_sidx2); 1242 } 1243 n -= 8; 1244 if (fl_pidx == nm_rxq->fl_sidx2) { 1245 fl_pidx = 0; 1246 slot = &ring->slot[0]; 1247 } 1248 if (++dbinc == nm_rxq->fl_db_threshold) { 1249 wmb(); 1250 if (starve_fl) 1251 nm_rxq->fl_db_saved += dbinc; 1252 else { 1253 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1254 nm_rxq->fl_db_val | V_PIDX(dbinc)); 1255 } 1256 dbinc = 0; 1257 } 1258 } 1259 MPASS(nm_rxq->fl_pidx == fl_pidx); 1260 1261 if (dbinc > 0) { 1262 wmb(); 1263 if (starve_fl) 1264 nm_rxq->fl_db_saved += dbinc; 1265 else { 1266 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1267 nm_rxq->fl_db_val | V_PIDX(dbinc)); 1268 } 1269 } 1270 } 1271 1272 return (0); 1273 } 1274 1275 void 1276 cxgbe_nm_attach(struct vi_info *vi) 1277 { 1278 struct port_info *pi; 1279 struct adapter *sc; 1280 struct netmap_adapter na; 1281 1282 MPASS(vi->nnmrxq > 0); 1283 MPASS(vi->ifp != NULL); 1284 1285 pi = vi->pi; 1286 sc = pi->adapter; 1287 1288 bzero(&na, sizeof(na)); 1289 1290 na.ifp = vi->ifp; 1291 na.na_flags = NAF_BDG_MAYSLEEP; 1292 1293 /* Netmap doesn't know about the space reserved for the status page. */ 1294 na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE; 1295 1296 /* 1297 * The freelist's cidx/pidx drives netmap's rx cidx/pidx. So 1298 * num_rx_desc is based on the number of buffers that can be held in the 1299 * freelist, and not the number of entries in the iq. (These two are 1300 * not exactly the same due to the space taken up by the status page). 1301 */ 1302 na.num_rx_desc = rounddown(vi->qsize_rxq, 8); 1303 na.nm_txsync = cxgbe_netmap_txsync; 1304 na.nm_rxsync = cxgbe_netmap_rxsync; 1305 na.nm_register = cxgbe_netmap_reg; 1306 na.num_tx_rings = vi->nnmtxq; 1307 na.num_rx_rings = vi->nnmrxq; 1308 na.rx_buf_maxsize = MAX_MTU; 1309 netmap_attach(&na); /* This adds IFCAP_NETMAP to if_capabilities */ 1310 } 1311 1312 void 1313 cxgbe_nm_detach(struct vi_info *vi) 1314 { 1315 1316 MPASS(vi->nnmrxq > 0); 1317 MPASS(vi->ifp != NULL); 1318 1319 netmap_detach(vi->ifp); 1320 } 1321 1322 static inline const void * 1323 unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl) 1324 { 1325 1326 MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL); 1327 1328 /* data[0] is RSS header */ 1329 return (&cpl->data[1]); 1330 } 1331 1332 static void 1333 handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp, 1334 const struct cpl_sge_egr_update *egr) 1335 { 1336 uint32_t oq; 1337 struct sge_nm_txq *nm_txq; 1338 1339 oq = be32toh(egr->opcode_qid); 1340 MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE); 1341 nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start]; 1342 1343 netmap_tx_irq(ifp, nm_txq->nid); 1344 } 1345 1346 void 1347 service_nm_rxq(struct sge_nm_rxq *nm_rxq) 1348 { 1349 struct vi_info *vi = nm_rxq->vi; 1350 struct adapter *sc = vi->adapter; 1351 struct ifnet *ifp = vi->ifp; 1352 struct netmap_adapter *na = NA(ifp); 1353 struct netmap_kring *kring = na->rx_rings[nm_rxq->nid]; 1354 struct netmap_ring *ring = kring->ring; 1355 struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx]; 1356 const void *cpl; 1357 uint32_t lq; 1358 u_int work = 0; 1359 uint8_t opcode; 1360 uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx); 1361 u_int fl_credits = fl_cidx & 7; 1362 u_int ndesc = 0; /* desc processed since last cidx update */ 1363 u_int nframes = 0; /* frames processed since last netmap wakeup */ 1364 1365 while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) { 1366 1367 rmb(); 1368 1369 lq = be32toh(d->rsp.pldbuflen_qid); 1370 opcode = d->rss.opcode; 1371 cpl = &d->cpl[0]; 1372 1373 switch (G_RSPD_TYPE(d->rsp.u.type_gen)) { 1374 case X_RSPD_TYPE_FLBUF: 1375 1376 /* fall through */ 1377 1378 case X_RSPD_TYPE_CPL: 1379 MPASS(opcode < NUM_CPL_CMDS); 1380 1381 switch (opcode) { 1382 case CPL_FW4_MSG: 1383 case CPL_FW6_MSG: 1384 cpl = unwrap_nm_fw6_msg(cpl); 1385 /* fall through */ 1386 case CPL_SGE_EGR_UPDATE: 1387 handle_nm_sge_egr_update(sc, ifp, cpl); 1388 break; 1389 case CPL_RX_PKT: 1390 ring->slot[fl_cidx].len = G_RSPD_LEN(lq) - 1391 sc->params.sge.fl_pktshift; 1392 ring->slot[fl_cidx].flags = 0; 1393 nframes++; 1394 if (!(lq & F_RSPD_NEWBUF)) { 1395 MPASS(black_hole == 2); 1396 break; 1397 } 1398 fl_credits++; 1399 if (__predict_false(++fl_cidx == nm_rxq->fl_sidx)) 1400 fl_cidx = 0; 1401 break; 1402 default: 1403 panic("%s: unexpected opcode 0x%x on nm_rxq %p", 1404 __func__, opcode, nm_rxq); 1405 } 1406 break; 1407 1408 case X_RSPD_TYPE_INTR: 1409 /* Not equipped to handle forwarded interrupts. */ 1410 panic("%s: netmap queue received interrupt for iq %u\n", 1411 __func__, lq); 1412 1413 default: 1414 panic("%s: illegal response type %d on nm_rxq %p", 1415 __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq); 1416 } 1417 1418 d++; 1419 if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) { 1420 nm_rxq->iq_cidx = 0; 1421 d = &nm_rxq->iq_desc[0]; 1422 nm_rxq->iq_gen ^= F_RSPD_GEN; 1423 } 1424 1425 if (__predict_false(++nframes == rx_nframes) && !black_hole) { 1426 atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx); 1427 netmap_rx_irq(ifp, nm_rxq->nid, &work); 1428 nframes = 0; 1429 } 1430 1431 if (__predict_false(++ndesc == rx_ndesc)) { 1432 if (black_hole && fl_credits >= 8) { 1433 fl_credits /= 8; 1434 IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, 1435 nm_rxq->fl_sidx); 1436 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1437 nm_rxq->fl_db_val | V_PIDX(fl_credits)); 1438 fl_credits = fl_cidx & 7; 1439 } 1440 t4_write_reg(sc, sc->sge_gts_reg, 1441 V_CIDXINC(ndesc) | 1442 V_INGRESSQID(nm_rxq->iq_cntxt_id) | 1443 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1444 ndesc = 0; 1445 } 1446 } 1447 1448 atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx); 1449 if (black_hole) { 1450 fl_credits /= 8; 1451 IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx); 1452 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1453 nm_rxq->fl_db_val | V_PIDX(fl_credits)); 1454 } else if (nframes > 0) 1455 netmap_rx_irq(ifp, nm_rxq->nid, &work); 1456 1457 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) | 1458 V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) | 1459 V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); 1460 } 1461 #endif 1462