1 /*- 2 * Copyright (c) 2014 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 #include "opt_inet6.h" 33 34 #ifdef DEV_NETMAP 35 #include <sys/param.h> 36 #include <sys/bus.h> 37 #include <sys/eventhandler.h> 38 #include <sys/lock.h> 39 #include <sys/mbuf.h> 40 #include <sys/module.h> 41 #include <sys/selinfo.h> 42 #include <sys/socket.h> 43 #include <sys/sockio.h> 44 #include <machine/bus.h> 45 #include <net/ethernet.h> 46 #include <net/if.h> 47 #include <net/if_media.h> 48 #include <net/if_var.h> 49 #include <net/if_clone.h> 50 #include <net/if_types.h> 51 #include <net/netmap.h> 52 #include <dev/netmap/netmap_kern.h> 53 54 #include "common/common.h" 55 #include "common/t4_regs.h" 56 #include "common/t4_regs_values.h" 57 58 extern int fl_pad; /* XXXNM */ 59 60 /* 61 * 0 = normal netmap rx 62 * 1 = black hole 63 * 2 = supermassive black hole (buffer packing enabled) 64 */ 65 int black_hole = 0; 66 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RWTUN, &black_hole, 0, 67 "Sink incoming packets."); 68 69 int rx_ndesc = 256; 70 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN, 71 &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated."); 72 73 int rx_nframes = 64; 74 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_nframes, CTLFLAG_RWTUN, 75 &rx_nframes, 0, "max # of frames received before waking up netmap rx."); 76 77 int holdoff_tmr_idx = 2; 78 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN, 79 &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues."); 80 81 /* 82 * Congestion drops. 83 * -1: no congestion feedback (not recommended). 84 * 0: backpressure the channel instead of dropping packets right away. 85 * 1: no backpressure, drop packets for the congested queue immediately. 86 */ 87 static int nm_cong_drop = 1; 88 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_cong_drop, CTLFLAG_RWTUN, 89 &nm_cong_drop, 0, 90 "Congestion control for netmap rx queues (0 = backpressure, 1 = drop"); 91 92 int starve_fl = 0; 93 SYSCTL_INT(_hw_cxgbe, OID_AUTO, starve_fl, CTLFLAG_RWTUN, 94 &starve_fl, 0, "Don't ring fl db for netmap rx queues."); 95 96 /* 97 * Try to process tx credits in bulk. This may cause a delay in the return of 98 * tx credits and is suitable for bursty or non-stop tx only. 99 */ 100 int lazy_tx_credit_flush = 1; 101 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lazy_tx_credit_flush, CTLFLAG_RWTUN, 102 &lazy_tx_credit_flush, 0, "lazy credit flush for netmap tx queues."); 103 104 /* 105 * Split the netmap rx queues into two groups that populate separate halves of 106 * the RSS indirection table. This allows filters with hashmask to steer to a 107 * particular group of queues. 108 */ 109 static int nm_split_rss = 0; 110 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_split_rss, CTLFLAG_RWTUN, 111 &nm_split_rss, 0, "Split the netmap rx queues into two groups."); 112 113 /* 114 * netmap(4) says "netmap does not use features such as checksum offloading, TCP 115 * segmentation offloading, encryption, VLAN encapsulation/decapsulation, etc." 116 * but this knob can be used to get the hardware to checksum all tx traffic 117 * anyway. 118 */ 119 static int nm_txcsum = 0; 120 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_txcsum, CTLFLAG_RWTUN, 121 &nm_txcsum, 0, "Enable transmit checksum offloading."); 122 123 static int free_nm_rxq_hwq(struct vi_info *, struct sge_nm_rxq *); 124 static int free_nm_txq_hwq(struct vi_info *, struct sge_nm_txq *); 125 126 int 127 alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx, 128 int idx, struct sysctl_oid *oid) 129 { 130 int rc; 131 struct sysctl_oid_list *children; 132 struct sysctl_ctx_list *ctx; 133 char name[16]; 134 size_t len; 135 struct adapter *sc = vi->adapter; 136 struct netmap_adapter *na = NA(vi->ifp); 137 138 MPASS(na != NULL); 139 140 len = vi->qsize_rxq * IQ_ESIZE; 141 rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, 142 &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); 143 if (rc != 0) 144 return (rc); 145 146 len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len; 147 rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, 148 &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); 149 if (rc != 0) 150 return (rc); 151 152 nm_rxq->vi = vi; 153 nm_rxq->nid = idx; 154 nm_rxq->iq_cidx = 0; 155 nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE; 156 nm_rxq->iq_gen = F_RSPD_GEN; 157 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 158 nm_rxq->fl_sidx = na->num_rx_desc; 159 nm_rxq->fl_sidx2 = nm_rxq->fl_sidx; /* copy for rxsync cacheline */ 160 nm_rxq->intr_idx = intr_idx; 161 nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; 162 163 ctx = &vi->ctx; 164 children = SYSCTL_CHILDREN(oid); 165 166 snprintf(name, sizeof(name), "%d", idx); 167 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, 168 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue"); 169 children = SYSCTL_CHILDREN(oid); 170 171 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, 172 &nm_rxq->iq_abs_id, 0, "absolute id of the queue"); 173 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 174 &nm_rxq->iq_cntxt_id, 0, "SGE context id of the queue"); 175 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 176 &nm_rxq->iq_cidx, 0, "consumer index"); 177 178 children = SYSCTL_CHILDREN(oid); 179 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", 180 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist"); 181 children = SYSCTL_CHILDREN(oid); 182 183 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 184 &nm_rxq->fl_cntxt_id, 0, "SGE context id of the freelist"); 185 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 186 &nm_rxq->fl_cidx, 0, "consumer index"); 187 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 188 &nm_rxq->fl_pidx, 0, "producer index"); 189 190 return (rc); 191 } 192 193 int 194 free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 195 { 196 struct adapter *sc = vi->adapter; 197 198 if (!(vi->flags & VI_INIT_DONE)) 199 return (0); 200 201 if (nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID) 202 free_nm_rxq_hwq(vi, nm_rxq); 203 MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID); 204 205 free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, 206 nm_rxq->iq_desc); 207 free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, 208 nm_rxq->fl_desc); 209 210 return (0); 211 } 212 213 int 214 alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx, 215 struct sysctl_oid *oid) 216 { 217 int rc; 218 size_t len; 219 struct port_info *pi = vi->pi; 220 struct adapter *sc = pi->adapter; 221 struct netmap_adapter *na = NA(vi->ifp); 222 char name[16]; 223 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 224 225 len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 226 rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, 227 &nm_txq->ba, (void **)&nm_txq->desc); 228 if (rc) 229 return (rc); 230 231 nm_txq->pidx = nm_txq->cidx = 0; 232 nm_txq->sidx = na->num_tx_desc; 233 nm_txq->nid = idx; 234 nm_txq->iqidx = iqidx; 235 nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 236 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | 237 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 238 if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0)) 239 nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); 240 else 241 nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 242 nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; 243 244 snprintf(name, sizeof(name), "%d", idx); 245 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, 246 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queue"); 247 children = SYSCTL_CHILDREN(oid); 248 249 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 250 &nm_txq->cntxt_id, 0, "SGE context id of the queue"); 251 SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 252 &nm_txq->cidx, 0, "consumer index"); 253 SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 254 &nm_txq->pidx, 0, "producer index"); 255 256 return (rc); 257 } 258 259 int 260 free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 261 { 262 struct adapter *sc = vi->adapter; 263 264 if (!(vi->flags & VI_INIT_DONE)) 265 return (0); 266 267 if (nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID) 268 free_nm_txq_hwq(vi, nm_txq); 269 MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID); 270 271 free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, 272 nm_txq->desc); 273 274 return (0); 275 } 276 277 static int 278 alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong) 279 { 280 int rc, cntxt_id, i; 281 __be32 v; 282 struct adapter *sc = vi->adapter; 283 struct sge_params *sp = &sc->params.sge; 284 struct netmap_adapter *na = NA(vi->ifp); 285 struct fw_iq_cmd c; 286 287 MPASS(na != NULL); 288 MPASS(nm_rxq->iq_desc != NULL); 289 MPASS(nm_rxq->fl_desc != NULL); 290 291 bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE); 292 bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len); 293 294 bzero(&c, sizeof(c)); 295 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 296 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 297 V_FW_IQ_CMD_VFN(0)); 298 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_IQSTART | FW_LEN16(c)); 299 if (nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID) 300 c.alloc_to_len16 |= htobe32(F_FW_IQ_CMD_ALLOC); 301 else { 302 c.iqid = htobe16(nm_rxq->iq_cntxt_id); 303 c.fl0id = htobe16(nm_rxq->fl_cntxt_id); 304 c.fl1id = htobe16(0xffff); 305 c.physiqid = htobe16(nm_rxq->iq_abs_id); 306 } 307 MPASS(!forwarding_intr_to_fwq(sc)); 308 KASSERT(nm_rxq->intr_idx < sc->intr_count, 309 ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx)); 310 v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx); 311 c.type_to_iqandstindex = htobe32(v | 312 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 313 V_FW_IQ_CMD_VIID(vi->viid) | 314 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 315 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) | 316 F_FW_IQ_CMD_IQGTSMODE | 317 V_FW_IQ_CMD_IQINTCNTTHRESH(0) | 318 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 319 c.iqsize = htobe16(vi->qsize_rxq); 320 c.iqaddr = htobe64(nm_rxq->iq_ba); 321 if (cong >= 0) { 322 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN | 323 V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF | 324 F_FW_IQ_CMD_FL0CONGEN); 325 } 326 c.iqns_to_fl0congen |= 327 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 328 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 329 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 330 (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0)); 331 c.fl0dcaen_to_fl0cidxfthresh = 332 htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? 333 X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) | 334 V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 335 X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); 336 c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE); 337 c.fl0addr = htobe64(nm_rxq->fl_ba); 338 339 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 340 if (rc != 0) { 341 device_printf(sc->dev, 342 "failed to create netmap ingress queue: %d\n", rc); 343 return (rc); 344 } 345 346 nm_rxq->iq_cidx = 0; 347 MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE); 348 nm_rxq->iq_gen = F_RSPD_GEN; 349 nm_rxq->iq_cntxt_id = be16toh(c.iqid); 350 nm_rxq->iq_abs_id = be16toh(c.physiqid); 351 cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start; 352 if (cntxt_id >= sc->sge.iqmap_sz) { 353 panic ("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)", 354 __func__, cntxt_id, sc->sge.iqmap_sz - 1); 355 } 356 sc->sge.iqmap[cntxt_id] = (void *)nm_rxq; 357 358 nm_rxq->fl_cntxt_id = be16toh(c.fl0id); 359 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 360 nm_rxq->fl_db_saved = 0; 361 /* matches the X_FETCHBURSTMAX_512B or X_FETCHBURSTMAX_256B above. */ 362 nm_rxq->fl_db_threshold = chip_id(sc) <= CHELSIO_T5 ? 8 : 4; 363 MPASS(nm_rxq->fl_sidx == na->num_rx_desc); 364 cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start; 365 if (cntxt_id >= sc->sge.eqmap_sz) { 366 panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)", 367 __func__, cntxt_id, sc->sge.eqmap_sz - 1); 368 } 369 sc->sge.eqmap[cntxt_id] = (void *)nm_rxq; 370 371 nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) | 372 sc->chip_params->sge_fl_db; 373 374 if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) { 375 uint32_t param, val; 376 377 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 378 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 379 V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id); 380 if (cong == 0) 381 val = 1 << 19; 382 else { 383 val = 2 << 19; 384 for (i = 0; i < 4; i++) { 385 if (cong & (1 << i)) 386 val |= 1 << (i << 2); 387 } 388 } 389 390 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 391 if (rc != 0) { 392 /* report error but carry on */ 393 device_printf(sc->dev, 394 "failed to set congestion manager context for " 395 "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc); 396 } 397 } 398 399 t4_write_reg(sc, sc->sge_gts_reg, 400 V_INGRESSQID(nm_rxq->iq_cntxt_id) | 401 V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); 402 403 return (rc); 404 } 405 406 static int 407 free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 408 { 409 struct adapter *sc = vi->adapter; 410 int rc; 411 412 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 413 nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff); 414 if (rc != 0) 415 device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n", 416 __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc); 417 nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; 418 return (rc); 419 } 420 421 static int 422 alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 423 { 424 int rc, cntxt_id; 425 size_t len; 426 struct adapter *sc = vi->adapter; 427 struct netmap_adapter *na = NA(vi->ifp); 428 struct fw_eq_eth_cmd c; 429 430 MPASS(na != NULL); 431 MPASS(nm_txq->desc != NULL); 432 433 len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 434 bzero(nm_txq->desc, len); 435 436 bzero(&c, sizeof(c)); 437 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 438 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 439 V_FW_EQ_ETH_CMD_VFN(0)); 440 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 441 if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID) 442 c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC); 443 else 444 c.eqid_pkd = htobe32(V_FW_EQ_ETH_CMD_EQID(nm_txq->cntxt_id)); 445 c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 446 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); 447 c.fetchszm_to_iqid = 448 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 449 V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 450 V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id)); 451 c.dcaen_to_eqsize = 452 htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 453 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 454 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 455 V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE)); 456 c.eqaddr = htobe64(nm_txq->ba); 457 458 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 459 if (rc != 0) { 460 device_printf(vi->dev, 461 "failed to create netmap egress queue: %d\n", rc); 462 return (rc); 463 } 464 465 nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 466 cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start; 467 if (cntxt_id >= sc->sge.eqmap_sz) 468 panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__, 469 cntxt_id, sc->sge.eqmap_sz - 1); 470 sc->sge.eqmap[cntxt_id] = (void *)nm_txq; 471 472 nm_txq->pidx = nm_txq->cidx = 0; 473 MPASS(nm_txq->sidx == na->num_tx_desc); 474 nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0; 475 476 nm_txq->doorbells = sc->doorbells; 477 if (isset(&nm_txq->doorbells, DOORBELL_UDB) || 478 isset(&nm_txq->doorbells, DOORBELL_UDBWC) || 479 isset(&nm_txq->doorbells, DOORBELL_WCWR)) { 480 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 481 uint32_t mask = (1 << s_qpp) - 1; 482 volatile uint8_t *udb; 483 484 udb = sc->udbs_base + UDBS_DB_OFFSET; 485 udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT; 486 nm_txq->udb_qid = nm_txq->cntxt_id & mask; 487 if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 488 clrbit(&nm_txq->doorbells, DOORBELL_WCWR); 489 else { 490 udb += nm_txq->udb_qid << UDBS_SEG_SHIFT; 491 nm_txq->udb_qid = 0; 492 } 493 nm_txq->udb = (volatile void *)udb; 494 } 495 496 if (sc->params.fw_vers < FW_VERSION32(1, 25, 1, 0)) { 497 uint32_t param, val; 498 499 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 500 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) | 501 V_FW_PARAMS_PARAM_YZ(nm_txq->cntxt_id); 502 val = 0xff; 503 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 504 if (rc != 0) { 505 device_printf(vi->dev, 506 "failed to bind netmap txq %d to class 0xff: %d\n", 507 nm_txq->cntxt_id, rc); 508 rc = 0; 509 } 510 } 511 512 return (rc); 513 } 514 515 static int 516 free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 517 { 518 struct adapter *sc = vi->adapter; 519 int rc; 520 521 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id); 522 if (rc != 0) 523 device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__, 524 nm_txq->cntxt_id, rc); 525 nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; 526 return (rc); 527 } 528 529 static int 530 cxgbe_netmap_simple_rss(struct adapter *sc, struct vi_info *vi, 531 struct ifnet *ifp, struct netmap_adapter *na) 532 { 533 struct netmap_kring *kring; 534 struct sge_nm_rxq *nm_rxq; 535 int rc, i, j, nm_state, defq; 536 uint16_t *rss; 537 538 /* 539 * Check if there's at least one active (or about to go active) netmap 540 * rx queue. 541 */ 542 defq = -1; 543 for_each_nm_rxq(vi, j, nm_rxq) { 544 nm_state = atomic_load_int(&nm_rxq->nm_state); 545 kring = na->rx_rings[nm_rxq->nid]; 546 if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) || 547 (nm_state == NM_OFF && nm_kring_pending_on(kring))) { 548 MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 549 if (defq == -1) { 550 defq = nm_rxq->iq_abs_id; 551 break; 552 } 553 } 554 } 555 556 if (defq == -1) { 557 /* No active netmap queues. Switch back to NIC queues. */ 558 rss = vi->rss; 559 defq = vi->rss[0]; 560 } else { 561 for (i = 0; i < vi->rss_size;) { 562 for_each_nm_rxq(vi, j, nm_rxq) { 563 nm_state = atomic_load_int(&nm_rxq->nm_state); 564 kring = na->rx_rings[nm_rxq->nid]; 565 if ((nm_state != NM_OFF && 566 !nm_kring_pending_off(kring)) || 567 (nm_state == NM_OFF && 568 nm_kring_pending_on(kring))) { 569 MPASS(nm_rxq->iq_cntxt_id != 570 INVALID_NM_RXQ_CNTXT_ID); 571 vi->nm_rss[i++] = nm_rxq->iq_abs_id; 572 if (i == vi->rss_size) 573 break; 574 } 575 } 576 } 577 rss = vi->nm_rss; 578 } 579 580 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 581 vi->rss_size); 582 if (rc != 0) 583 if_printf(ifp, "netmap rss_config failed: %d\n", rc); 584 585 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0); 586 if (rc != 0) { 587 if_printf(ifp, "netmap defaultq config failed: %d\n", rc); 588 } 589 590 return (rc); 591 } 592 593 /* 594 * Odd number of rx queues work best for split RSS mode as the first queue can 595 * be dedicated for non-RSS traffic and the rest divided into two equal halves. 596 */ 597 static int 598 cxgbe_netmap_split_rss(struct adapter *sc, struct vi_info *vi, 599 struct ifnet *ifp, struct netmap_adapter *na) 600 { 601 struct netmap_kring *kring; 602 struct sge_nm_rxq *nm_rxq; 603 int rc, i, j, nm_state, defq; 604 int nactive[2] = {0, 0}; 605 int dq[2] = {-1, -1}; 606 bool dq_norss; /* default queue should not be in RSS table. */ 607 608 MPASS(nm_split_rss != 0); 609 MPASS(vi->nnmrxq > 1); 610 611 for_each_nm_rxq(vi, i, nm_rxq) { 612 j = i / ((vi->nnmrxq + 1) / 2); 613 nm_state = atomic_load_int(&nm_rxq->nm_state); 614 kring = na->rx_rings[nm_rxq->nid]; 615 if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) || 616 (nm_state == NM_OFF && nm_kring_pending_on(kring))) { 617 MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 618 nactive[j]++; 619 if (dq[j] == -1) { 620 dq[j] = nm_rxq->iq_abs_id; 621 break; 622 } 623 } 624 } 625 626 if (nactive[0] == 0 || nactive[1] == 0) 627 return (cxgbe_netmap_simple_rss(sc, vi, ifp, na)); 628 629 MPASS(dq[0] != -1 && dq[1] != -1); 630 if (nactive[0] > nactive[1]) { 631 defq = dq[0]; 632 dq_norss = true; 633 } else if (nactive[0] < nactive[1]) { 634 defq = dq[1]; 635 dq_norss = true; 636 } else { 637 defq = dq[0]; 638 dq_norss = false; 639 } 640 641 i = 0; 642 nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq]; 643 while (i < vi->rss_size / 2) { 644 for (j = 0; j < (vi->nnmrxq + 1) / 2; j++) { 645 nm_state = atomic_load_int(&nm_rxq[j].nm_state); 646 kring = na->rx_rings[nm_rxq[j].nid]; 647 if ((nm_state == NM_OFF && 648 !nm_kring_pending_on(kring)) || 649 (nm_state == NM_ON && 650 nm_kring_pending_off(kring))) { 651 continue; 652 } 653 MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 654 if (dq_norss && defq == nm_rxq[j].iq_abs_id) 655 continue; 656 vi->nm_rss[i++] = nm_rxq[j].iq_abs_id; 657 if (i == vi->rss_size / 2) 658 break; 659 } 660 } 661 while (i < vi->rss_size) { 662 for (j = (vi->nnmrxq + 1) / 2; j < vi->nnmrxq; j++) { 663 nm_state = atomic_load_int(&nm_rxq[j].nm_state); 664 kring = na->rx_rings[nm_rxq[j].nid]; 665 if ((nm_state == NM_OFF && 666 !nm_kring_pending_on(kring)) || 667 (nm_state == NM_ON && 668 nm_kring_pending_off(kring))) { 669 continue; 670 } 671 MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 672 if (dq_norss && defq == nm_rxq[j].iq_abs_id) 673 continue; 674 vi->nm_rss[i++] = nm_rxq[j].iq_abs_id; 675 if (i == vi->rss_size) 676 break; 677 } 678 } 679 680 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, 681 vi->nm_rss, vi->rss_size); 682 if (rc != 0) 683 if_printf(ifp, "netmap split_rss_config failed: %d\n", rc); 684 685 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0); 686 if (rc != 0) 687 if_printf(ifp, "netmap defaultq config failed: %d\n", rc); 688 689 return (rc); 690 } 691 692 static inline int 693 cxgbe_netmap_rss(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp, 694 struct netmap_adapter *na) 695 { 696 697 if (nm_split_rss == 0 || vi->nnmrxq == 1) 698 return (cxgbe_netmap_simple_rss(sc, vi, ifp, na)); 699 else 700 return (cxgbe_netmap_split_rss(sc, vi, ifp, na)); 701 } 702 703 static int 704 cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp, 705 struct netmap_adapter *na) 706 { 707 struct netmap_slot *slot; 708 struct netmap_kring *kring; 709 struct sge_nm_rxq *nm_rxq; 710 struct sge_nm_txq *nm_txq; 711 int i, j, hwidx; 712 struct rx_buf_info *rxb; 713 714 ASSERT_SYNCHRONIZED_OP(sc); 715 MPASS(vi->nnmrxq > 0); 716 MPASS(vi->nnmtxq > 0); 717 718 if ((vi->flags & VI_INIT_DONE) == 0 || 719 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 720 if_printf(ifp, "cannot enable netmap operation because " 721 "interface is not UP.\n"); 722 return (EAGAIN); 723 } 724 725 rxb = &sc->sge.rx_buf_info[0]; 726 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 727 if (rxb->size1 == NETMAP_BUF_SIZE(na)) { 728 hwidx = rxb->hwidx1; 729 break; 730 } 731 if (rxb->size2 == NETMAP_BUF_SIZE(na)) { 732 hwidx = rxb->hwidx2; 733 break; 734 } 735 } 736 if (i >= SW_ZONE_SIZES) { 737 if_printf(ifp, "no hwidx for netmap buffer size %d.\n", 738 NETMAP_BUF_SIZE(na)); 739 return (ENXIO); 740 } 741 742 /* Must set caps before calling netmap_reset */ 743 nm_set_native_flags(na); 744 745 for_each_nm_rxq(vi, i, nm_rxq) { 746 kring = na->rx_rings[nm_rxq->nid]; 747 if (!nm_kring_pending_on(kring)) 748 continue; 749 750 alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop)); 751 nm_rxq->fl_hwidx = hwidx; 752 slot = netmap_reset(na, NR_RX, i, 0); 753 MPASS(slot != NULL); /* XXXNM: error check, not assert */ 754 755 /* We deal with 8 bufs at a time */ 756 MPASS((na->num_rx_desc & 7) == 0); 757 MPASS(na->num_rx_desc == nm_rxq->fl_sidx); 758 for (j = 0; j < nm_rxq->fl_sidx; j++) { 759 uint64_t ba; 760 761 PNMB(na, &slot[j], &ba); 762 MPASS(ba != 0); 763 nm_rxq->fl_desc[j] = htobe64(ba | hwidx); 764 } 765 j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8; 766 MPASS((j & 7) == 0); 767 j /= 8; /* driver pidx to hardware pidx */ 768 wmb(); 769 t4_write_reg(sc, sc->sge_kdoorbell_reg, 770 nm_rxq->fl_db_val | V_PIDX(j)); 771 772 (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_OFF, NM_ON); 773 } 774 775 for_each_nm_txq(vi, i, nm_txq) { 776 kring = na->tx_rings[nm_txq->nid]; 777 if (!nm_kring_pending_on(kring)) 778 continue; 779 780 alloc_nm_txq_hwq(vi, nm_txq); 781 slot = netmap_reset(na, NR_TX, i, 0); 782 MPASS(slot != NULL); /* XXXNM: error check, not assert */ 783 } 784 785 if (vi->nm_rss == NULL) { 786 vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE, 787 M_ZERO | M_WAITOK); 788 } 789 790 return (cxgbe_netmap_rss(sc, vi, ifp, na)); 791 } 792 793 static int 794 cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp, 795 struct netmap_adapter *na) 796 { 797 struct netmap_kring *kring; 798 int rc, i, nm_state, nactive; 799 struct sge_nm_txq *nm_txq; 800 struct sge_nm_rxq *nm_rxq; 801 802 ASSERT_SYNCHRONIZED_OP(sc); 803 MPASS(vi->nnmrxq > 0); 804 MPASS(vi->nnmtxq > 0); 805 806 if (!nm_netmap_on(na)) 807 return (0); 808 809 if ((vi->flags & VI_INIT_DONE) == 0) 810 return (0); 811 812 /* First remove the queues that are stopping from the RSS table. */ 813 rc = cxgbe_netmap_rss(sc, vi, ifp, na); 814 if (rc != 0) 815 return (rc); /* error message logged already. */ 816 817 for_each_nm_txq(vi, i, nm_txq) { 818 kring = na->tx_rings[nm_txq->nid]; 819 if (!nm_kring_pending_off(kring)) 820 continue; 821 MPASS(nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID); 822 823 rc = -t4_eth_eq_stop(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id); 824 if (rc != 0) { 825 device_printf(vi->dev, 826 "failed to stop nm_txq[%d]: %d.\n", i, rc); 827 return (rc); 828 } 829 830 /* XXX: netmap, not the driver, should do this. */ 831 kring->rhead = kring->rcur = kring->nr_hwcur = 0; 832 kring->rtail = kring->nr_hwtail = kring->nkr_num_slots - 1; 833 } 834 nactive = 0; 835 for_each_nm_rxq(vi, i, nm_rxq) { 836 nm_state = atomic_load_int(&nm_rxq->nm_state); 837 kring = na->rx_rings[nm_rxq->nid]; 838 if (nm_state != NM_OFF && !nm_kring_pending_off(kring)) 839 nactive++; 840 if (!nm_kring_pending_off(kring)) 841 continue; 842 MPASS(nm_state != NM_OFF); 843 MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 844 845 rc = -t4_iq_stop(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 846 nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff); 847 if (rc != 0) { 848 device_printf(vi->dev, 849 "failed to stop nm_rxq[%d]: %d.\n", i, rc); 850 return (rc); 851 } 852 853 while (!atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_OFF)) 854 pause("nmst", 1); 855 856 /* XXX: netmap, not the driver, should do this. */ 857 kring->rhead = kring->rcur = kring->nr_hwcur = 0; 858 kring->rtail = kring->nr_hwtail = 0; 859 } 860 netmap_krings_mode_commit(na, 0); 861 if (nactive == 0) 862 nm_clear_native_flags(na); 863 864 return (rc); 865 } 866 867 static int 868 cxgbe_netmap_reg(struct netmap_adapter *na, int on) 869 { 870 struct ifnet *ifp = na->ifp; 871 struct vi_info *vi = ifp->if_softc; 872 struct adapter *sc = vi->adapter; 873 int rc; 874 875 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg"); 876 if (rc != 0) 877 return (rc); 878 if (on) 879 rc = cxgbe_netmap_on(sc, vi, ifp, na); 880 else 881 rc = cxgbe_netmap_off(sc, vi, ifp, na); 882 end_synchronized_op(sc, 0); 883 884 return (rc); 885 } 886 887 /* How many packets can a single type1 WR carry in n descriptors */ 888 static inline int 889 ndesc_to_npkt(const int n) 890 { 891 892 MPASS(n > 0 && n <= SGE_MAX_WR_NDESC); 893 894 return (n * 2 - 1); 895 } 896 #define MAX_NPKT_IN_TYPE1_WR (ndesc_to_npkt(SGE_MAX_WR_NDESC)) 897 898 /* 899 * Space (in descriptors) needed for a type1 WR (TX_PKTS or TX_PKTS2) that 900 * carries n packets 901 */ 902 static inline int 903 npkt_to_ndesc(const int n) 904 { 905 906 MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR); 907 908 return ((n + 2) / 2); 909 } 910 911 /* 912 * Space (in 16B units) needed for a type1 WR (TX_PKTS or TX_PKTS2) that 913 * carries n packets 914 */ 915 static inline int 916 npkt_to_len16(const int n) 917 { 918 919 MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR); 920 921 return (n * 2 + 1); 922 } 923 924 #define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx) 925 926 static void 927 ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq) 928 { 929 int n; 930 u_int db = nm_txq->doorbells; 931 932 MPASS(nm_txq->pidx != nm_txq->dbidx); 933 934 n = NMIDXDIFF(nm_txq, dbidx); 935 if (n > 1) 936 clrbit(&db, DOORBELL_WCWR); 937 wmb(); 938 939 switch (ffs(db) - 1) { 940 case DOORBELL_UDB: 941 *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n)); 942 break; 943 944 case DOORBELL_WCWR: { 945 volatile uint64_t *dst, *src; 946 947 /* 948 * Queues whose 128B doorbell segment fits in the page do not 949 * use relative qid (udb_qid is always 0). Only queues with 950 * doorbell segments can do WCWR. 951 */ 952 KASSERT(nm_txq->udb_qid == 0 && n == 1, 953 ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p", 954 __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq)); 955 956 dst = (volatile void *)((uintptr_t)nm_txq->udb + 957 UDBS_WR_OFFSET - UDBS_DB_OFFSET); 958 src = (void *)&nm_txq->desc[nm_txq->dbidx]; 959 while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1]) 960 *dst++ = *src++; 961 wmb(); 962 break; 963 } 964 965 case DOORBELL_UDBWC: 966 *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n)); 967 wmb(); 968 break; 969 970 case DOORBELL_KDB: 971 t4_write_reg(sc, sc->sge_kdoorbell_reg, 972 V_QID(nm_txq->cntxt_id) | V_PIDX(n)); 973 break; 974 } 975 nm_txq->dbidx = nm_txq->pidx; 976 } 977 978 /* 979 * Write work requests to send 'npkt' frames and ring the doorbell to send them 980 * on their way. No need to check for wraparound. 981 */ 982 static void 983 cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq, 984 struct netmap_kring *kring, int npkt, int npkt_remaining) 985 { 986 struct netmap_ring *ring = kring->ring; 987 struct netmap_slot *slot; 988 const u_int lim = kring->nkr_num_slots - 1; 989 struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx]; 990 uint16_t len; 991 uint64_t ba; 992 struct cpl_tx_pkt_core *cpl; 993 struct ulptx_sgl *usgl; 994 int i, n; 995 996 while (npkt) { 997 n = min(npkt, MAX_NPKT_IN_TYPE1_WR); 998 len = 0; 999 1000 wr = (void *)&nm_txq->desc[nm_txq->pidx]; 1001 wr->op_pkd = nm_txq->op_pkd; 1002 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n))); 1003 wr->npkt = n; 1004 wr->r3 = 0; 1005 wr->type = 1; 1006 cpl = (void *)(wr + 1); 1007 1008 for (i = 0; i < n; i++) { 1009 slot = &ring->slot[kring->nr_hwcur]; 1010 PNMB(kring->na, slot, &ba); 1011 MPASS(ba != 0); 1012 1013 cpl->ctrl0 = nm_txq->cpl_ctrl0; 1014 cpl->pack = 0; 1015 cpl->len = htobe16(slot->len); 1016 cpl->ctrl1 = nm_txcsum ? 0 : 1017 htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS); 1018 1019 usgl = (void *)(cpl + 1); 1020 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 1021 V_ULPTX_NSGE(1)); 1022 usgl->len0 = htobe32(slot->len); 1023 usgl->addr0 = htobe64(ba); 1024 1025 slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); 1026 cpl = (void *)(usgl + 1); 1027 MPASS(slot->len + len <= UINT16_MAX); 1028 len += slot->len; 1029 kring->nr_hwcur = nm_next(kring->nr_hwcur, lim); 1030 } 1031 wr->plen = htobe16(len); 1032 1033 npkt -= n; 1034 nm_txq->pidx += npkt_to_ndesc(n); 1035 MPASS(nm_txq->pidx <= nm_txq->sidx); 1036 if (__predict_false(nm_txq->pidx == nm_txq->sidx)) { 1037 /* 1038 * This routine doesn't know how to write WRs that wrap 1039 * around. Make sure it wasn't asked to. 1040 */ 1041 MPASS(npkt == 0); 1042 nm_txq->pidx = 0; 1043 } 1044 1045 if (npkt == 0 && npkt_remaining == 0) { 1046 /* All done. */ 1047 if (lazy_tx_credit_flush == 0) { 1048 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | 1049 F_FW_WR_EQUIQ); 1050 nm_txq->equeqidx = nm_txq->pidx; 1051 nm_txq->equiqidx = nm_txq->pidx; 1052 } 1053 ring_nm_txq_db(sc, nm_txq); 1054 return; 1055 } 1056 1057 if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) { 1058 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | 1059 F_FW_WR_EQUIQ); 1060 nm_txq->equeqidx = nm_txq->pidx; 1061 nm_txq->equiqidx = nm_txq->pidx; 1062 } else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) { 1063 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 1064 nm_txq->equeqidx = nm_txq->pidx; 1065 } 1066 if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC) 1067 ring_nm_txq_db(sc, nm_txq); 1068 } 1069 1070 /* Will get called again. */ 1071 MPASS(npkt_remaining); 1072 } 1073 1074 /* How many contiguous free descriptors starting at pidx */ 1075 static inline int 1076 contiguous_ndesc_available(struct sge_nm_txq *nm_txq) 1077 { 1078 1079 if (nm_txq->cidx > nm_txq->pidx) 1080 return (nm_txq->cidx - nm_txq->pidx - 1); 1081 else if (nm_txq->cidx > 0) 1082 return (nm_txq->sidx - nm_txq->pidx); 1083 else 1084 return (nm_txq->sidx - nm_txq->pidx - 1); 1085 } 1086 1087 static int 1088 reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq) 1089 { 1090 struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx]; 1091 uint16_t hw_cidx = spg->cidx; /* snapshot */ 1092 struct fw_eth_tx_pkts_wr *wr; 1093 int n = 0; 1094 1095 hw_cidx = be16toh(hw_cidx); 1096 1097 while (nm_txq->cidx != hw_cidx) { 1098 wr = (void *)&nm_txq->desc[nm_txq->cidx]; 1099 1100 MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)) || 1101 wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR))); 1102 MPASS(wr->type == 1); 1103 MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR); 1104 1105 n += wr->npkt; 1106 nm_txq->cidx += npkt_to_ndesc(wr->npkt); 1107 1108 /* 1109 * We never sent a WR that wrapped around so the credits coming 1110 * back, WR by WR, should never cause the cidx to wrap around 1111 * either. 1112 */ 1113 MPASS(nm_txq->cidx <= nm_txq->sidx); 1114 if (__predict_false(nm_txq->cidx == nm_txq->sidx)) 1115 nm_txq->cidx = 0; 1116 } 1117 1118 return (n); 1119 } 1120 1121 static int 1122 cxgbe_netmap_txsync(struct netmap_kring *kring, int flags) 1123 { 1124 struct netmap_adapter *na = kring->na; 1125 struct ifnet *ifp = na->ifp; 1126 struct vi_info *vi = ifp->if_softc; 1127 struct adapter *sc = vi->adapter; 1128 struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id]; 1129 const u_int head = kring->rhead; 1130 u_int reclaimed = 0; 1131 int n, d, npkt_remaining, ndesc_remaining; 1132 1133 /* 1134 * Tx was at kring->nr_hwcur last time around and now we need to advance 1135 * to kring->rhead. Note that the driver's pidx moves independent of 1136 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation 1137 * between descriptors and frames isn't 1:1). 1138 */ 1139 1140 npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur : 1141 kring->nkr_num_slots - kring->nr_hwcur + head; 1142 while (npkt_remaining) { 1143 reclaimed += reclaim_nm_tx_desc(nm_txq); 1144 ndesc_remaining = contiguous_ndesc_available(nm_txq); 1145 /* Can't run out of descriptors with packets still remaining */ 1146 MPASS(ndesc_remaining > 0); 1147 1148 /* # of desc needed to tx all remaining packets */ 1149 d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC; 1150 if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR) 1151 d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR); 1152 1153 if (d <= ndesc_remaining) 1154 n = npkt_remaining; 1155 else { 1156 /* Can't send all, calculate how many can be sent */ 1157 n = (ndesc_remaining / SGE_MAX_WR_NDESC) * 1158 MAX_NPKT_IN_TYPE1_WR; 1159 if (ndesc_remaining % SGE_MAX_WR_NDESC) 1160 n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC); 1161 } 1162 1163 /* Send n packets and update nm_txq->pidx and kring->nr_hwcur */ 1164 npkt_remaining -= n; 1165 cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining); 1166 } 1167 MPASS(npkt_remaining == 0); 1168 MPASS(kring->nr_hwcur == head); 1169 MPASS(nm_txq->dbidx == nm_txq->pidx); 1170 1171 /* 1172 * Second part: reclaim buffers for completed transmissions. 1173 */ 1174 if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) { 1175 reclaimed += reclaim_nm_tx_desc(nm_txq); 1176 kring->nr_hwtail += reclaimed; 1177 if (kring->nr_hwtail >= kring->nkr_num_slots) 1178 kring->nr_hwtail -= kring->nkr_num_slots; 1179 } 1180 1181 return (0); 1182 } 1183 1184 static int 1185 cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags) 1186 { 1187 struct netmap_adapter *na = kring->na; 1188 struct netmap_ring *ring = kring->ring; 1189 struct ifnet *ifp = na->ifp; 1190 struct vi_info *vi = ifp->if_softc; 1191 struct adapter *sc = vi->adapter; 1192 struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id]; 1193 u_int const head = kring->rhead; 1194 u_int n; 1195 int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; 1196 1197 if (black_hole) 1198 return (0); /* No updates ever. */ 1199 1200 if (netmap_no_pendintr || force_update) { 1201 kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx); 1202 kring->nr_kflags &= ~NKR_PENDINTR; 1203 } 1204 1205 if (nm_rxq->fl_db_saved > 0 && starve_fl == 0) { 1206 wmb(); 1207 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1208 nm_rxq->fl_db_val | V_PIDX(nm_rxq->fl_db_saved)); 1209 nm_rxq->fl_db_saved = 0; 1210 } 1211 1212 /* Userspace done with buffers from kring->nr_hwcur to head */ 1213 n = head >= kring->nr_hwcur ? head - kring->nr_hwcur : 1214 kring->nkr_num_slots - kring->nr_hwcur + head; 1215 n &= ~7U; 1216 if (n > 0) { 1217 u_int fl_pidx = nm_rxq->fl_pidx; 1218 struct netmap_slot *slot = &ring->slot[fl_pidx]; 1219 uint64_t ba; 1220 int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx; 1221 1222 /* 1223 * We always deal with 8 buffers at a time. We must have 1224 * stopped at an 8B boundary (fl_pidx) last time around and we 1225 * must have a multiple of 8B buffers to give to the freelist. 1226 */ 1227 MPASS((fl_pidx & 7) == 0); 1228 MPASS((n & 7) == 0); 1229 1230 IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots); 1231 IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx2); 1232 1233 while (n > 0) { 1234 for (i = 0; i < 8; i++, fl_pidx++, slot++) { 1235 PNMB(na, slot, &ba); 1236 MPASS(ba != 0); 1237 nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx); 1238 slot->flags &= ~NS_BUF_CHANGED; 1239 MPASS(fl_pidx <= nm_rxq->fl_sidx2); 1240 } 1241 n -= 8; 1242 if (fl_pidx == nm_rxq->fl_sidx2) { 1243 fl_pidx = 0; 1244 slot = &ring->slot[0]; 1245 } 1246 if (++dbinc == nm_rxq->fl_db_threshold) { 1247 wmb(); 1248 if (starve_fl) 1249 nm_rxq->fl_db_saved += dbinc; 1250 else { 1251 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1252 nm_rxq->fl_db_val | V_PIDX(dbinc)); 1253 } 1254 dbinc = 0; 1255 } 1256 } 1257 MPASS(nm_rxq->fl_pidx == fl_pidx); 1258 1259 if (dbinc > 0) { 1260 wmb(); 1261 if (starve_fl) 1262 nm_rxq->fl_db_saved += dbinc; 1263 else { 1264 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1265 nm_rxq->fl_db_val | V_PIDX(dbinc)); 1266 } 1267 } 1268 } 1269 1270 return (0); 1271 } 1272 1273 void 1274 cxgbe_nm_attach(struct vi_info *vi) 1275 { 1276 struct port_info *pi; 1277 struct adapter *sc; 1278 struct netmap_adapter na; 1279 1280 MPASS(vi->nnmrxq > 0); 1281 MPASS(vi->ifp != NULL); 1282 1283 pi = vi->pi; 1284 sc = pi->adapter; 1285 1286 bzero(&na, sizeof(na)); 1287 1288 na.ifp = vi->ifp; 1289 na.na_flags = NAF_BDG_MAYSLEEP; 1290 1291 /* Netmap doesn't know about the space reserved for the status page. */ 1292 na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE; 1293 1294 /* 1295 * The freelist's cidx/pidx drives netmap's rx cidx/pidx. So 1296 * num_rx_desc is based on the number of buffers that can be held in the 1297 * freelist, and not the number of entries in the iq. (These two are 1298 * not exactly the same due to the space taken up by the status page). 1299 */ 1300 na.num_rx_desc = rounddown(vi->qsize_rxq, 8); 1301 na.nm_txsync = cxgbe_netmap_txsync; 1302 na.nm_rxsync = cxgbe_netmap_rxsync; 1303 na.nm_register = cxgbe_netmap_reg; 1304 na.num_tx_rings = vi->nnmtxq; 1305 na.num_rx_rings = vi->nnmrxq; 1306 na.rx_buf_maxsize = MAX_MTU; 1307 netmap_attach(&na); /* This adds IFCAP_NETMAP to if_capabilities */ 1308 } 1309 1310 void 1311 cxgbe_nm_detach(struct vi_info *vi) 1312 { 1313 1314 MPASS(vi->nnmrxq > 0); 1315 MPASS(vi->ifp != NULL); 1316 1317 netmap_detach(vi->ifp); 1318 } 1319 1320 static inline const void * 1321 unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl) 1322 { 1323 1324 MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL); 1325 1326 /* data[0] is RSS header */ 1327 return (&cpl->data[1]); 1328 } 1329 1330 static void 1331 handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp, 1332 const struct cpl_sge_egr_update *egr) 1333 { 1334 uint32_t oq; 1335 struct sge_nm_txq *nm_txq; 1336 1337 oq = be32toh(egr->opcode_qid); 1338 MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE); 1339 nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start]; 1340 1341 netmap_tx_irq(ifp, nm_txq->nid); 1342 } 1343 1344 void 1345 service_nm_rxq(struct sge_nm_rxq *nm_rxq) 1346 { 1347 struct vi_info *vi = nm_rxq->vi; 1348 struct adapter *sc = vi->adapter; 1349 struct ifnet *ifp = vi->ifp; 1350 struct netmap_adapter *na = NA(ifp); 1351 struct netmap_kring *kring = na->rx_rings[nm_rxq->nid]; 1352 struct netmap_ring *ring = kring->ring; 1353 struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx]; 1354 const void *cpl; 1355 uint32_t lq; 1356 u_int work = 0; 1357 uint8_t opcode; 1358 uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx); 1359 u_int fl_credits = fl_cidx & 7; 1360 u_int ndesc = 0; /* desc processed since last cidx update */ 1361 u_int nframes = 0; /* frames processed since last netmap wakeup */ 1362 1363 while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) { 1364 1365 rmb(); 1366 1367 lq = be32toh(d->rsp.pldbuflen_qid); 1368 opcode = d->rss.opcode; 1369 cpl = &d->cpl[0]; 1370 1371 switch (G_RSPD_TYPE(d->rsp.u.type_gen)) { 1372 case X_RSPD_TYPE_FLBUF: 1373 1374 /* fall through */ 1375 1376 case X_RSPD_TYPE_CPL: 1377 MPASS(opcode < NUM_CPL_CMDS); 1378 1379 switch (opcode) { 1380 case CPL_FW4_MSG: 1381 case CPL_FW6_MSG: 1382 cpl = unwrap_nm_fw6_msg(cpl); 1383 /* fall through */ 1384 case CPL_SGE_EGR_UPDATE: 1385 handle_nm_sge_egr_update(sc, ifp, cpl); 1386 break; 1387 case CPL_RX_PKT: 1388 ring->slot[fl_cidx].len = G_RSPD_LEN(lq) - 1389 sc->params.sge.fl_pktshift; 1390 ring->slot[fl_cidx].flags = 0; 1391 nframes++; 1392 if (!(lq & F_RSPD_NEWBUF)) { 1393 MPASS(black_hole == 2); 1394 break; 1395 } 1396 fl_credits++; 1397 if (__predict_false(++fl_cidx == nm_rxq->fl_sidx)) 1398 fl_cidx = 0; 1399 break; 1400 default: 1401 panic("%s: unexpected opcode 0x%x on nm_rxq %p", 1402 __func__, opcode, nm_rxq); 1403 } 1404 break; 1405 1406 case X_RSPD_TYPE_INTR: 1407 /* Not equipped to handle forwarded interrupts. */ 1408 panic("%s: netmap queue received interrupt for iq %u\n", 1409 __func__, lq); 1410 1411 default: 1412 panic("%s: illegal response type %d on nm_rxq %p", 1413 __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq); 1414 } 1415 1416 d++; 1417 if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) { 1418 nm_rxq->iq_cidx = 0; 1419 d = &nm_rxq->iq_desc[0]; 1420 nm_rxq->iq_gen ^= F_RSPD_GEN; 1421 } 1422 1423 if (__predict_false(++nframes == rx_nframes) && !black_hole) { 1424 atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx); 1425 netmap_rx_irq(ifp, nm_rxq->nid, &work); 1426 nframes = 0; 1427 } 1428 1429 if (__predict_false(++ndesc == rx_ndesc)) { 1430 if (black_hole && fl_credits >= 8) { 1431 fl_credits /= 8; 1432 IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, 1433 nm_rxq->fl_sidx); 1434 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1435 nm_rxq->fl_db_val | V_PIDX(fl_credits)); 1436 fl_credits = fl_cidx & 7; 1437 } 1438 t4_write_reg(sc, sc->sge_gts_reg, 1439 V_CIDXINC(ndesc) | 1440 V_INGRESSQID(nm_rxq->iq_cntxt_id) | 1441 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1442 ndesc = 0; 1443 } 1444 } 1445 1446 atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx); 1447 if (black_hole) { 1448 fl_credits /= 8; 1449 IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx); 1450 t4_write_reg(sc, sc->sge_kdoorbell_reg, 1451 nm_rxq->fl_db_val | V_PIDX(fl_credits)); 1452 } else if (nframes > 0) 1453 netmap_rx_irq(ifp, nm_rxq->nid, &work); 1454 1455 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) | 1456 V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) | 1457 V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); 1458 } 1459 #endif 1460