xref: /freebsd/sys/dev/cxgbe/adapter.h (revision 8a0a413e)
/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <vm/uma.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/bus.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/tcp_lro.h>

#include "offload.h"
#include "t4_ioctl.h"
#include "common/t4_msg.h"
#include "firmware/t4fw_interface.h"

#define KTR_CXGBE	KTR_SPARE3
MALLOC_DECLARE(M_CXGBE);
#define CXGBE_UNIMPLEMENTED(s) \
    panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
#else
#define prefetch(x)
#endif

#ifndef SYSCTL_ADD_UQUAD
#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#define CTLTYPE_U64 CTLTYPE_QUAD
#endif

#if (__FreeBSD_version >= 900030) || \
    ((__FreeBSD_version >= 802507) && (__FreeBSD_version < 900000))
#define SBUF_DRAIN 1
#endif

struct adapter;
typedef struct adapter adapter_t;

enum {
	/*
	 * All ingress queues use this entry size.  Note that the firmware event
	 * queue and any iq expecting CPL_RX_PKT in the descriptor need this to
	 * be at least 64.
	 */
	IQ_ESIZE = 64,

	/* Default queue sizes for all kinds of ingress queues */
	FW_IQ_QSIZE = 256,
	RX_IQ_QSIZE = 1024,

	/* All egress queues use this entry size */
	EQ_ESIZE = 64,

	/* Default queue sizes for all kinds of egress queues */
	CTRL_EQ_QSIZE = 128,
	TX_EQ_QSIZE = 1024,

#if MJUMPAGESIZE != MCLBYTES
	SW_ZONE_SIZES = 4,	/* cluster, jumbop, jumbo9k, jumbo16k */
#else
	SW_ZONE_SIZES = 3,	/* cluster, jumbo9k, jumbo16k */
#endif
	CL_METADATA_SIZE = CACHE_LINE_SIZE,

	SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / EQ_ESIZE, /* max WR size in desc */
	TX_SGL_SEGS = 39,
	TX_SGL_SEGS_TSO = 38,
	TX_WR_FLITS = SGE_MAX_WR_LEN / 8
};
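
/*
 * For reference: SGE_MAX_WR_LEN comes from the shared Chelsio code.
 * Assuming its usual value of 512 bytes, SGE_MAX_WR_NDESC works out to
 * 512 / 64 = 8 hardware descriptors per maximum-sized work request, and
 * TX_WR_FLITS to 512 / 8 = 64 8-byte flits.
 */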

enum {
	/* adapter intr_type */
	INTR_INTX	= (1 << 0),
	INTR_MSI	= (1 << 1),
	INTR_MSIX	= (1 << 2)
};

enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff
};

enum {
	/* flags understood by begin_synchronized_op */
	HOLD_LOCK	= (1 << 0),
	SLEEP_OK	= (1 << 1),
	INTR_OK		= (1 << 2),

	/* flags understood by end_synchronized_op */
	LOCK_HELD	= HOLD_LOCK,
};
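
/*
 * Illustrative sketch (not code from this file): a caller typically
 * brackets a configuration change with the synchronized-op helpers
 * declared later in this header.  "t4xmp" is a made-up wmesg-style tag
 * for the example.
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xmp");
 *	if (rc)
 *		return (rc);
 *	(modify adapter/VI state here)
 *	end_synchronized_op(sc, 0);
 */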

enum {
	/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	FW_OK		= (1 << 1),
	CHK_MBOX_ACCESS	= (1 << 2),
	MASTER_PF	= (1 << 3),
	ADAP_SYSCTL_CTX	= (1 << 4),
	/* TOM_INIT_DONE= (1 << 5),	No longer used */
	BUF_PACKING_OK	= (1 << 6),
	IS_VF		= (1 << 7),

	CXGBE_BUSY	= (1 << 9),

	/* port flags */
	HAS_TRACEQ	= (1 << 3),

	/* VI flags */
	DOOMED		= (1 << 0),
	VI_INIT_DONE	= (1 << 1),
	VI_SYSCTL_CTX	= (1 << 2),
	INTR_RXQ	= (1 << 4),	/* All NIC rxq's take interrupts */
	INTR_OFLD_RXQ	= (1 << 5),	/* All TOE rxq's take interrupts */
	INTR_ALL	= (INTR_RXQ | INTR_OFLD_RXQ),

	/* adapter debug_flags */
	DF_DUMP_MBOX		= (1 << 0),	/* Log all mbox cmd/rpl. */
	DF_LOAD_FW_ANYTIME	= (1 << 1),	/* Allow LOAD_FW after init */
	DF_DISABLE_TCB_CACHE	= (1 << 2),	/* Disable TCB cache (T6+) */
};

#define IS_DOOMED(vi)	((vi)->flags & DOOMED)
#define SET_DOOMED(vi)	do {(vi)->flags |= DOOMED;} while (0)
#define IS_BUSY(sc)	((sc)->flags & CXGBE_BUSY)
#define SET_BUSY(sc)	do {(sc)->flags |= CXGBE_BUSY;} while (0)
#define CLR_BUSY(sc)	do {(sc)->flags &= ~CXGBE_BUSY;} while (0)

struct vi_info {
	device_t dev;
	struct port_info *pi;

	struct ifnet *ifp;

	unsigned long flags;
	int if_flags;

	uint16_t *rss, *nm_rss;
	int smt_idx;		/* for convenience */
	uint16_t viid;
	int16_t  xact_addr_filt;/* index of exact MAC address filter */
	uint16_t rss_size;	/* size of VI's RSS table slice */
	uint16_t rss_base;	/* start of VI's RSS table slice */

	eventhandler_tag vlan_c;

	int nintr;
	int first_intr;

	/* These need to be int as they are used in sysctl */
	int ntxq;		/* # of tx queues */
	int first_txq;		/* index of first tx queue */
	int rsrv_noflowq;	/* Reserve queue 0 for non-flowid packets */
	int nrxq;		/* # of rx queues */
	int first_rxq;		/* index of first rx queue */
	int nofldtxq;		/* # of offload tx queues */
	int first_ofld_txq;	/* index of first offload tx queue */
	int nofldrxq;		/* # of offload rx queues */
	int first_ofld_rxq;	/* index of first offload rx queue */
	int nnmtxq;
	int first_nm_txq;
	int nnmrxq;
	int first_nm_rxq;
	int tmr_idx;
	int ofld_tmr_idx;
	int pktc_idx;
	int ofld_pktc_idx;
	int qsize_rxq;
	int qsize_txq;

	struct timeval last_refreshed;
	struct fw_vi_stats_vf stats;

	struct callout tick;
	struct sysctl_ctx_list ctx;	/* from ifconfig up to driver detach */

	uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
};

struct tx_ch_rl_params {
	enum fw_sched_params_rate ratemode;	/* %port (REL) or kbps (ABS) */
	uint32_t maxrate;
};

enum {
	TX_CLRL_REFRESH	= (1 << 0),	/* Need to update hardware state. */
	TX_CLRL_ERROR	= (1 << 1),	/* Error, hardware state unknown. */
};

struct tx_cl_rl_params {
	int refcount;
	u_int flags;
	enum fw_sched_params_rate ratemode;	/* %port REL or ABS value */
	enum fw_sched_params_unit rateunit;	/* kbps or pps (when ABS) */
	enum fw_sched_params_mode mode;		/* aggr or per-flow */
	uint32_t maxrate;
	uint16_t pktsize;
};

/* Tx scheduler parameters for a channel/port */
struct tx_sched_params {
	/* Channel Rate Limiter */
	struct tx_ch_rl_params ch_rl;

	/* Class WRR */
	/* XXX */

	/* Class Rate Limiter */
	struct tx_cl_rl_params cl_rl[];
};
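
/*
 * cl_rl[] is a C99 flexible array member, so the structure is sized at
 * allocation time.  A hedged sketch of how such an object would be
 * allocated for `n' scheduling classes (`n' is hypothetical here; the
 * driver derives the real count from the chip parameters):
 *
 *	p = malloc(sizeof(struct tx_sched_params) +
 *	    n * sizeof(struct tx_cl_rl_params), M_CXGBE, M_ZERO | M_WAITOK);
 */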

struct port_info {
	device_t dev;
	struct adapter *adapter;

	struct vi_info *vi;
	int nvi;
	int up_vis;
	int uld_vis;

	struct tx_sched_params *sched_params;

	struct mtx pi_lock;
	char lockname[16];
	unsigned long flags;

	uint8_t  lport;		/* associated offload logical port */
	int8_t   mdio_addr;
	uint8_t  port_type;
	uint8_t  mod_type;
	uint8_t  port_id;
	uint8_t  tx_chan;
	uint8_t  mps_bg_map;	/* rx MPS buffer group bitmap */
	uint8_t  rx_e_chan_map;	/* rx TP e-channel bitmap */

	struct link_config link_cfg;
	struct link_config old_link_cfg;
	struct ifmedia media;

	struct timeval last_refreshed;
	struct port_stats stats;
	u_int tnl_cong_drops;
	u_int tx_parse_error;

	struct callout tick;
};

#define	IS_MAIN_VI(vi)		((vi) == &((vi)->pi->vi[0]))

/* Where the cluster came from, how it has been carved up. */
struct cluster_layout {
	int8_t zidx;
	int8_t hwidx;
	uint16_t region1;	/* mbufs laid out within this region */
				/* region2 is the DMA region */
	uint16_t region3;	/* cluster_metadata within this region */
};

struct cluster_metadata {
	u_int refcount;
	struct fl_sdesc *sd;	/* For debug only.  Could easily be stale */
};

struct fl_sdesc {
	caddr_t cl;
	uint16_t nmbuf;	/* # of driver originated mbufs with ref on cluster */
	struct cluster_layout cll;
};

struct tx_desc {
	__be64 flit[8];
};

struct tx_sdesc {
	struct mbuf *m;		/* m_nextpkt linked chain of frames */
	uint8_t desc_used;	/* # of hardware descriptors used by the WR */
};


#define IQ_PAD (IQ_ESIZE - sizeof(struct rsp_ctrl) - sizeof(struct rss_header))
struct iq_desc {
	struct rss_header rss;
	uint8_t cpl[IQ_PAD];
	struct rsp_ctrl rsp;
};
#undef IQ_PAD
CTASSERT(sizeof(struct iq_desc) == IQ_ESIZE);

enum {
	/* iq flags */
	IQ_ALLOCATED	= (1 << 0),	/* firmware resources allocated */
	IQ_HAS_FL	= (1 << 1),	/* iq associated with a freelist */
	IQ_INTR		= (1 << 2),	/* iq takes direct interrupt */
	IQ_LRO_ENABLED	= (1 << 3),	/* iq is an eth rxq with LRO enabled */
	IQ_ADJ_CREDIT	= (1 << 4),	/* hw is off by 1 credit for this iq */

	/* iq state */
	IQS_DISABLED	= 0,
	IQS_BUSY	= 1,
	IQS_IDLE	= 2,

	/* netmap related flags */
	NM_OFF	= 0,
	NM_ON	= 1,
	NM_BUSY	= 2,
};

struct sge_iq;
struct rss_header;
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);

/*
 * Ingress Queue: T4 is producer, driver is consumer.
 */
struct sge_iq {
	uint32_t flags;
	volatile int state;
	struct adapter *adapter;
	cpl_handler_t set_tcb_rpl;
	cpl_handler_t l2t_write_rpl;
	struct iq_desc  *desc;	/* KVA of descriptor ring */
	int8_t   intr_pktc_idx;	/* packet count threshold index */
	uint8_t  gen;		/* generation bit */
	uint8_t  intr_params;	/* interrupt holdoff parameters */
	uint8_t  intr_next;	/* XXX: holdoff for next interrupt */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t sidx;		/* index of the entry with the status page */
	uint16_t cidx;		/* consumer index */
	uint16_t cntxt_id;	/* SGE context id for the iq */
	uint16_t abs_id;	/* absolute SGE id for the iq */

	STAILQ_ENTRY(sge_iq) link;

	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;		/* bus address of descriptor ring */
};

enum {
	EQ_CTRL		= 1,
	EQ_ETH		= 2,
	EQ_OFLD		= 3,

	/* eq flags */
	EQ_TYPEMASK	= 0x3,		/* 2 lsbits hold the type (see above) */
	EQ_ALLOCATED	= (1 << 2),	/* firmware resources allocated */
	EQ_ENABLED	= (1 << 3),	/* open for business */
	EQ_QFLUSH	= (1 << 4),	/* if_qflush in progress */
};

/* Listed in order of preference.  Update t4_sysctls too if you change these */
enum {DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB};

/*
 * Egress Queue: driver is producer, T4 is consumer.
 *
 * Note: A free list is an egress queue (driver produces the buffers and T4
 * consumes them) but it's special enough to have its own struct (see sge_fl).
 */
struct sge_eq {
	unsigned int flags;	/* MUST be first */
	unsigned int cntxt_id;	/* SGE context id for the eq */
	unsigned int abs_id;	/* absolute SGE id for the eq */
	struct mtx eq_lock;

	struct tx_desc *desc;	/* KVA of descriptor ring */
	uint8_t doorbells;
	volatile uint32_t *udb;	/* KVA of doorbell (lies within BAR2) */
	u_int udb_qid;		/* relative qid within the doorbell page */
	uint16_t sidx;		/* index of the entry with the status page */
	uint16_t cidx;		/* consumer idx (desc idx) */
	uint16_t pidx;		/* producer idx (desc idx) */
	uint16_t equeqidx;	/* EQUEQ last requested at this pidx */
	uint16_t dbidx;		/* pidx of the most recent doorbell */
	uint16_t iqid;		/* iq that gets egr_update for the eq */
	uint8_t tx_chan;	/* tx channel used by the eq */
	volatile u_int equiq;	/* EQUIQ outstanding */

	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;		/* bus address of descriptor ring */
	char lockname[16];
};

struct sw_zone_info {
	uma_zone_t zone;	/* zone that this cluster comes from */
	int size;		/* size of cluster: 2K, 4K, 9K, 16K, etc. */
	int type;		/* EXT_xxx type of the cluster */
	int8_t head_hwidx;
	int8_t tail_hwidx;
};

struct hw_buf_info {
	int8_t zidx;		/* backpointer to zone; -ve means unused */
	int8_t next;		/* next hwidx for this zone; -1 means no more */
	int size;
};

enum {
	NUM_MEMWIN = 3,

	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE     = 0x1b800,

	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,

	MEMWIN2_APERTURE_T4 = 65536,
	MEMWIN2_BASE_T4     = 0x30000,

	MEMWIN2_APERTURE_T5 = 128 * 1024,
	MEMWIN2_BASE_T5     = 0x60000,
};

struct memwin {
	struct rwlock mw_lock __aligned(CACHE_LINE_SIZE);
	uint32_t mw_base;	/* constant after setup_memwin */
	uint32_t mw_aperture;	/* ditto */
	uint32_t mw_curpos;	/* protected by mw_lock */
};

enum {
	FL_STARVING	= (1 << 0), /* on the adapter's list of starving fl's */
	FL_DOOMED	= (1 << 1), /* about to be destroyed */
	FL_BUF_PACKING	= (1 << 2), /* buffer packing enabled */
	FL_BUF_RESUME	= (1 << 3), /* resume from the middle of the frame */
};

#define FL_RUNNING_LOW(fl) \
    (IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) <= fl->lowat)
#define FL_NOT_RUNNING_LOW(fl) \
    (IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) >= 2 * fl->lowat)
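
/*
 * The "* 8" above converts hw-descriptor indices (dbidx, sidx) into
 * buffer indices so they can be compared against cidx; see the index
 * comments in struct sge_fl below.  The 8:1 ratio is inferred from each
 * 64-byte freelist hardware descriptor holding 8 buffer addresses
 * (EQ_ESIZE / sizeof(__be64)).
 */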

struct sge_fl {
	struct mtx fl_lock;
	__be64 *desc;		/* KVA of descriptor ring, ptr to addresses */
	struct fl_sdesc *sdesc;	/* KVA of software descriptor ring */
	struct cluster_layout cll_def;	/* default refill zone, layout */
	uint16_t lowat;		/* # of buffers <= this means fl needs help */
	int flags;
	uint16_t buf_boundary;

	/* The 16b idx all deal with hw descriptors */
	uint16_t dbidx;		/* hw pidx after last doorbell */
	uint16_t sidx;		/* index of status page */
	volatile uint16_t hw_cidx;

	/* The 32b idx are all buffer idx, not hardware descriptor idx */
	uint32_t cidx;		/* consumer index */
	uint32_t pidx;		/* producer index */

	uint32_t dbval;
	u_int rx_offset;	/* offset in fl buf (when buffer packing) */
	volatile uint32_t *udb;

	uint64_t mbuf_allocated;/* # of mbuf allocated from zone_mbuf */
	uint64_t mbuf_inlined;	/* # of mbuf created within clusters */
	uint64_t cl_allocated;	/* # of clusters allocated */
	uint64_t cl_recycled;	/* # of clusters recycled */
	uint64_t cl_fast_recycled; /* # of clusters recycled (fast) */

	/* These 3 are valid when FL_BUF_RESUME is set, stale otherwise. */
	struct mbuf *m0;
	struct mbuf **pnext;
	u_int remaining;

	uint16_t qsize;		/* # of hw descriptors (status page included) */
	uint16_t cntxt_id;	/* SGE context id for the freelist */
	TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	char lockname[16];
	bus_addr_t ba;		/* bus address of descriptor ring */
	struct cluster_layout cll_alt;	/* alternate refill zone, layout */
};

struct mp_ring;

/* txq: SGE egress queue + what's needed for Ethernet NIC */
struct sge_txq {
	struct sge_eq eq;	/* MUST be first */

	struct ifnet *ifp;	/* the interface this txq belongs to */
	struct mp_ring *r;	/* tx software ring */
	struct tx_sdesc *sdesc;	/* KVA of software descriptor ring */
	struct sglist *gl;
	__be32 cpl_ctrl0;	/* for convenience */
	int tc_idx;		/* traffic class */

	struct task tx_reclaim_task;
	/* stats for common events first */

	uint64_t txcsum;	/* # of times hardware assisted with checksum */
	uint64_t tso_wrs;	/* # of TSO work requests */
	uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
	uint64_t imm_wrs;	/* # of work requests with immediate data */
	uint64_t sgl_wrs;	/* # of work requests with direct SGL */
	uint64_t txpkt_wrs;	/* # of txpkt work requests (not coalesced) */
	uint64_t txpkts0_wrs;	/* # of type0 coalesced tx work requests */
	uint64_t txpkts1_wrs;	/* # of type1 coalesced tx work requests */
	uint64_t txpkts0_pkts;	/* # of frames in type0 coalesced tx WRs */
	uint64_t txpkts1_pkts;	/* # of frames in type1 coalesced tx WRs */

	/* stats for not-that-common events */
} __aligned(CACHE_LINE_SIZE);

/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */

	struct ifnet *ifp;	/* the interface this rxq belongs to */
#if defined(INET) || defined(INET6)
	struct lro_ctrl lro;	/* LRO state */
#endif

	/* stats for common events first */

	uint64_t rxcsum;	/* # of times hardware assisted with checksum */
	uint64_t vlan_extraction;/* # of times VLAN tag was extracted */

	/* stats for not-that-common events */

} __aligned(CACHE_LINE_SIZE);

static inline struct sge_rxq *
iq_to_rxq(struct sge_iq *iq)
{

	return (__containerof(iq, struct sge_rxq, iq));
}


/* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_ofld_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */
} __aligned(CACHE_LINE_SIZE);

static inline struct sge_ofld_rxq *
iq_to_ofld_rxq(struct sge_iq *iq)
{

	return (__containerof(iq, struct sge_ofld_rxq, iq));
}

struct wrqe {
	STAILQ_ENTRY(wrqe) link;
	struct sge_wrq *wrq;
	int wr_len;
	char wr[] __aligned(16);
};

struct wrq_cookie {
	TAILQ_ENTRY(wrq_cookie) link;
	int ndesc;
	int pidx;
};

/*
 * wrq: SGE egress queue that is given prebuilt work requests.  Both the control
 * and offload tx queues are of this type.
 */
struct sge_wrq {
	struct sge_eq eq;	/* MUST be first */

	struct adapter *adapter;
	struct task wrq_tx_task;

	/* Tx desc reserved but WR not "committed" yet. */
	TAILQ_HEAD(wrq_incomplete_wrs, wrq_cookie) incomplete_wrs;

	/* List of WRs ready to go out as soon as descriptors are available. */
	STAILQ_HEAD(, wrqe) wr_list;
	u_int nwr_pending;
	u_int ndesc_needed;

	/* stats for common events first */

	uint64_t tx_wrs_direct;	/* # of WRs written directly to desc ring. */
	uint64_t tx_wrs_ss;	/* # of WRs copied from scratch space. */
	uint64_t tx_wrs_copied;	/* # of WRs queued and copied to desc ring. */

	/* stats for not-that-common events */

	/*
	 * Scratch space for work requests that wrap around after reaching the
	 * status page, and some information about the last WR that used it.
	 */
	uint16_t ss_pidx;
	uint16_t ss_len;
	uint8_t ss[SGE_MAX_WR_LEN];

} __aligned(CACHE_LINE_SIZE);

#define INVALID_NM_RXQ_CNTXT_ID ((uint16_t)(-1))
struct sge_nm_rxq {
	struct vi_info *vi;

	struct iq_desc *iq_desc;
	uint16_t iq_abs_id;
	uint16_t iq_cntxt_id;
	uint16_t iq_cidx;
	uint16_t iq_sidx;
	uint8_t iq_gen;

	__be64  *fl_desc;
	uint16_t fl_cntxt_id;
	uint32_t fl_cidx;
	uint32_t fl_pidx;
	uint32_t fl_sidx;
	uint32_t fl_db_val;
	u_int fl_hwidx:4;

	u_int nid;		/* netmap ring # for this queue */

	/* infrequently used items after this */

	bus_dma_tag_t iq_desc_tag;
	bus_dmamap_t iq_desc_map;
	bus_addr_t iq_ba;
	int intr_idx;

	bus_dma_tag_t fl_desc_tag;
	bus_dmamap_t fl_desc_map;
	bus_addr_t fl_ba;
} __aligned(CACHE_LINE_SIZE);

#define INVALID_NM_TXQ_CNTXT_ID ((u_int)(-1))
struct sge_nm_txq {
	struct tx_desc *desc;
	uint16_t cidx;
	uint16_t pidx;
	uint16_t sidx;
	uint16_t equiqidx;	/* EQUIQ last requested at this pidx */
	uint16_t equeqidx;	/* EQUEQ last requested at this pidx */
	uint16_t dbidx;		/* pidx of the most recent doorbell */
	uint8_t doorbells;
	volatile uint32_t *udb;
	u_int udb_qid;
	u_int cntxt_id;
	__be32 cpl_ctrl0;	/* for convenience */
	u_int nid;		/* netmap ring # for this queue */

	/* infrequently used items after this */

	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;
	int iqidx;
} __aligned(CACHE_LINE_SIZE);

struct sge {
	int nrxq;	/* total # of Ethernet rx queues */
	int ntxq;	/* total # of Ethernet tx queues */
	int nofldrxq;	/* total # of TOE rx queues */
	int nofldtxq;	/* total # of TOE tx queues */
	int nnmrxq;	/* total # of netmap rx queues */
	int nnmtxq;	/* total # of netmap tx queues */
	int niq;	/* total # of ingress queues */
	int neq;	/* total # of egress queues */

	struct sge_iq fwq;	/* Firmware event queue */
	struct sge_wrq mgmtq;	/* Management queue (control queue) */
	struct sge_wrq *ctrlq;	/* Control queues */
	struct sge_txq *txq;	/* NIC tx queues */
	struct sge_rxq *rxq;	/* NIC rx queues */
	struct sge_wrq *ofld_txq;	/* TOE tx queues */
	struct sge_ofld_rxq *ofld_rxq;	/* TOE rx queues */
	struct sge_nm_txq *nm_txq;	/* netmap tx queues */
	struct sge_nm_rxq *nm_rxq;	/* netmap rx queues */

	uint16_t iq_start;	/* first cntxt_id */
	uint16_t iq_base;	/* first abs_id */
	int eq_start;		/* first cntxt_id */
	int eq_base;		/* first abs_id */
	struct sge_iq **iqmap;	/* iq->cntxt_id to iq mapping */
	struct sge_eq **eqmap;	/* eq->cntxt_id to eq mapping */

	int8_t safe_hwidx1;	/* may not have room for metadata */
	int8_t safe_hwidx2;	/* with room for metadata and maybe more */
	struct sw_zone_info sw_zone_info[SW_ZONE_SIZES];
	struct hw_buf_info hw_buf_info[SGE_FLBUF_SIZES];
};

struct devnames {
	const char *nexus_name;
	const char *ifnet_name;
	const char *vi_ifnet_name;
	const char *pf03_drv_name;
	const char *vf_nexus_name;
	const char *vf_ifnet_name;
};

struct adapter {
	SLIST_ENTRY(adapter) link;
	device_t dev;
	struct cdev *cdev;
	const struct devnames *names;

	/* PCIe register resources */
	int regs_rid;
	struct resource *regs_res;
	int msix_rid;
	struct resource *msix_res;
	bus_space_handle_t bh;
	bus_space_tag_t bt;
	bus_size_t mmio_len;
	int udbs_rid;
	struct resource *udbs_res;
	volatile uint8_t *udbs_base;

	unsigned int pf;
	unsigned int mbox;
	unsigned int vpd_busy;
	unsigned int vpd_flag;

	/* Interrupt information */
	int intr_type;
	int intr_count;
	struct irq {
		struct resource *res;
		int rid;
		volatile int nm_state;	/* NM_OFF, NM_ON, or NM_BUSY */
		void *tag;
		struct sge_rxq *rxq;
		struct sge_nm_rxq *nm_rxq;
	} __aligned(CACHE_LINE_SIZE) *irq;
	int sge_gts_reg;
	int sge_kdoorbell_reg;

	bus_dma_tag_t dmat;	/* Parent DMA tag */

	struct sge sge;
	int lro_timeout;
	int sc_do_rxcopy;

	struct taskqueue *tq[MAX_NCHAN];	/* General purpose taskqueues */
	struct port_info *port[MAX_NPORTS];
	uint8_t chan_map[MAX_NCHAN];		/* channel -> port */

	void *tom_softc;	/* (struct tom_data *) */
	struct tom_tunables tt;
	void *iwarp_softc;	/* (struct c4iw_dev *) */
	void *iscsi_ulp_softc;	/* (struct cxgbei_data *) */
	void *ccr_softc;	/* (struct ccr_softc *) */
	struct l2t_data *l2t;	/* L2 table */
	struct tid_info tids;

	uint8_t doorbells;
	int offload_map;	/* ports with IFCAP_TOE enabled */
	int active_ulds;	/* ULDs activated on this adapter */
	int flags;
	int debug_flags;

	char ifp_lockname[16];
	struct mtx ifp_lock;
	struct ifnet *ifp;	/* tracer ifp */
	struct ifmedia media;
	int traceq;		/* iq used by all tracers, -1 if none */
	int tracer_valid;	/* bitmap of valid tracers */
	int tracer_enabled;	/* bitmap of enabled tracers */

	char fw_version[16];
	char tp_version[16];
	char er_version[16];
	char bs_version[16];
	char cfg_file[32];
	u_int cfcsum;
	struct adapter_params params;
	const struct chip_params *chip_params;
	struct t4_virt_res vres;

	uint16_t nbmcaps;
	uint16_t linkcaps;
	uint16_t switchcaps;
	uint16_t niccaps;
	uint16_t toecaps;
	uint16_t rdmacaps;
	uint16_t cryptocaps;
	uint16_t iscsicaps;
	uint16_t fcoecaps;

	struct sysctl_ctx_list ctx; /* from adapter_full_init to full_uninit */

	struct mtx sc_lock;
	char lockname[16];

	/* Starving free lists */
	struct mtx sfl_lock;	/* same cache-line as sc_lock? but that's ok */
	TAILQ_HEAD(, sge_fl) sfl;
	struct callout sfl_callout;

	struct mtx reg_lock;	/* for indirect register access */

	struct memwin memwin[NUM_MEMWIN];	/* memory windows */

	struct mtx tc_lock;
	struct task tc_task;

	const char *last_op;
	const void *last_op_thr;
	int last_op_flags;
};

#define ADAPTER_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
#define ADAPTER_UNLOCK(sc)		mtx_unlock(&(sc)->sc_lock)
#define ADAPTER_LOCK_ASSERT_OWNED(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)

#define ASSERT_SYNCHRONIZED_OP(sc)	\
    KASSERT(IS_BUSY(sc) && \
	(mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
	("%s: operation not synchronized.", __func__))

#define PORT_LOCK(pi)			mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi)			mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_OWNED)
#define PORT_LOCK_ASSERT_NOTOWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)

#define FL_LOCK(fl)			mtx_lock(&(fl)->fl_lock)
#define FL_TRYLOCK(fl)			mtx_trylock(&(fl)->fl_lock)
#define FL_UNLOCK(fl)			mtx_unlock(&(fl)->fl_lock)
#define FL_LOCK_ASSERT_OWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_OWNED)
#define FL_LOCK_ASSERT_NOTOWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)

#define RXQ_FL_LOCK(rxq)		FL_LOCK(&(rxq)->fl)
#define RXQ_FL_UNLOCK(rxq)		FL_UNLOCK(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_OWNED(rxq)	FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)

#define EQ_LOCK(eq)			mtx_lock(&(eq)->eq_lock)
#define EQ_TRYLOCK(eq)			mtx_trylock(&(eq)->eq_lock)
#define EQ_UNLOCK(eq)			mtx_unlock(&(eq)->eq_lock)
#define EQ_LOCK_ASSERT_OWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_OWNED)
#define EQ_LOCK_ASSERT_NOTOWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)

#define TXQ_LOCK(txq)			EQ_LOCK(&(txq)->eq)
#define TXQ_TRYLOCK(txq)		EQ_TRYLOCK(&(txq)->eq)
#define TXQ_UNLOCK(txq)			EQ_UNLOCK(&(txq)->eq)
#define TXQ_LOCK_ASSERT_OWNED(txq)	EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
#define TXQ_LOCK_ASSERT_NOTOWNED(txq)	EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)

#define CH_DUMP_MBOX(sc, mbox, data_reg) \
	do { \
		if (sc->debug_flags & DF_DUMP_MBOX) { \
			log(LOG_NOTICE, \
			    "%s mbox %u: %016llx %016llx %016llx %016llx " \
			    "%016llx %016llx %016llx %016llx\n", \
			    device_get_nameunit(sc->dev), mbox, \
			    (unsigned long long)t4_read_reg64(sc, data_reg), \
			    (unsigned long long)t4_read_reg64(sc, data_reg + 8), \
			    (unsigned long long)t4_read_reg64(sc, data_reg + 16), \
			    (unsigned long long)t4_read_reg64(sc, data_reg + 24), \
			    (unsigned long long)t4_read_reg64(sc, data_reg + 32), \
			    (unsigned long long)t4_read_reg64(sc, data_reg + 40), \
			    (unsigned long long)t4_read_reg64(sc, data_reg + 48), \
			    (unsigned long long)t4_read_reg64(sc, data_reg + 56)); \
		} \
	} while (0)

#define for_each_txq(vi, iter, q) \
	for (q = &vi->pi->adapter->sge.txq[vi->first_txq], iter = 0; \
	    iter < vi->ntxq; ++iter, ++q)
#define for_each_rxq(vi, iter, q) \
	for (q = &vi->pi->adapter->sge.rxq[vi->first_rxq], iter = 0; \
	    iter < vi->nrxq; ++iter, ++q)
#define for_each_ofld_txq(vi, iter, q) \
	for (q = &vi->pi->adapter->sge.ofld_txq[vi->first_ofld_txq], iter = 0; \
	    iter < vi->nofldtxq; ++iter, ++q)
#define for_each_ofld_rxq(vi, iter, q) \
	for (q = &vi->pi->adapter->sge.ofld_rxq[vi->first_ofld_rxq], iter = 0; \
	    iter < vi->nofldrxq; ++iter, ++q)
#define for_each_nm_txq(vi, iter, q) \
	for (q = &vi->pi->adapter->sge.nm_txq[vi->first_nm_txq], iter = 0; \
	    iter < vi->nnmtxq; ++iter, ++q)
#define for_each_nm_rxq(vi, iter, q) \
	for (q = &vi->pi->adapter->sge.nm_rxq[vi->first_nm_rxq], iter = 0; \
	    iter < vi->nnmrxq; ++iter, ++q)
#define for_each_vi(_pi, _iter, _vi) \
	for ((_vi) = (_pi)->vi, (_iter) = 0; (_iter) < (_pi)->nvi; \
	     ++(_iter), ++(_vi))
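
/*
 * Illustrative use of the iterators above (a sketch, not code from this
 * file): walk every NIC tx queue of a VI.
 *
 *	struct sge_txq *txq;
 *	int i;
 *
 *	for_each_txq(vi, i, txq) {
 *		(txq points at the i'th tx queue of this VI)
 *	}
 */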

#define IDXINCR(idx, incr, wrap) do { \
	idx = wrap - idx > incr ? idx + incr : incr - (wrap - idx); \
} while (0)
#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
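
/*
 * Worked example of the circular-index helpers, assuming wrap = 1024:
 * IDXDIFF(2, 1020, 1024) = 1024 - 1020 + 2 = 6 entries in flight across
 * the wrap, and IDXINCR(idx, 6, 1024) with idx = 1020 leaves idx = 2.
 */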

/* One for errors, one for firmware events */
#define T4_EXTRA_INTR 2

/* One for firmware events */
#define T4VF_EXTRA_INTR 1

static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{

	return bus_space_read_4(sc->bt, sc->bh, reg);
}

static inline void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{

	bus_space_write_4(sc->bt, sc->bh, reg, val);
}

static inline uint64_t
t4_read_reg64(struct adapter *sc, uint32_t reg)
{

#ifdef __LP64__
	return bus_space_read_8(sc->bt, sc->bh, reg);
#else
	return (uint64_t)bus_space_read_4(sc->bt, sc->bh, reg) +
	    ((uint64_t)bus_space_read_4(sc->bt, sc->bh, reg + 4) << 32);

#endif
}

static inline void
t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
{

#ifdef __LP64__
	bus_space_write_8(sc->bt, sc->bh, reg, val);
#else
	bus_space_write_4(sc->bt, sc->bh, reg, val);
	bus_space_write_4(sc->bt, sc->bh, reg + 4, val >> 32);
#endif
}

static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{

	*val = pci_read_config(sc->dev, reg, 1);
}

static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{

	pci_write_config(sc->dev, reg, val, 1);
}

static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{

	*val = pci_read_config(sc->dev, reg, 2);
}

static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{

	pci_write_config(sc->dev, reg, val, 2);
}

static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{

	*val = pci_read_config(sc->dev, reg, 4);
}

static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{

	pci_write_config(sc->dev, reg, val, 4);
}

static inline struct port_info *
adap2pinfo(struct adapter *sc, int idx)
{

	return (sc->port[idx]);
}

static inline void
t4_os_set_hw_addr(struct port_info *pi, uint8_t hw_addr[])
{

	bcopy(hw_addr, pi->vi[0].hw_addr, ETHER_ADDR_LEN);
}

static inline bool
is_10G_port(const struct port_info *pi)
{

	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
}

static inline bool
is_25G_port(const struct port_info *pi)
{

	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) != 0);
}

static inline bool
is_40G_port(const struct port_info *pi)
{

	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) != 0);
}

static inline bool
is_100G_port(const struct port_info *pi)
{

	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G) != 0);
}

static inline int
port_top_speed(const struct port_info *pi)
{

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
		return (100);
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		return (40);
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
		return (25);
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		return (10);
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		return (1);

	return (0);
}

static inline int
tx_resume_threshold(struct sge_eq *eq)
{

	/* not quite the same as qsize / 4, but this will do. */
	return (eq->sidx / 4);
}

static inline int
t4_use_ldst(struct adapter *sc)
{

#ifdef notyet
	return (sc->flags & FW_OK || !sc->use_bd);
#else
	return (0);
#endif
}

/* t4_main.c */
extern int t4_ntxq;
extern int t4_nrxq;
extern int t4_intr_types;
extern int t4_tmr_idx;
extern int t4_pktc_idx;
extern unsigned int t4_qsize_rxq;
extern unsigned int t4_qsize_txq;
extern device_method_t cxgbe_methods[];

int t4_os_find_pci_capability(struct adapter *, int);
int t4_os_pci_save_state(struct adapter *);
int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(struct port_info *);
void t4_os_link_changed(struct port_info *);
void t4_iterate(void (*)(struct adapter *, void *), void *);
void t4_init_devnames(struct adapter *);
void t4_add_adapter(struct adapter *);
int t4_detach_common(device_t);
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t4_map_bars_0_and_4(struct adapter *);
int t4_map_bar_2(struct adapter *);
int t4_setup_intr_handlers(struct adapter *);
void t4_sysctls(struct adapter *);
int begin_synchronized_op(struct adapter *, struct vi_info *, int, char *);
void doom_vi(struct adapter *, struct vi_info *);
void end_synchronized_op(struct adapter *, int);
int update_mac_settings(struct ifnet *, int);
int adapter_full_init(struct adapter *);
int adapter_full_uninit(struct adapter *);
uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
int vi_full_init(struct vi_info *);
int vi_full_uninit(struct vi_info *);
void vi_sysctls(struct vi_info *);
void vi_tick(void *);

#ifdef DEV_NETMAP
/* t4_netmap.c */
void cxgbe_nm_attach(struct vi_info *);
void cxgbe_nm_detach(struct vi_info *);
void t4_nm_intr(void *);
#endif

/* t4_sge.c */
void t4_sge_modload(void);
void t4_sge_modunload(void);
uint64_t t4_sge_extfree_refs(void);
void t4_tweak_chip_settings(struct adapter *);
int t4_read_chip_settings(struct adapter *);
int t4_create_dma_tag(struct adapter *);
void t4_sge_sysctls(struct adapter *, struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
int t4_destroy_dma_tag(struct adapter *);
int t4_setup_adapter_queues(struct adapter *);
int t4_teardown_adapter_queues(struct adapter *);
int t4_setup_vi_queues(struct vi_info *);
int t4_teardown_vi_queues(struct vi_info *);
void t4_intr_all(void *);
void t4_intr(void *);
void t4_vi_intr(void *);
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
void t4_update_fl_bufsize(struct ifnet *);
int parse_pkt(struct adapter *, struct mbuf **);
void *start_wrq_wr(struct sge_wrq *, int, struct wrq_cookie *);
void commit_wrq_wr(struct sge_wrq *, void *, struct wrq_cookie *);
int tnl_cong(struct port_info *, int);
int t4_register_an_handler(an_handler_t);
int t4_register_fw_msg_handler(int, fw_msg_handler_t);
int t4_register_cpl_handler(int, cpl_handler_t);

/* t4_tracer.c */
struct t4_tracer;
void t4_tracer_modload(void);
void t4_tracer_modunload(void);
void t4_tracer_port_detach(struct adapter *);
int t4_get_tracer(struct adapter *, struct t4_tracer *);
int t4_set_tracer(struct adapter *, struct t4_tracer *);
int t4_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t5_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);

/* t4_sched.c */
int t4_set_sched_class(struct adapter *, struct t4_sched_params *);
int t4_set_sched_queue(struct adapter *, struct t4_sched_queue *);
int t4_init_tx_sched(struct adapter *);
int t4_free_tx_sched(struct adapter *);
void t4_update_tx_sched(struct adapter *);
int t4_reserve_cl_rl_kbps(struct adapter *, int, u_int, int *);
void t4_release_cl_rl_kbps(struct adapter *, int, int);

static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
{
	int len = offsetof(struct wrqe, wr) + wr_len;
	struct wrqe *wr;

	wr = malloc(len, M_CXGBE, M_NOWAIT);
	if (__predict_false(wr == NULL))
		return (NULL);
	wr->wr_len = wr_len;
	wr->wrq = wrq;
	return (wr);
}

static inline void *
wrtod(struct wrqe *wr)
{
	return (&wr->wr[0]);
}

static inline void
free_wrqe(struct wrqe *wr)
{
	free(wr, M_CXGBE);
}

static inline void
t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
{
	struct sge_wrq *wrq = wr->wrq;

	TXQ_LOCK(wrq);
	t4_wrq_tx_locked(sc, wrq, wr);
	TXQ_UNLOCK(wrq);
}

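/*
 * Illustrative work-request lifecycle (a sketch, not code from this file):
 * allocate a wrqe sized for the WR, build the WR in place, then hand it to
 * the wrq.  cpl_abort_rpl is just an example payload type here.
 *
 *	struct wrqe *wr;
 *	struct cpl_abort_rpl *cpl;
 *
 *	wr = alloc_wrqe(sizeof(*cpl), wrq);
 *	if (wr == NULL)
 *		return (ENOMEM);
 *	cpl = wrtod(wr);
 *	(fill in *cpl here)
 *	t4_wrq_tx(sc, wr);	(the wrq takes ownership of wr)
 */
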
#endif