/*-
 * Copyright (c) 2013 Tsubai Masanari
 * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/vmware/vmxnet3/if_vmxvar.h 313911 2017-02-18 06:18:14Z loos $
 */

#ifndef _IF_VMXVAR_H
#define _IF_VMXVAR_H

struct vmxnet3_softc;

struct vmxnet3_dma_alloc {
	bus_addr_t		dma_paddr;
	caddr_t			dma_vaddr;
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
	bus_size_t		dma_size;
};

/*
 * The number of Rx/Tx queues this driver prefers.
 */
#define VMXNET3_DEF_RX_QUEUES	8
#define VMXNET3_DEF_TX_QUEUES	8

/*
 * The number of Rx rings in each Rx queue.
 */
#define VMXNET3_RXRINGS_PERQ	2

/*
 * The number of descriptors in each Rx/Tx ring.
 */
#define VMXNET3_DEF_TX_NDESC		512
#define VMXNET3_MAX_TX_NDESC		4096
#define VMXNET3_MIN_TX_NDESC		32
#define VMXNET3_MASK_TX_NDESC		0x1F
#define VMXNET3_DEF_RX_NDESC		256
#define VMXNET3_MAX_RX_NDESC		2048
#define VMXNET3_MIN_RX_NDESC		32
#define VMXNET3_MASK_RX_NDESC		0x1F

#define VMXNET3_MAX_TX_NCOMPDESC	VMXNET3_MAX_TX_NDESC
#define VMXNET3_MAX_RX_NCOMPDESC \
    (VMXNET3_MAX_RX_NDESC * VMXNET3_RXRINGS_PERQ)
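
/*
 * Illustrative sketch, not part of the driver's interface: the 0x1F
 * masks above imply that a valid ring size is a multiple of 32 within
 * the MIN/MAX bounds. A hypothetical bounds check for a Tx descriptor
 * count tunable could look like this (the Rx macros follow the same
 * pattern).
 */
static inline int
vmxnet3_tx_ndesc_valid(int ndesc)	/* hypothetical helper, illustration only */
{
	return (ndesc >= VMXNET3_MIN_TX_NDESC &&
	    ndesc <= VMXNET3_MAX_TX_NDESC &&
	    (ndesc & VMXNET3_MASK_TX_NDESC) == 0);
}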

struct vmxnet3_txbuf {
	bus_dmamap_t		 vtxb_dmamap;
	struct mbuf		*vtxb_m;
};

struct vmxnet3_txring {
	struct vmxnet3_txbuf	*vxtxr_txbuf;
	u_int			 vxtxr_head;
	u_int			 vxtxr_next;
	u_int			 vxtxr_ndesc;
	int			 vxtxr_gen;
	bus_dma_tag_t		 vxtxr_txtag;
	struct vmxnet3_txdesc	*vxtxr_txd;
	struct vmxnet3_dma_alloc vxtxr_dma;
};

static inline int
VMXNET3_TXRING_AVAIL(struct vmxnet3_txring *txr)
{
	int avail = txr->vxtxr_next - txr->vxtxr_head - 1;
	return (avail < 0 ? txr->vxtxr_ndesc + avail : avail);
}
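
/*
 * Worked example of the wrap-around arithmetic above: with
 * vxtxr_ndesc = 512, vxtxr_head = 100 (where the next descriptor is
 * enqueued) and vxtxr_next = 90, avail = 90 - 100 - 1 = -11, so
 * 512 + (-11) = 501 descriptors are free. The extra "- 1" keeps one
 * slot unused so a completely full ring is never confused with an
 * empty one.
 */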

struct vmxnet3_rxbuf {
	bus_dmamap_t		 vrxb_dmamap;
	struct mbuf		*vrxb_m;
};

struct vmxnet3_rxring {
	struct vmxnet3_rxbuf	*vxrxr_rxbuf;
	struct vmxnet3_rxdesc	*vxrxr_rxd;
	u_int			 vxrxr_fill;
	u_int			 vxrxr_ndesc;
	int			 vxrxr_gen;
	int			 vxrxr_rid;
	bus_dma_tag_t		 vxrxr_rxtag;
	struct vmxnet3_dma_alloc vxrxr_dma;
	bus_dmamap_t		 vxrxr_spare_dmap;
};

static inline void
vmxnet3_rxr_increment_fill(struct vmxnet3_rxring *rxr)
{

	if (++rxr->vxrxr_fill == rxr->vxrxr_ndesc) {
		rxr->vxrxr_fill = 0;
		rxr->vxrxr_gen ^= 1;
	}
}
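
/*
 * Note on the generation bit: each time the fill index wraps, the
 * ring's generation value toggles between 0 and 1. Newly written
 * descriptors are stamped with the current generation, which is how
 * the device tells descriptors posted on this pass of the ring apart
 * from stale ones left over from the previous pass.
 */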

struct vmxnet3_comp_ring {
	union {
		struct vmxnet3_txcompdesc *txcd;
		struct vmxnet3_rxcompdesc *rxcd;
	}			 vxcr_u;
	u_int			 vxcr_next;
	u_int			 vxcr_ndesc;
	int			 vxcr_gen;
	struct vmxnet3_dma_alloc vxcr_dma;
};

struct vmxnet3_txq_stats {
	uint64_t		vmtxs_opackets;	/* if_opackets */
	uint64_t		vmtxs_obytes;	/* if_obytes */
	uint64_t		vmtxs_omcasts;	/* if_omcasts */
	uint64_t		vmtxs_csum;
	uint64_t		vmtxs_tso;
	uint64_t		vmtxs_full;
	uint64_t		vmtxs_offload_failed;
};

struct vmxnet3_txqueue {
	struct lock			 vxtxq_lock;
	struct vmxnet3_softc		*vxtxq_sc;
#ifndef VMXNET3_LEGACY_TX
	struct buf_ring			*vxtxq_br;
#endif
	int				 vxtxq_id;
	int				 vxtxq_intr_idx;
	int				 vxtxq_watchdog;
	struct vmxnet3_txring		 vxtxq_cmd_ring;
	struct vmxnet3_comp_ring	 vxtxq_comp_ring;
	struct vmxnet3_txq_stats	 vxtxq_stats;
	struct vmxnet3_txq_shared	*vxtxq_ts;
	struct sysctl_oid_list		*vxtxq_sysctl;
#ifndef VMXNET3_LEGACY_TX
	struct task			 vxtxq_defrtask;
#endif
	char				 vxtxq_name[16];
} __cachealign;

#define VMXNET3_TXQ_LOCK(_txq)		lockmgr(&(_txq)->vxtxq_lock, LK_EXCLUSIVE)
#define VMXNET3_TXQ_TRYLOCK(_txq)	lockmgr_try(&(_txq)->vxtxq_lock, LK_EXCLUSIVE)
#define VMXNET3_TXQ_UNLOCK(_txq)	lockmgr(&(_txq)->vxtxq_lock, LK_RELEASE)
#define VMXNET3_TXQ_LOCK_ASSERT(_txq)		\
    KKASSERT(lockowned(&(_txq)->vxtxq_lock) != 0)
#define VMXNET3_TXQ_LOCK_ASSERT_NOTOWNED(_txq)	\
    KKASSERT(lockowned(&(_txq)->vxtxq_lock) == 0)

struct vmxnet3_rxq_stats {
	uint64_t		vmrxs_ipackets;	/* if_ipackets */
	uint64_t		vmrxs_ibytes;	/* if_ibytes */
	uint64_t		vmrxs_iqdrops;	/* if_iqdrops */
	uint64_t		vmrxs_ierrors;	/* if_ierrors */
};

struct vmxnet3_rxqueue {
	struct lock			 vxrxq_lock;
	struct vmxnet3_softc		*vxrxq_sc;
	int				 vxrxq_id;
	int				 vxrxq_intr_idx;
	struct mbuf			*vxrxq_mhead;
	struct mbuf			*vxrxq_mtail;
	struct vmxnet3_rxring		 vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ];
	struct vmxnet3_comp_ring	 vxrxq_comp_ring;
	struct vmxnet3_rxq_stats	 vxrxq_stats;
	struct vmxnet3_rxq_shared	*vxrxq_rs;
	struct sysctl_oid_list		*vxrxq_sysctl;
	char				 vxrxq_name[16];
} __cachealign;

#define VMXNET3_RXQ_LOCK(_rxq)		lockmgr(&(_rxq)->vxrxq_lock, LK_EXCLUSIVE)
#define VMXNET3_RXQ_UNLOCK(_rxq)	lockmgr(&(_rxq)->vxrxq_lock, LK_RELEASE)
#define VMXNET3_RXQ_LOCK_ASSERT(_rxq)		\
    KKASSERT(lockowned(&(_rxq)->vxrxq_lock) != 0)
#define VMXNET3_RXQ_LOCK_ASSERT_NOTOWNED(_rxq)	\
    KKASSERT(lockowned(&(_rxq)->vxrxq_lock) == 0)

struct vmxnet3_statistics {
	uint32_t		vmst_defragged;
	uint32_t		vmst_defrag_failed;
	uint32_t		vmst_mgetcl_failed;
	uint32_t		vmst_mbuf_load_failed;
};

struct vmxnet3_interrupt {
	struct resource		*vmxi_irq;
	int			 vmxi_rid;
	void			*vmxi_handler;
};

struct vmxnet3_softc {
	device_t			 vmx_dev;
	struct ifnet			*vmx_ifp;
	struct vmxnet3_driver_shared	*vmx_ds;
	uint32_t			 vmx_flags;
#define VMXNET3_FLAG_NO_MSIX	0x0001
#define VMXNET3_FLAG_RSS	0x0002

	struct vmxnet3_rxqueue		*vmx_rxq;
	struct vmxnet3_txqueue		*vmx_txq;

	struct resource			*vmx_res0;
	bus_space_tag_t			 vmx_iot0;
	bus_space_handle_t		 vmx_ioh0;
	struct resource			*vmx_res1;
	bus_space_tag_t			 vmx_iot1;
	bus_space_handle_t		 vmx_ioh1;
	struct resource			*vmx_msix_res;

	int				 vmx_link_active;
	int				 vmx_link_speed;
	int				 vmx_if_flags;
	int				 vmx_ntxqueues;
	int				 vmx_nrxqueues;
	int				 vmx_ntxdescs;
	int				 vmx_nrxdescs;
	int				 vmx_max_rxsegs;
	int				 vmx_rx_max_chain;

	struct vmxnet3_statistics	 vmx_stats;

	int				 vmx_irq_type;
	u_int				 vmx_irq_flags;

	int				 vmx_intr_type;
	int				 vmx_intr_mask_mode;
	int				 vmx_event_intr_idx;
	int				 vmx_nintrs;
	struct vmxnet3_interrupt	 vmx_intrs[VMXNET3_MAX_INTRS];

	struct lock			 vmx_lock;
#ifndef VMXNET3_LEGACY_TX
	struct taskqueue		*vmx_tq;
#endif
	uint8_t				*vmx_mcast;
	void				*vmx_qs;
	struct vmxnet3_rss_shared	*vmx_rss;
	struct callout			 vmx_tick;
	struct vmxnet3_dma_alloc	 vmx_ds_dma;
	struct vmxnet3_dma_alloc	 vmx_qs_dma;
	struct vmxnet3_dma_alloc	 vmx_mcast_dma;
	struct vmxnet3_dma_alloc	 vmx_rss_dma;
	struct ifmedia			 vmx_media;
	int				 vmx_max_ntxqueues;
	int				 vmx_max_nrxqueues;
	eventhandler_tag		 vmx_vlan_attach;
	eventhandler_tag		 vmx_vlan_detach;
	uint32_t			 vmx_vlan_filter[4096/32];
	uint8_t				 vmx_lladdr[ETHER_ADDR_LEN];
};

#define VMXNET3_CORE_LOCK_INIT(_sc, _name) \
    lockinit(&(_sc)->vmx_lock, "VMXNET3 Lock", 0, 0)
#define VMXNET3_CORE_LOCK_DESTROY(_sc)	lockuninit(&(_sc)->vmx_lock)
#define VMXNET3_CORE_LOCK(_sc)		lockmgr(&(_sc)->vmx_lock, LK_EXCLUSIVE)
#define VMXNET3_CORE_UNLOCK(_sc)	lockmgr(&(_sc)->vmx_lock, LK_RELEASE)
#define VMXNET3_CORE_LOCK_ASSERT(_sc)	KKASSERT(lockowned(&(_sc)->vmx_lock) != 0)
#define VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(_sc) \
    KKASSERT(lockowned(&(_sc)->vmx_lock) == 0)
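
/*
 * Typical usage of the locking macros above (illustrative only):
 *
 *	VMXNET3_CORE_LOCK(sc);
 *	VMXNET3_CORE_LOCK_ASSERT(sc);
 *	...modify softc state...
 *	VMXNET3_CORE_UNLOCK(sc);
 */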

/*
 * The driver version we report to the hypervisor; we just keep
 * this value constant.
 */
#define VMXNET3_DRIVER_VERSION 0x00010000

/*
 * Maximum number of descriptors per Tx packet. We must limit the size
 * of any TSO packet based on the number of segments.
 */
#define VMXNET3_TX_MAXSEGS		32
#define VMXNET3_TX_MAXSIZE		(VMXNET3_TX_MAXSEGS * MCLBYTES)

/*
 * Maximum supported Tx segment size. The length field in the
 * Tx descriptor is 14 bits.
 */
#define VMXNET3_TX_MAXSEGSIZE		(1 << 14)
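
/*
 * Taken together, the limits above bound a Tx mapping in both
 * dimensions: at most VMXNET3_TX_MAXSEGS (32) segments, each no
 * larger than VMXNET3_TX_MAXSEGSIZE (1 << 14 = 16384 bytes), with
 * VMXNET3_TX_MAXSIZE (32 * MCLBYTES = 65536 bytes with 2 KB clusters)
 * as the overall cap, which is enough for a maximal 65535-byte TSO
 * frame.
 */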

/*
 * The maximum number of Rx segments we accept. When LRO is enabled,
 * this allows us to receive the maximum-sized frame with one MCLBYTES
 * cluster followed by 16 MJUMPAGESIZE clusters.
 */
#define VMXNET3_MAX_RX_SEGS		17
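
/*
 * The arithmetic behind the value above: one MCLBYTES (2048 byte)
 * cluster plus 16 MJUMPAGESIZE (page-sized, typically 4096 byte)
 * clusters gives 2048 + 16 * 4096 = 67584 bytes of buffer space,
 * which comfortably covers a maximal 65535-byte LRO-aggregated frame.
 */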

/*
 * Predetermined size of the multicast MAC filter table. If the
 * number of multicast addresses exceeds this size, then the
 * ALL_MULTI mode is used instead.
 */
#define VMXNET3_MULTICAST_MAX		32

/*
 * Our Tx watchdog timeout.
 */
#define VMXNET3_WATCHDOG_TIMEOUT	5

/*
 * Number of slots in the Tx bufrings. This value matches what most
 * other multiqueue drivers use.
 */
#define VMXNET3_DEF_BUFRING_SIZE	4096

/*
 * IP protocols for which we can perform Tx checksum offloading.
 */
#define VMXNET3_CSUM_OFFLOAD		(CSUM_TCP | CSUM_UDP)
#define VMXNET3_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

#define VMXNET3_CSUM_ALL_OFFLOAD	\
    (VMXNET3_CSUM_OFFLOAD | VMXNET3_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
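
/*
 * These masks are expressed in terms of the mbuf CSUM_* flags, so a
 * natural use (illustrative sketch only) is to test a packet's
 * requested offloads against them, e.g.:
 *
 *	if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD)
 *		set up the checksum/TSO fields in the Tx descriptor;
 */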

/*
 * Compat macros to keep this driver compiling on old releases.
 */

#if !defined(SYSCTL_ADD_UQUAD)
#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
#endif

#if !defined(IFCAP_TXCSUM_IPV6)
#define IFCAP_TXCSUM_IPV6 0
#endif

#if !defined(IFCAP_RXCSUM_IPV6)
#define IFCAP_RXCSUM_IPV6 0
#endif

#if !defined(CSUM_TCP_IPV6)
#define CSUM_TCP_IPV6 0
#endif

#if !defined(CSUM_UDP_IPV6)
#define CSUM_UDP_IPV6	0
#endif

#endif /* _IF_VMXVAR_H */