/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of hardware indirection table entries must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
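
/* Illustrative sketch (not a driver function): a receive hash would be
 * reduced to an indirection-table slot with the mask, e.g.:
 *
 *	u32 idx = hash & MANA_INDIRECT_TABLE_MASK;
 *	u32 queue = apc->indir_table[idx];
 *
 * where "apc" is assumed to be a struct mana_port_context (defined below).
 */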

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

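/* The default Ethernet MTU plus the 14-byte Ethernet header */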
#define ADAPTER_MTU_SIZE 1500
#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)

#define RX_BUFFERS_PER_QUEUE 512

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 16

struct mana_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};
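
/* Illustrative sketch: per-queue counters are updated under the
 * u64_stats_sync seqcount so 64-bit reads stay consistent on 32-bit
 * architectures ("stats" and "len" here are illustrative variables):
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->packets++;
 *	stats->bytes += len;
 *	u64_stats_update_end(&stats->syncp);
 */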

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1	: 10;
			u32 vsq_frame	: 14;
			u32 reserved2	: 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats stats;
};

/* skb data and frags dma mappings */
struct mana_skb_head {
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];

	u32 size[MAX_SKB_FRAGS + 1];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
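
/* Illustrative sketch, assuming this headroom is reserved through
 * ndev->needed_headroom so the per-skb DMA bookkeeping can be stashed in
 * front of the packet data ("da" is an assumed dma_addr_t):
 *
 *	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 *
 *	ash->dma_handle[0] = da;
 *	ash->size[0] = skb_headlen(skb);
 */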

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT	= 0,
	MANA_LONG_PKT_FMT	= 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt		: 2;
	u32 is_outer_ipv4	: 1;
	u32 is_outer_ipv6	: 1;
	u32 comp_iphdr_csum	: 1;
	u32 comp_tcp_csum	: 1;
	u32 comp_udp_csum	: 1;
	u32 suppress_txcqe_gen	: 1;
	u32 vcq_num		: 24;

	u32 trans_off		: 10; /* Transport header offset */
	u32 vsq_frame		: 14;
	u32 short_vp_offset	: 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap		: 1;
	u32 inner_is_ipv6	: 1;
	u32 inner_tcp_opt	: 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1		: 12;
	u32 pcp			: 3;  /* 802.1Q */
	u32 dei			: 1;  /* 802.1Q */
	u32 vlan_id		: 12; /* 802.1Q */

	u32 inner_frame_offset	: 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset	: 12;
	u32 reserved2		: 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */
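
/* Illustrative sketch: how a transmit path might fill the short OOB for a
 * TCP-over-IPv4 packet with checksum offload (field usage inferred from the
 * definitions above, not a verbatim copy of the driver):
 *
 *	struct mana_tx_oob pkg_oob = {};
 *
 *	pkg_oob.s_oob.pkt_fmt = MANA_SHORT_PKT_FMT;
 *	pkg_oob.s_oob.is_outer_ipv4 = 1;
 *	pkg_oob.s_oob.comp_tcp_csum = 1;
 *	pkg_oob.s_oob.trans_off = skb_transport_offset(skb);
 */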

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type	: 6;
	u32 client_type	: 2;
	u32 vendor_err	: 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
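
/* Illustrative sketch: mapping the NDIS hash type reported in an RX
 * completion to the kernel's packet hash levels (hypothetical helper body):
 *
 *	if (hashtype & MANA_HASH_L4)
 *		skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
 *	else if (hashtype & MANA_HASH_L3)
 *		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
 */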

struct mana_rxcomp_perpkt_info {
	u32 pkt_len	: 16;
	u32 reserved1	: 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id			: 12;
	u32 rx_vlantag_present		: 1;
	u32 rx_outer_iphdr_csum_succeed	: 1;
	u32 rx_outer_iphdr_csum_fail	: 1;
	u32 reserved1			: 1;
	u32 rx_hashtype			: 9;
	u32 rx_iphdr_csum_succeed	: 1;
	u32 rx_iphdr_csum_fail		: 1;
	u32 rx_tcp_csum_succeed		: 1;
	u32 rx_tcp_csum_fail		: 1;
	u32 rx_udp_csum_succeed		: 1;
	u32 rx_udp_csum_fail		: 1;
	u32 reserved2			: 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */
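
/* Illustrative sketch: one plausible translation of the checksum bits above
 * into the kernel checksum state (simplified, not the literal driver code):
 *
 *	if (oob->rx_iphdr_csum_succeed &&
 *	    (oob->rx_tcp_csum_succeed || oob->rx_udp_csum_succeed))
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else
 *		skb->ip_summed = CHECKSUM_NONE;
 */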

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset	: 5;
	u32 tx_wqe_offset	: 27;

	u32 reserved[12];
}; /* HW DATA */
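
/* Illustrative sketch: a TX completion handler would typically look at the
 * embedded CQE header first and dispatch on its type (hypothetical, and
 * assuming struct gdma_comp from gdma.h exposes the raw CQE bytes as
 * cqe_data):
 *
 *	struct mana_tx_comp_oob *oob;
 *
 *	oob = (struct mana_tx_comp_oob *)comp->cqe_data;
 *	if (oob->cqe_hdr.cqe_type != CQE_TX_OKAY)
 *		netdev_warn(ndev, "TX CQE error %d\n", oob->cqe_hdr.cqe_type);
 */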

struct mana_rxq;

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id, used to verify if each CQE comes to the right CQ */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Pointer to a buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp *gdma_comp_buf;
};

#define GDMA_MAX_RQE_SGES 15

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	dma_addr_t buf_dma_addr;

	/* SGL of the buffer to be posted, as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[GDMA_MAX_RQE_SGES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats stats;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[];
};
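
/* Illustrative sketch: with rx_oobs[] as a flexible array member, a receive
 * queue and its buffer OOBs would be allocated in one shot, e.g.:
 *
 *	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
 *		      GFP_KERNEL);
 */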

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	struct mana_eq *eqs;

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues of RQ pointers. */
	struct mana_rxq **rxqs;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};
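
/* Illustrative sketch, assuming the port context lives in the net_device
 * private area:
 *
 *	struct mana_port_context *apc = netdev_priv(ndev);
 */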

int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd);
void mana_remove(struct gdma_dev *gd);

extern const struct ethtool_ops mana_ethtool_ops;

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* Driver Capability flags */
	u64 drv_cap_flags1;
	u64 drv_cap_flags2;
	u64 drv_cap_flags3;
	u64 drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
}; /* HW DATA */
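
/* Illustrative sketch: the indirection table itself is not part of the fixed
 * structure above; the request is presumably followed by an array of
 * mana_handle_t entries located via indir_tab_offset, e.g.:
 *
 *	req_buf_size = sizeof(*req) +
 *		       sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
 *	req->indir_tab_offset = sizeof(*req);
 *	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
 */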

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define MANA_MAX_NUM_QUEUES 16

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
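
/* Illustrative sketch: a vport offset that does not fit in the 8-bit
 * short_vp_offset field would force the long OOB format (assumed logic):
 *
 *	if (apc->tx_shortform_allowed &&
 *	    apc->tx_vp_offset <= MANA_SHORT_VPORT_OFFSET_MAX)
 *		pkt_fmt = MANA_SHORT_PKT_FMT;
 *	else
 *		pkt_fmt = MANA_LONG_PKT_FMT;
 */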

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

#endif /* _MANA_H */