/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HNS3_ENET_H
#define __HNS3_ENET_H

#include <linux/dim.h>
#include <linux/if_vlan.h>
#include <net/page_pool/types.h>
#include <asm/barrier.h>

#include "hnae3.h"

struct iphdr;
struct ipv6hdr;

enum hns3_nic_state {
	HNS3_NIC_STATE_TESTING,
	HNS3_NIC_STATE_RESETTING,
	HNS3_NIC_STATE_INITED,
	HNS3_NIC_STATE_DOWN,
	HNS3_NIC_STATE_DISABLED,
	HNS3_NIC_STATE_REMOVING,
	HNS3_NIC_STATE_SERVICE_INITED,
	HNS3_NIC_STATE_SERVICE_SCHED,
	HNS3_NIC_STATE2_RESET_REQUESTED,
	HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
	HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE,
	HNS3_NIC_STATE_TX_PUSH_ENABLE,
	HNS3_NIC_STATE_MAX
};
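
/* These values are used as bit indices into hns3_nic_priv::state (see
 * hns3_nic_resetting() below). A minimal usage sketch:
 *
 *	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
 *		return;
 */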

#define HNS3_MAX_PUSH_BD_NUM		2

#define HNS3_RING_RX_RING_BASEADDR_L_REG	0x00000
#define HNS3_RING_RX_RING_BASEADDR_H_REG	0x00004
#define HNS3_RING_RX_RING_BD_NUM_REG		0x00008
#define HNS3_RING_RX_RING_BD_LEN_REG		0x0000C
#define HNS3_RING_RX_RING_TAIL_REG		0x00018
#define HNS3_RING_RX_RING_HEAD_REG		0x0001C
#define HNS3_RING_RX_RING_FBDNUM_REG		0x00020
#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG	0x0002C

#define HNS3_RING_TX_RING_BASEADDR_L_REG	0x00040
#define HNS3_RING_TX_RING_BASEADDR_H_REG	0x00044
#define HNS3_RING_TX_RING_BD_NUM_REG		0x00048
#define HNS3_RING_TX_RING_TC_REG		0x00050
#define HNS3_RING_TX_RING_TAIL_REG		0x00058
#define HNS3_RING_TX_RING_HEAD_REG		0x0005C
#define HNS3_RING_TX_RING_FBDNUM_REG		0x00060
#define HNS3_RING_TX_RING_OFFSET_REG		0x00064
#define HNS3_RING_TX_RING_EBDNUM_REG		0x00068
#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG	0x0006C
#define HNS3_RING_TX_RING_EBD_OFFSET_REG	0x00070
#define HNS3_RING_TX_RING_BD_ERR_REG		0x00074
#define HNS3_RING_EN_REG			0x00090
#define HNS3_RING_RX_EN_REG			0x00098
#define HNS3_RING_TX_EN_REG			0x000D4

#define HNS3_RX_HEAD_SIZE			256

#define HNS3_TX_TIMEOUT (5 * HZ)
#define HNS3_RING_NAME_LEN			16
#define HNS3_BUFFER_SIZE_2048			2048
#define HNS3_RING_MAX_PENDING			32760
#define HNS3_RING_MIN_PENDING			72
#define HNS3_RING_BD_MULTIPLE			8
/* max frame size of mac */
#define HNS3_MAX_MTU(max_frm_size) \
	((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
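
/* Worked example (illustrative): the header overhead is ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + 2 * VLAN_HLEN (2 * 4 = 8) = 26 bytes, so a 9728-byte
 * max frame size yields an MTU of 9728 - 26 = 9702.
 */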

#define HNS3_BD_SIZE_512_TYPE			0
#define HNS3_BD_SIZE_1024_TYPE			1
#define HNS3_BD_SIZE_2048_TYPE			2
#define HNS3_BD_SIZE_4096_TYPE			3

#define HNS3_RX_FLAG_VLAN_PRESENT		0x1
#define HNS3_RX_FLAG_L3ID_IPV4			0x0
#define HNS3_RX_FLAG_L3ID_IPV6			0x1
#define HNS3_RX_FLAG_L4ID_UDP			0x0
#define HNS3_RX_FLAG_L4ID_TCP			0x1

#define HNS3_RXD_DMAC_S				0
#define HNS3_RXD_DMAC_M				(0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S				2
#define HNS3_RXD_VLAN_M				(0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S				4
#define HNS3_RXD_L3ID_M				(0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S				8
#define HNS3_RXD_L4ID_M				(0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B				12
#define HNS3_RXD_STRP_TAGP_S			13
#define HNS3_RXD_STRP_TAGP_M			(0x3 << HNS3_RXD_STRP_TAGP_S)

#define HNS3_RXD_L2E_B				16
#define HNS3_RXD_L3E_B				17
#define HNS3_RXD_L4E_B				18
#define HNS3_RXD_TRUNCAT_B			19
#define HNS3_RXD_HOI_B				20
#define HNS3_RXD_DOI_B				21
#define HNS3_RXD_OL3E_B				22
#define HNS3_RXD_OL4E_B				23
#define HNS3_RXD_GRO_COUNT_S			24
#define HNS3_RXD_GRO_COUNT_M			(0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B			30
#define HNS3_RXD_GRO_ECN_B			31

#define HNS3_RXD_ODMAC_S			0
#define HNS3_RXD_ODMAC_M			(0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S			2
#define HNS3_RXD_OVLAN_M			(0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S			4
#define HNS3_RXD_OL3ID_M			(0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S			8
#define HNS3_RXD_OL4ID_M			(0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_FBHI_S				12
#define HNS3_RXD_FBHI_M				(0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S				14
#define HNS3_RXD_FBLI_M				(0x3 << HNS3_RXD_FBLI_S)

#define HNS3_RXD_PTYPE_S			4
#define HNS3_RXD_PTYPE_M			GENMASK(11, 4)

#define HNS3_RXD_BDTYPE_S			0
#define HNS3_RXD_BDTYPE_M			(0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B				4
#define HNS3_RXD_UDP0_B				5
#define HNS3_RXD_EXTEND_B			7
#define HNS3_RXD_FE_B				8
#define HNS3_RXD_LUM_B				9
#define HNS3_RXD_CRCP_B				10
#define HNS3_RXD_L3L4P_B			11
#define HNS3_RXD_TSIDX_S			12
#define HNS3_RXD_TSIDX_M			(0x3 << HNS3_RXD_TSIDX_S)
#define HNS3_RXD_TS_VLD_B			14
#define HNS3_RXD_LKBK_B				15
#define HNS3_RXD_GRO_SIZE_S			16
#define HNS3_RXD_GRO_SIZE_M			(0x3fff << HNS3_RXD_GRO_SIZE_S)

#define HNS3_TXD_L3T_S				0
#define HNS3_TXD_L3T_M				(0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S				2
#define HNS3_TXD_L4T_M				(0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B				4
#define HNS3_TXD_L4CS_B				5
#define HNS3_TXD_VLAN_B				6
#define HNS3_TXD_TSO_B				7

#define HNS3_TXD_L2LEN_S			8
#define HNS3_TXD_L2LEN_M			(0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S			16
#define HNS3_TXD_L3LEN_M			(0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S			24
#define HNS3_TXD_L4LEN_M			(0xff << HNS3_TXD_L4LEN_S)

#define HNS3_TXD_CSUM_START_S		8
#define HNS3_TXD_CSUM_START_M		(0xffff << HNS3_TXD_CSUM_START_S)

#define HNS3_TXD_OL3T_S				0
#define HNS3_TXD_OL3T_M				(0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B			2
#define HNS3_TXD_MACSEC_B			3
#define HNS3_TXD_TUNTYPE_S			4
#define HNS3_TXD_TUNTYPE_M			(0xf << HNS3_TXD_TUNTYPE_S)

#define HNS3_TXD_CSUM_OFFSET_S		8
#define HNS3_TXD_CSUM_OFFSET_M		(0xffff << HNS3_TXD_CSUM_OFFSET_S)

#define HNS3_TXD_BDTYPE_S			0
#define HNS3_TXD_BDTYPE_M			(0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B				4
#define HNS3_TXD_SC_S				5
#define HNS3_TXD_SC_M				(0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B			7
#define HNS3_TXD_VLD_B				8
#define HNS3_TXD_RI_B				9
#define HNS3_TXD_RA_B				10
#define HNS3_TXD_TSYN_B				11
#define HNS3_TXD_DECTTL_S			12
#define HNS3_TXD_DECTTL_M			(0xf << HNS3_TXD_DECTTL_S)

#define HNS3_TXD_OL4CS_B			22

#define HNS3_TXD_MSS_S				0
#define HNS3_TXD_MSS_M				(0x3fff << HNS3_TXD_MSS_S)
#define HNS3_TXD_HW_CS_B			14

#define HNS3_VECTOR_TX_IRQ			BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ			BIT_ULL(1)

#define HNS3_VECTOR_NOT_INITED			0
#define HNS3_VECTOR_INITED			1

#define HNS3_MAX_BD_SIZE			65535
#define HNS3_MAX_TSO_BD_NUM			63U
#define HNS3_MAX_TSO_SIZE			1048576U
#define HNS3_MAX_NON_TSO_SIZE			9728U

#define HNS3_VECTOR_GL_MASK			GENMASK(11, 0)
#define HNS3_VECTOR_GL0_OFFSET			0x100
#define HNS3_VECTOR_GL1_OFFSET			0x200
#define HNS3_VECTOR_GL2_OFFSET			0x300
#define HNS3_VECTOR_RL_OFFSET			0x900
#define HNS3_VECTOR_RL_EN_B			6
#define HNS3_VECTOR_QL_MASK			GENMASK(9, 0)
#define HNS3_VECTOR_TX_QL_OFFSET		0xe00
#define HNS3_VECTOR_RX_QL_OFFSET		0xf00

#define HNS3_RING_EN_B				0

#define HNS3_GL0_CQ_MODE_REG			0x20d00
#define HNS3_GL1_CQ_MODE_REG			0x20d04
#define HNS3_GL2_CQ_MODE_REG			0x20d08
#define HNS3_CQ_MODE_EQE			1U
#define HNS3_CQ_MODE_CQE			0U

#define HNS3_RESCHED_BD_NUM			1024

enum hns3_pkt_l2t_type {
	HNS3_L2_TYPE_UNICAST,
	HNS3_L2_TYPE_MULTICAST,
	HNS3_L2_TYPE_BROADCAST,
	HNS3_L2_TYPE_INVALID,
};

enum hns3_pkt_l3t_type {
	HNS3_L3T_NONE,
	HNS3_L3T_IPV6,
	HNS3_L3T_IPV4,
	HNS3_L3T_RESERVED
};

enum hns3_pkt_l4t_type {
	HNS3_L4T_UNKNOWN,
	HNS3_L4T_TCP,
	HNS3_L4T_UDP,
	HNS3_L4T_SCTP
};

enum hns3_pkt_ol3t_type {
	HNS3_OL3T_NONE,
	HNS3_OL3T_IPV6,
	HNS3_OL3T_IPV4_NO_CSUM,
	HNS3_OL3T_IPV4_CSUM
};

enum hns3_pkt_tun_type {
	HNS3_TUN_NONE,
	HNS3_TUN_MAC_IN_UDP,
	HNS3_TUN_NVGRE,
	HNS3_TUN_OTHER
};

/* hardware spec ring buffer format */
struct __packed hns3_desc {
	union {
		__le64 addr;
		__le16 csum;
		struct {
			__le32 ts_nsec;
			__le32 ts_sec;
		};
	};
	union {
		struct {
			__le16 vlan_tag;
			__le16 send_size;
			union {
				__le32 type_cs_vlan_tso_len;
				struct {
					__u8 type_cs_vlan_tso;
					__u8 l2_len;
					__u8 l3_len;
					__u8 l4_len;
				};
			};
			__le16 outer_vlan_tag;
			__le16 tv;

			union {
				__le32 ol_type_vlan_len_msec;
				struct {
					__u8 ol_type_vlan_msec;
					__u8 ol2_len;
					__u8 ol3_len;
					__u8 ol4_len;
				};
			};

			__le32 paylen_ol4cs;
			__le16 bdtp_fe_sc_vld_ra_ri;
			__le16 mss_hw_csum;
		} tx;

		struct {
			__le32 l234_info;
			__le16 pkt_len;
			__le16 size;

			__le32 rss_hash;
			__le16 fd_id;
			__le16 vlan_tag;

			union {
				__le32 ol_info;
				struct {
					__le16 o_dm_vlan_id_fb;
					__le16 ot_vlan_tag;
				};
			};

			__le32 bd_base_info;
		} rx;
	};
};
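
/* Layout note (informational): the leading union is 8 bytes and both the tx
 * and rx views of the trailing union add up to 24 bytes, so with __packed a
 * descriptor should occupy 32 bytes. A compile-time check could assert this,
 * e.g. static_assert(sizeof(struct hns3_desc) == 32).
 */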

enum hns3_desc_type {
	DESC_TYPE_UNKNOWN		= 0,
	DESC_TYPE_SKB			= 1 << 0,
	DESC_TYPE_FRAGLIST_SKB		= 1 << 1,
	DESC_TYPE_PAGE			= 1 << 2,
	DESC_TYPE_BOUNCE_ALL		= 1 << 3,
	DESC_TYPE_BOUNCE_HEAD		= 1 << 4,
	DESC_TYPE_SGL_SKB		= 1 << 5,
	DESC_TYPE_PP_FRAG		= 1 << 6,
};

struct hns3_desc_cb {
	dma_addr_t dma; /* dma address of this desc */
	void *buf;      /* cpu addr for a desc */

	/* priv data for the desc, e.g. skb when used with the IP stack */
	void *priv;

	union {
		u32 page_offset;	/* for rx */
		u32 send_bytes;		/* for tx */
	};

	u32 length;     /* length of the buffer */

	u16 reuse_flag;
	u16 refill;

	/* desc type, used by the ring user to mark the type of the priv data */
	u16 type;
	u16 pagecnt_bias;
};

enum hns3_pkt_l3type {
	HNS3_L3_TYPE_IPV4,
	HNS3_L3_TYPE_IPV6,
	HNS3_L3_TYPE_ARP,
	HNS3_L3_TYPE_RARP,
	HNS3_L3_TYPE_IPV4_OPT,
	HNS3_L3_TYPE_IPV6_EXT,
	HNS3_L3_TYPE_LLDP,
	HNS3_L3_TYPE_BPDU,
	HNS3_L3_TYPE_MAC_PAUSE,
	HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */

	/* reserved for 0xA~0xB */

	HNS3_L3_TYPE_CNM = 0xc,

	/* reserved for 0xD~0xE */

	HNS3_L3_TYPE_PARSE_FAIL	= 0xf /* must be last */
};

enum hns3_pkt_l4type {
	HNS3_L4_TYPE_UDP,
	HNS3_L4_TYPE_TCP,
	HNS3_L4_TYPE_GRE,
	HNS3_L4_TYPE_SCTP,
	HNS3_L4_TYPE_IGMP,
	HNS3_L4_TYPE_ICMP,

	/* reserved for 0x6~0xE */

	HNS3_L4_TYPE_PARSE_FAIL	= 0xf /* must be last */
};

enum hns3_pkt_ol3type {
	HNS3_OL3_TYPE_IPV4 = 0,
	HNS3_OL3_TYPE_IPV6,
	/* reserved for 0x2~0x3 */
	HNS3_OL3_TYPE_IPV4_OPT = 4,
	HNS3_OL3_TYPE_IPV6_EXT,

	/* reserved for 0x6~0xE */

	HNS3_OL3_TYPE_PARSE_FAIL = 0xf	/* must be last */
};

enum hns3_pkt_ol4type {
	HNS3_OL4_TYPE_NO_TUN,
	HNS3_OL4_TYPE_MAC_IN_UDP,
	HNS3_OL4_TYPE_NVGRE,
	HNS3_OL4_TYPE_UNKNOWN
};

struct hns3_rx_ptype {
	u32 ptype : 8;
	u32 csum_level : 2;
	u32 ip_summed : 2;
	u32 l3_type : 4;
	u32 valid : 1;
	u32 hash_type : 3;
};

struct ring_stats {
	u64 sw_err_cnt;
	u64 seg_pkt_cnt;
	union {
		struct {
			u64 tx_pkts;
			u64 tx_bytes;
			u64 tx_more;
			u64 tx_push;
			u64 tx_mem_doorbell;
			u64 restart_queue;
			u64 tx_busy;
			u64 tx_copy;
			u64 tx_vlan_err;
			u64 tx_l4_proto_err;
			u64 tx_l2l3l4_err;
			u64 tx_tso_err;
			u64 over_max_recursion;
			u64 hw_limitation;
			u64 tx_bounce;
			u64 tx_spare_full;
			u64 copy_bits_err;
			u64 tx_sgl;
			u64 skb2sgl_err;
			u64 map_sg_err;
		};
		struct {
			u64 rx_pkts;
			u64 rx_bytes;
			u64 rx_err_cnt;
			u64 reuse_pg_cnt;
			u64 err_pkt_len;
			u64 err_bd_num;
			u64 l2_err;
			u64 l3l4_csum_err;
			u64 csum_complete;
			u64 rx_multicast;
			u64 non_reuse_pg;
			u64 frag_alloc_err;
			u64 frag_alloc;
		};
		__le16 csum;
	};
};

struct hns3_tx_spare {
	dma_addr_t dma;
	void *buf;
	u32 next_to_use;
	u32 next_to_clean;
	u32 last_to_clean;
	u32 len;
};

struct hns3_enet_ring {
	struct hns3_desc *desc; /* dma map address space */
	struct hns3_desc_cb *desc_cb;
	struct hns3_enet_ring *next;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_queue *tqp;
	int queue_index;
	struct device *dev; /* will be used for DMA mapping of descriptors */
	struct page_pool *page_pool;

	/* statistic */
	struct ring_stats stats;
	struct u64_stats_sync syncp;

	dma_addr_t desc_dma_addr;
	u32 buf_size;       /* size for hnae_desc->addr, preset by AE */
	u16 desc_num;       /* total number of desc */
	int next_to_use;    /* idx of next spare desc */

	/* idx of latest sent desc, the ring is empty when equal to
	 * next_to_use
	 */
	int next_to_clean;
	u32 flag;          /* ring attribute */

	int pending_buf;
	union {
		/* for Tx ring */
		struct {
			u32 fd_qb_tx_sample;
			int last_to_use;        /* last idx used by xmit */
			u32 tx_copybreak;
			struct hns3_tx_spare *tx_spare;
		};

		/* for Rx ring */
		struct {
			u32 pull_len;   /* memcpy len for current rx packet */
			u32 rx_copybreak;
			u32 frag_num;
			/* first buffer address for current packet */
			unsigned char *va;
			struct sk_buff *skb;
			struct sk_buff *tail_skb;
		};
	};
} ____cacheline_internodealigned_in_smp;

enum hns3_flow_level_range {
	HNS3_FLOW_LOW = 0,
	HNS3_FLOW_MID = 1,
	HNS3_FLOW_HIGH = 2,
	HNS3_FLOW_ULTRA = 3,
};

#define HNS3_INT_GL_50K			0x0014
#define HNS3_INT_GL_20K			0x0032
#define HNS3_INT_GL_18K			0x0036
#define HNS3_INT_GL_8K			0x007C

#define HNS3_INT_GL_1US			BIT(31)

#define HNS3_INT_RL_MAX			0x00EC
#define HNS3_INT_RL_ENABLE_MASK		0x40

#define HNS3_INT_QL_DEFAULT_CFG		0x20

struct hns3_enet_coalesce {
	u16 int_gl;
	u16 int_ql;
	u16 int_ql_max;
	u8 adapt_enable : 1;
	u8 ql_enable : 1;
	u8 unit_1us : 1;
	enum hns3_flow_level_range flow_level;
};

struct hns3_enet_ring_group {
	/* array of pointers to rings */
	struct hns3_enet_ring *ring;
	u64 total_bytes;	/* total bytes processed this group */
	u64 total_packets;	/* total packets processed this group */
	u16 count;
	struct hns3_enet_coalesce coal;
	struct dim dim;
};

struct hns3_enet_tqp_vector {
	struct hnae3_handle *handle;
	u8 __iomem *mask_addr;
	int vector_irq;
	int irq_init_flag;

	u16 idx;		/* index in the TQP vector array per handle. */

	struct napi_struct napi;

	struct hns3_enet_ring_group rx_group;
	struct hns3_enet_ring_group tx_group;

	cpumask_t affinity_mask;
	u16 num_tqps;	/* total number of tqps in TQP vector */
	struct irq_affinity_notify affinity_notify;

	char name[HNAE3_INT_NAME_LEN];

	u64 event_cnt;
} ____cacheline_internodealigned_in_smp;

struct hns3_nic_priv {
	struct hnae3_handle *ae_handle;
	struct net_device *netdev;
	struct device *dev;
	/* the cb for nic to manage the ring buffer; the first half of the
	 * array is for tx rings and the second half is for rx rings
	 */
	struct hns3_enet_ring *ring;
	struct hns3_enet_tqp_vector *tqp_vector;
	u16 vector_num;
	u8 max_non_tso_bd_num;

	u64 tx_timeout_count;

	unsigned long state;

	enum dim_cq_period_mode tx_cqe_mode;
	enum dim_cq_period_mode rx_cqe_mode;
	struct hns3_enet_coalesce tx_coal;
	struct hns3_enet_coalesce rx_coal;
	u32 tx_copybreak;
	u32 rx_copybreak;
	u32 min_tx_copybreak;
	u32 min_tx_spare_buf_size;
};

union l3_hdr_info {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

union l4_hdr_info {
	struct tcphdr *tcp;
	struct udphdr *udp;
	struct gre_base_hdr *gre;
	unsigned char *hdr;
};

struct hns3_hw_error_info {
	enum hnae3_hw_error_type type;
	const char *msg;
};

struct hns3_reset_type_map {
	enum ethtool_reset_flags rst_flags;
	enum hnae3_reset_type rst_type;
};

static inline int ring_space(struct hns3_enet_ring *ring)
{
	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
	 */
	int begin = smp_load_acquire(&ring->next_to_clean);
	int end = READ_ONCE(ring->next_to_use);

	return ((end >= begin) ? (ring->desc_num - end + begin) :
			(begin - end)) - 1;
}
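
/* Worked example (illustrative): with desc_num = 1024, next_to_clean = 5 and
 * next_to_use = 10, ring_space() returns (1024 - 10 + 5) - 1 = 1018; the -1
 * keeps one slot unused so that next_to_use == next_to_clean always means
 * "empty" rather than "full".
 */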

static inline u32 hns3_tqp_read_reg(struct hns3_enet_ring *ring, u32 reg)
{
	return readl_relaxed(ring->tqp->io_base + reg);
}

static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
	return readl(base + reg);
}

static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
{
	u8 __iomem *reg_addr = READ_ONCE(base);

	writel(value, reg_addr + reg);
}

#define hns3_read_dev(a, reg) \
	hns3_read_reg((a)->io_base, reg)

static inline bool hns3_nic_resetting(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}

#define hns3_write_dev(a, reg, value) \
	hns3_write_reg((a)->io_base, reg, value)

#define ring_to_dev(ring) ((ring)->dev)

#define ring_to_netdev(ring)	((ring)->tqp_vector->napi.dev)

#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
	DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define hns3_buf_size(_ring) ((_ring)->buf_size)

#define hns3_ring_stats_update(ring, cnt) do { \
	typeof(ring) (tmp) = (ring); \
	u64_stats_update_begin(&(tmp)->syncp); \
	((tmp)->stats.cnt)++; \
	u64_stats_update_end(&(tmp)->syncp); \
} while (0)
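
/* Usage sketch: bump one of the named ring_stats counters under the
 * u64_stats seqcount, e.g.
 *
 *	hns3_ring_stats_update(ring, tx_busy);
 */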

static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->buf_size > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
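
/* Example (assuming a 4 KiB PAGE_SIZE): a 2048-byte buf_size gives order 0,
 * so hns3_page_size() is 4096; a 4096-byte buf_size gives order 1, so
 * hns3_page_size() is 8192.
 */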

/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
	for (pos = (head).ring; (pos); pos = (pos)->next)
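
/* Usage sketch (illustrative): walk every ring linked into a group, e.g.
 *
 *	struct hns3_enet_ring *ring;
 *
 *	hns3_for_each_ring(ring, tqp_vector->rx_group)
 *		pending += ring->pending_buf;
 */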

#define hns3_get_handle(ndev) \
	(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)

#define hns3_get_ae_dev(handle) \
	(pci_get_drvdata((handle)->pdev))

#define hns3_get_ops(handle) \
	((handle)->ae_algo->ops)

#define hns3_gl_usec_to_reg(int_gl) ((int_gl) >> 1)
#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)

#define hns3_rl_usec_to_reg(int_rl) ((int_rl) >> 2)
#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
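
/* Arithmetic examples: hns3_gl_usec_to_reg(50) == 25 and
 * hns3_gl_round_down(51) == 50; hns3_rl_usec_to_reg(100) == 25 and
 * hns3_rl_round_down(101) == 100, i.e. GL values are quantized to 2 us steps
 * and RL values to 4 us steps before conversion to register units.
 */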

void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch);

void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value);
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value);
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value);
void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value);
void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value);

void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
int hns3_reset_notify(struct hnae3_handle *handle,
		      enum hnae3_reset_notify_type type);

#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif

int hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
u16 hns3_get_max_available_channels(struct hnae3_handle *h);
void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
			      enum dim_cq_period_mode tx_mode,
			      enum dim_cq_period_mode rx_mode);

void hns3_external_lb_prepare(struct net_device *ndev, bool if_running);
void hns3_external_lb_restore(struct net_device *ndev, bool if_running);
#endif