/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK      256

/* The datasheets for the X710 and XL710 indicate that the maximum value
 * for the ITR is 8160 usecs, which is called out as 0xFF0 with a 2 usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of
 * storing the register value, which is divided by 2, let's use the actual
 * usec values and avoid an excessive amount of translation.
 */
#define IAVF_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define IAVF_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define IAVF_ITR_100K		    10	/* all values below must be even */
#define IAVF_ITR_50K		    20
#define IAVF_ITR_20K		    50
#define IAVF_ITR_18K		    60
#define IAVF_ITR_8K		   122
#define IAVF_MAX_ITR		  8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))

#define IAVF_ITR_RX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
#define IAVF_ITR_TX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
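
/* Illustrative sketch (not driver code): turning a stored ITR setting into
 * the value that would be programmed into a register with 2 usec
 * resolution:
 *
 *	u16 setting = IAVF_ITR_TX_DEF;		 // 50 usecs + dynamic flag
 *	u16 usecs = ITR_TO_REG(setting);	 // 50, flag stripped
 *	u16 reg_val = ITR_REG_ALIGN(usecs) >> 1; // 25, in 2 usec units
 */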

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the rate limit value is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define IAVF_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define IAVF_INTRL_8K              125     /* 8000 ints/sec */
#define IAVF_INTRL_62K             16      /* 62500 ints/sec */
#define IAVF_INTRL_83K             12      /* 83333 ints/sec */
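
/* Worked example (illustrative): the register's 4 usec resolution means a
 * round trip truncates, losing up to 3 usecs:
 *
 *	INTRL_USEC_TO_REG(IAVF_INTRL_8K) == (125 >> 2) | INTRL_ENA == 0x5f
 *	INTRL_REG_TO_USEC(0x5f)          == 0x1f << 2              == 124
 */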

#define IAVF_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere the manual
 * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum iavf_dyn_idx_t {
	IAVF_IDX_ITR0 = 0,
	IAVF_IDX_ITR1 = 1,
	IAVF_IDX_ITR2 = 2,
	IAVF_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define IAVF_RX_ITR    IAVF_IDX_ITR0
#define IAVF_TX_ITR    IAVF_IDX_ITR1
#define IAVF_PE_ITR    IAVF_IDX_ITR2

/* Supported RSS offloads */
#define IAVF_DEFAULT_RSS_HENA ( \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))

#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define iavf_rx_desc iavf_32byte_rx_desc

/**
 * iavf_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
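
/* Typical use (sketch): testing the "descriptor done" bit before reading
 * the rest of the descriptor. IAVF_RX_DESC_STATUS_DD_SHIFT is assumed to
 * come from iavf_type.h:
 *
 *	if (!iavf_test_staterr(rx_desc, BIT_ULL(IAVF_RX_DESC_STATUS_DD_SHIFT)))
 *		break;	// HW hasn't written this descriptor back yet
 */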

/* Helpers to advance a ring index, wrapping back to zero at the end of
 * the ring
 */
#define IAVF_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(r)->next_to_clean = (i);	\
	} while (0)

#define IAVF_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(n) = IAVF_RX_DESC((r), (i));	\
	} while (0)

#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		IAVF_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)
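
/* Sketch of a cleanup loop built on these helpers (hypothetical shape,
 * not a verbatim copy of the driver's loop):
 *
 *	u16 i = rx_ring->next_to_clean;
 *	union iavf_rx_desc *rx_desc = IAVF_RX_DESC(rx_ring, i);
 *
 *	while (likely(total_rx_packets < budget)) {
 *		// ... process rx_desc ...
 *		IAVF_RX_NEXT_DESC_PREFETCH(rx_ring, i, rx_desc);
 *	}
 */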

#define IAVF_MAX_BUFFER_TXD	8
#define IAVF_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IAVF_MAX_READ_REQ_SIZE		4096
#define IAVF_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define IAVF_MAX_DATA_PER_TXD_ALIGNED \
	(IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))
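
/* Worked out (illustrative): 16K - 1 is 0x3fff, and masking off the low
 * 12 bits rounds it down to the nearest 4K multiple:
 *
 *	0x3fff & ~0xfff == 0x3000 == 12288
 *
 * so IAVF_MAX_DATA_PER_TXD_ALIGNED is 12K.
 */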

/**
 * iavf_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int iavf_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
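
/* Example (illustrative): a 32000 byte send works out as
 *
 *	((32000 * 85) >> 20) + 1 == (2720000 >> 20) + 1 == 2 + 1 == 3
 *
 * descriptors, which matches the exact DIV_ROUND_UP(32000, 12288) == 3.
 */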

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IAVF_MIN_DESC_PENDING	4

#define IAVF_TX_FLAGS_HW_VLAN			BIT(1)
#define IAVF_TX_FLAGS_SW_VLAN			BIT(2)
#define IAVF_TX_FLAGS_TSO			BIT(3)
#define IAVF_TX_FLAGS_IPV4			BIT(4)
#define IAVF_TX_FLAGS_IPV6			BIT(5)
#define IAVF_TX_FLAGS_FCCRC			BIT(6)
#define IAVF_TX_FLAGS_FSO			BIT(7)
#define IAVF_TX_FLAGS_FD_SB			BIT(9)
#define IAVF_TX_FLAGS_VXLAN_TUNNEL		BIT(10)
#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(11)
#define IAVF_TX_FLAGS_VLAN_MASK			0xffff0000
#define IAVF_TX_FLAGS_VLAN_PRIO_MASK		0xe0000000
#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT		29
#define IAVF_TX_FLAGS_VLAN_SHIFT		16
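
/* Sketch (not driver code): the VLAN tag is carried in the upper 16 bits
 * of tx_flags, so pulling the tag and its priority back out looks like:
 *
 *	u16 vlan_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
 *		       IAVF_TX_FLAGS_VLAN_SHIFT;
 *	u8 prio = (tx_flags & IAVF_TX_FLAGS_VLAN_PRIO_MASK) >>
 *		  IAVF_TX_FLAGS_VLAN_PRIO_SHIFT;
 */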

struct iavf_tx_buffer {
	struct iavf_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct iavf_queue_stats {
	u64 packets;
	u64 bytes;
};

struct iavf_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_lost_interrupt;
};

struct iavf_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define IAVF_RX_DTYPE_NO_SPLIT      0
#define IAVF_RX_DTYPE_HEADER_SPLIT  1
#define IAVF_RX_DTYPE_SPLIT_ALWAYS  2
#define IAVF_RX_SPLIT_L2      0x1
#define IAVF_RX_SPLIT_IP      0x2
#define IAVF_RX_SPLIT_TCP_UDP 0x4
#define IAVF_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct iavf_ring {
	struct iavf_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	union {
		struct page_pool *pp;	/* Used on Rx for buffer management */
		struct device *dev;	/* Used on Tx for DMA mapping */
	};
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct libeth_fqe *rx_fqes;
		struct iavf_tx_buffer *tx_bi;
	};
	u8 __iomem *tail;
	u32 truesize;

	u16 queue_index;		/* Queue number of ring */

	/* high bit set means dynamic; use accessor routines to read/write.
	 * hardware only supports 2 usec resolution for the ITR registers.
	 * these values always store the USER setting and must be converted
	 * before being programmed into a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define IAVF_TXR_FLAGS_ARM_WB			BIT(1)
/* BIT(2) is free */
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1	BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2	BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2	BIT(5)

	/* stats structs */
	struct iavf_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct iavf_tx_queue_stats tx_stats;
		struct iavf_rx_queue_stats rx_stats;
	};

	int prev_pkt_ctr;		/* For Tx stall detection */
	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct iavf_vsi *vsi;		/* Backreference to associated VSI */
	struct iavf_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	struct sk_buff *skb;		/* When iavf_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * iavf_clean_rx_ring_irq() is called
					 * for this ring.
					 */

	u32 rx_buf_len;
} ____cacheline_internodealigned_in_smp;

#define IAVF_ITR_ADAPTIVE_MIN_INC	0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS	0x0002
#define IAVF_ITR_ADAPTIVE_MAX_USECS	0x007e
#define IAVF_ITR_ADAPTIVE_LATENCY	0x8000
#define IAVF_ITR_ADAPTIVE_BULK		0x0000
#define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY))

struct iavf_ring_container {
	struct iavf_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define iavf_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
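
/* Usage sketch, assuming a q_vector with Tx/Rx ring containers as in
 * iavf.h (clean_one_tx_ring() is hypothetical):
 *
 *	struct iavf_ring *ring;
 *
 *	iavf_for_each_ring(ring, q_vector->tx)
 *		budget -= clean_one_tx_ring(ring, budget);
 */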

bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
void iavf_free_tx_resources(struct iavf_ring *tx_ring);
void iavf_free_rx_resources(struct iavf_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget);
void iavf_detect_recover_hung(struct iavf_vsi *vsi);
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb);

/**
 * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb:     send buffer
 *
 * Returns the number of data descriptors needed for this skb, counting the
 * head and every fragment. The result is always at least one.
 **/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += iavf_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 **/
static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
	if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __iavf_maybe_stop_tx(tx_ring, size);
}

/**
 * iavf_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < IAVF_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __iavf_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != IAVF_MAX_BUFFER_TXD;
}
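
/* Putting the helpers above together, a transmit path might look roughly
 * like this (sketch only; it mirrors the shape of the driver's xmit
 * routine but is not a verbatim copy):
 *
 *	int count = iavf_xmit_descriptor_count(skb);
 *
 *	if (iavf_chk_linearize(skb, count)) {
 *		if (__skb_linearize(skb))
 *			goto out_drop;
 *		count = iavf_txd_use_count(skb->len);
 *	}
 *	// head + frags, a gap of 4, plus one context descriptor
 *	if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1))
 *		return NETDEV_TX_BUSY;
 */
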
/**
 * txring_txq - helper to convert from a ring to a queue
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _IAVF_TXRX_H_ */