// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_txrx_lib.h"

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
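	/* Example: if next_to_use moves from 13 to 18, prev_ntu masks to 8
	 * and val masks to 16, so the tail register is written with 16; a
	 * move from 13 to 15 masks to 8 in both cases and the write below
	 * is skipped.
	 */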
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;

	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;
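	/* (With CHECKSUM_UNNECESSARY set below, csum_level 1 means two
	 * checksum levels, outer and inner, were verified; csum_level is
	 * the number of validated checksums minus one.)
	 */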

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

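	/* ice_rx_csum() documents that skb->protocol must already be set,
	 * so it runs after eth_type_trans() above.
	 */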
	ice_rx_csum(rx_ring, skb, rx_desc, ptype);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * gro receive functions (with/without VLAN tag)
 */
void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
 * @data: packet data pointer
 * @size: packet data size
 * @xdp_ring: XDP ring for transmission
 */
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

	if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;
	tx_buf->gso_segs = 1;
	tx_buf->raw_buf = data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, size);
	dma_unmap_addr_set(tx_buf, dma, dma);

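	/* Build the one descriptor for this frame; ICE_TXD_LAST_DESC_CMD
	 * marks it as the last descriptor of the packet and requests a
	 * completion report from hardware.
	 */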
	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
						      size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_buf->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return ICE_XDP_TX;
}

/**
 * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
 * @xdp: XDP buffer
 * @xdp_ring: XDP Tx ring
 *
 * Returns ICE_XDP_TX on success or ICE_XDP_CONSUMED on failure.
 */
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return ICE_XDP_CONSUMED;

	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
}
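
/* Typical flow in the Rx napi poll (a sketch of the caller, not code in
 * this file): run the XDP program on each buffer, call ice_xmit_xdp_buff()
 * for an XDP_TX verdict or xdp_do_redirect() for XDP_REDIRECT (recording
 * ICE_XDP_REDIR on success), OR the per-packet ICE_XDP_* results together,
 * and pass that mask to ice_finalize_xdp_rx() once the batch is complete.
 */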

/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @rx_ring: Rx ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
{
	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & ICE_XDP_TX) {
		struct ice_ring *xdp_ring =
			rx_ring->vsi->xdp_rings[rx_ring->q_index];

		ice_xdp_ring_update_tail(xdp_ring);
	}
}