/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

/* TX/RX acceleration hooks (GENEVE SWP, UDP GSO, kTLS, IPsec) shared by the
 * mlx5e data path. All helpers are static inline; which ones do real work is
 * controlled by CONFIG_GENEVE, CONFIG_MLX5_EN_TLS and CONFIG_MLX5_EN_IPSEC.
 */

#ifndef __MLX5E_EN_ACCEL_H__
#define __MLX5E_EN_ACCEL_H__

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en.h"
#include "en/txrx.h"

#if IS_ENABLED(CONFIG_GENEVE)
#include <net/geneve.h>

/* GENEVE TX offload is gated on the device's software-parser (SWP)
 * capability; without CONFIG_GENEVE this always reports false (see below).
 */
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return mlx5_tx_swp_supported(mdev);
}

/* Populate the eth segment's SWP offsets for a GENEVE-encapsulated skb.
 *
 * Parses the outer L3 header (IPv4 protocol field, or ipv6_find_hdr() for
 * IPv6) and silently returns — leaving @eseg untouched — unless the outer
 * L4 is UDP destined to GENEVE_UDP_PORT. Otherwise fills a mlx5e_swp_spec
 * with the outer protocols plus the inner L3/L4 protocols (chosen by the
 * inner IP header's version field) and applies it via mlx5e_set_eseg_swp().
 *
 * @ihs: inline header size; when non-zero and a VLAN tag is present, the
 *       SWP offsets are shifted to account for the inserted VLAN header.
 */
static inline void
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	struct mlx5e_swp_spec swp_spec = {};
	unsigned int offset = 0;
	__be16 l3_proto;
	u8 l4_proto;

	l3_proto = vlan_get_protocol(skb);
	switch (l3_proto) {
	case htons(ETH_P_IP):
		l4_proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		/* Neither IPv4 nor IPv6 — nothing to offload. */
		return;
	}

	/* Only UDP traffic to the well-known GENEVE port is handled here. */
	if (l4_proto != IPPROTO_UDP ||
	    udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
		return;
	swp_spec.l3_proto = l3_proto;
	swp_spec.l4_proto = l4_proto;
	swp_spec.is_tun = true;
	/* Inner protocol family comes from the inner IP header's version
	 * nibble, not from any outer metadata.
	 */
	if (inner_ip_hdr(skb)->version == 6) {
		swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
		swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
	} else {
		swp_spec.tun_l3_proto = htons(ETH_P_IP);
		swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
	}

	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
	if (skb_vlan_tag_present(skb) && ihs)
		mlx5e_eseg_swp_offsets_add_vlan(eseg);
}

#else
/* CONFIG_GENEVE disabled: GENEVE TX offload is never allowed. */
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return false;
}

#endif /* CONFIG_GENEVE */

/* For UDP GSO, rewrite the UDP header length to cover one segment
 * (gso_size payload bytes plus the UDP header) instead of the whole
 * super-skb.
 */
static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
	int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);

	udp_hdr(skb)->len = htons(payload_len);
}

/* Per-skb TX offload state, carried from mlx5e_accel_tx_begin() through
 * mlx5e_accel_tx_finish(). Members exist only when the matching offload
 * is compiled in.
 */
struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_tx_tls_state tls;
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_accel_tx_ipsec_state ipsec;
#endif
};

/* Pre-WQE offload processing for @skb on @sq: fixes up UDP GSO headers,
 * then runs the kTLS and IPsec TX handlers, recording their state in
 * @state for the later eseg/finish stages.
 *
 * Returns false when an offload handler rejects the skb (NOTE(review):
 * presumably the caller must then stop building the WQE for this skb —
 * confirm against the callers in the TX path).
 */
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
					struct mlx5e_txqsq *sq,
					struct sk_buff *skb,
					struct mlx5e_accel_tx_state *state)
{
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		mlx5e_udp_gso_handle_tx_skb(skb);

#ifdef CONFIG_MLX5_EN_TLS
	/* May send SKBs and WQEs. */
	if (mlx5e_ktls_skb_offloaded(skb))
		if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
						       &state->tls)))
			return false;
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
			return false;
	}
#endif

	return true;
}

/* Number of offload "ids" bytes the WQE needs for this skb: the IPsec
 * ids length when the SQ has IPsec enabled, 0 otherwise.
 */
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
						  struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
		return mlx5e_ipsec_tx_ids_len(&state->ipsec);
#endif

	return 0;
}

/* Part of the eseg touched by TX offloads */
#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)

/* Fill the offload-owned part of the eth segment: IPsec metadata for
 * xfrm-offloaded skbs, and GENEVE SWP offsets for encapsulated skbs
 * with partial checksum.
 */
static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
				       struct sk_buff *skb,
				       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (xfrm_offload(skb))
		mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif

#if IS_ENABLED(CONFIG_GENEVE)
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
}

/* Post-build WQE fixups using the state captured in mlx5e_accel_tx_begin():
 * kTLS patches the control segment; IPsec appends its trailer via @inlseg
 * when the skb had an xfrm offload context and a non-zero trailer length.
 */
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
					 struct mlx5e_tx_wqe *wqe,
					 struct mlx5e_accel_tx_state *state,
					 struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
	mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
	    state->ipsec.xo && state->ipsec.tailen)
		mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
#endif
}

/* RX-side acceleration setup/teardown — currently delegates to kTLS only. */
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
	return mlx5e_ktls_init_rx(priv);
}

static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5e_ktls_cleanup_rx(priv);
}
#endif /* __MLX5E_EN_ACCEL_H__ */