/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_DST_METADATA_H
#define __NET_DST_METADATA_H 1

#include <linux/skbuff.h>
#include <net/ip_tunnels.h>
#include <net/macsec.h>
#include <net/dst.h>

enum metadata_type {
	METADATA_IP_TUNNEL,
	METADATA_HW_PORT_MUX,
	METADATA_MACSEC,
	METADATA_XFRM,
};

struct hw_port_info {
	struct net_device *lower_dev;
	u32 port_id;
};

struct macsec_info {
	sci_t sci;
};

struct xfrm_md_info {
	u32 if_id;
	int link;
	struct dst_entry *dst_orig;
};

struct metadata_dst {
	struct dst_entry	dst;
	enum metadata_type	type;
	union {
		struct ip_tunnel_info	tun_info;
		struct hw_port_info	port_info;
		struct macsec_info	macsec_info;
		struct xfrm_md_info	xfrm_info;
	} u;
};

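/**
 * skb_metadata_dst - return the skb's dst entry as a metadata_dst
 * @skb: socket buffer to inspect
 *
 * Returns the attached dst cast to a metadata_dst if it carries the
 * DST_METADATA flag, or NULL if the skb has no dst or the dst is a
 * regular (non-metadata) entry.
 */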
static inline struct metadata_dst *skb_metadata_dst(const struct sk_buff *skb)
{
	struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);

	if (md_dst && md_dst->dst.flags & DST_METADATA)
		return md_dst;

	return NULL;
}

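/**
 * skb_tunnel_info - fetch IP tunnel metadata associated with an skb
 * @skb: socket buffer to inspect
 *
 * Looks first for a METADATA_IP_TUNNEL metadata dst, then falls back to
 * lightweight tunnel state of type LWTUNNEL_ENCAP_IP or LWTUNNEL_ENCAP_IP6
 * attached to a regular dst.  Returns NULL when neither is present.
 */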
static inline struct ip_tunnel_info *
skb_tunnel_info(const struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	struct dst_entry *dst;

	if (md_dst && md_dst->type == METADATA_IP_TUNNEL)
		return &md_dst->u.tun_info;

	dst = skb_dst(skb);
	if (dst && dst->lwtstate &&
	    (dst->lwtstate->type == LWTUNNEL_ENCAP_IP ||
	     dst->lwtstate->type == LWTUNNEL_ENCAP_IP6))
		return lwt_tun_info(dst->lwtstate);

	return NULL;
}

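/* The XFRM metadata lives directly in the lwtunnel state's data area. */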
static inline struct xfrm_md_info *lwt_xfrm_info(struct lwtunnel_state *lwt)
{
	return (struct xfrm_md_info *)lwt->data;
}

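/**
 * skb_xfrm_md_info - fetch XFRM interface metadata associated with an skb
 * @skb: socket buffer to inspect
 *
 * Mirrors skb_tunnel_info(): prefers a METADATA_XFRM metadata dst and
 * falls back to LWTUNNEL_ENCAP_XFRM lightweight tunnel state.
 */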
static inline struct xfrm_md_info *skb_xfrm_md_info(const struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	struct dst_entry *dst;

	if (md_dst && md_dst->type == METADATA_XFRM)
		return &md_dst->u.xfrm_info;

	dst = skb_dst(skb);
	if (dst && dst->lwtstate &&
	    dst->lwtstate->type == LWTUNNEL_ENCAP_XFRM)
		return lwt_xfrm_info(dst->lwtstate);

	return NULL;
}

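/* True when the skb carries a real (routing) dst rather than a metadata-only one. */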
static inline bool skb_valid_dst(const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	return dst && !(dst->flags & DST_METADATA);
}

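/**
 * skb_metadata_dst_cmp - compare the metadata dsts of two skbs
 * @skb_a: first skb
 * @skb_b: second skb
 *
 * Returns 0 when neither skb carries a dst reference, or when both carry
 * metadata of the same type that compares equal (including IP tunnel
 * options); returns non-zero otherwise.
 */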
static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
					const struct sk_buff *skb_b)
{
	const struct metadata_dst *a, *b;

	if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
		return 0;

	a = (const struct metadata_dst *) skb_dst(skb_a);
	b = (const struct metadata_dst *) skb_dst(skb_b);

	if (!a != !b || a->type != b->type)
		return 1;

	switch (a->type) {
	case METADATA_HW_PORT_MUX:
		return memcmp(&a->u.port_info, &b->u.port_info,
			      sizeof(a->u.port_info));
	case METADATA_IP_TUNNEL:
		return memcmp(&a->u.tun_info, &b->u.tun_info,
			      sizeof(a->u.tun_info) +
					a->u.tun_info.options_len);
	case METADATA_MACSEC:
		return memcmp(&a->u.macsec_info, &b->u.macsec_info,
			      sizeof(a->u.macsec_info));
	case METADATA_XFRM:
		return memcmp(&a->u.xfrm_info, &b->u.xfrm_info,
			      sizeof(a->u.xfrm_info));
	default:
		return 1;
	}
}

void metadata_dst_free(struct metadata_dst *);
struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
					gfp_t flags);
void metadata_dst_free_percpu(struct metadata_dst __percpu *md_dst);
struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags);

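/* Allocate a fresh METADATA_IP_TUNNEL dst with room for md_size option bytes. */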
static inline struct metadata_dst *tun_rx_dst(int md_size)
{
	struct metadata_dst *tun_dst;

	tun_dst = metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, GFP_ATOMIC);
	if (!tun_dst)
		return NULL;

	tun_dst->u.tun_info.options_len = 0;
	tun_dst->u.tun_info.mode = 0;
	return tun_dst;
}

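/**
 * tun_dst_unclone - replace a shared tunnel metadata dst with a private copy
 * @skb: socket buffer whose metadata dst should be uncloned
 *
 * Allocates a new METADATA_IP_TUNNEL dst, copies the existing tunnel info
 * (including trailing option bytes) into it, reinitialises the dst cache
 * where configured, and attaches the copy to @skb in place of the shared
 * one.  Returns the new metadata dst, or an ERR_PTR() on failure.
 */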
static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	int md_size;
	struct metadata_dst *new_md;

	if (!md_dst || md_dst->type != METADATA_IP_TUNNEL)
		return ERR_PTR(-EINVAL);

	md_size = md_dst->u.tun_info.options_len;
	new_md = metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, GFP_ATOMIC);
	if (!new_md)
		return ERR_PTR(-ENOMEM);

	unsafe_memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
		      sizeof(struct ip_tunnel_info) + md_size,
		      /* metadata_dst_alloc() reserves room (md_size bytes) for
		       * options right after the ip_tunnel_info struct.
		       */);
#ifdef CONFIG_DST_CACHE
	/* Unclone the dst cache if there is one */
	if (new_md->u.tun_info.dst_cache.cache) {
		int ret;

		ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC);
		if (ret) {
			metadata_dst_free(new_md);
			return ERR_PTR(ret);
		}
	}
#endif

	skb_dst_drop(skb);
	skb_dst_set(skb, &new_md->dst);
	return new_md;
}

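/* Convenience wrapper around tun_dst_unclone() that returns the writable
 * tunnel info, or NULL if the unclone failed.
 */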
static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
{
	struct metadata_dst *dst;

	dst = tun_dst_unclone(skb);
	if (IS_ERR(dst))
		return NULL;

	return &dst->u.tun_info;
}

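/* Build an IPv4 receive-side tunnel metadata dst from the given key fields. */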
static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
						    __be32 daddr,
						    __u8 tos, __u8 ttl,
						    __be16 tp_dst,
						    const unsigned long *flags,
						    __be64 tunnel_id,
						    int md_size)
{
	struct metadata_dst *tun_dst;

	tun_dst = tun_rx_dst(md_size);
	if (!tun_dst)
		return NULL;

	ip_tunnel_key_init(&tun_dst->u.tun_info.key,
			   saddr, daddr, tos, ttl,
			   0, 0, tp_dst, tunnel_id, flags);
	return tun_dst;
}

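/* As __ip_tun_set_dst(), taking the addresses, TOS and TTL from the skb's
 * outer IPv4 header.
 *
 * Illustrative receive-path usage (a sketch, not code from this file, along
 * the lines of what a collect-metadata tunnel driver does):
 *
 *	tun_dst = ip_tun_rx_dst(skb, flags, key, 0);
 *	if (!tun_dst)
 *		goto drop;
 *	skb_dst_set(skb, &tun_dst->dst);
 */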
static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
						 const unsigned long *flags,
						 __be64 tunnel_id,
						 int md_size)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
				0, flags, tunnel_id, md_size);
}

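/* Build an IPv6 receive-side tunnel metadata dst from the given key fields. */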
static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
						       const struct in6_addr *daddr,
						       __u8 tos, __u8 ttl,
						       __be16 tp_dst,
						       __be32 label,
						       const unsigned long *flags,
						       __be64 tunnel_id,
						       int md_size)
{
	struct metadata_dst *tun_dst;
	struct ip_tunnel_info *info;

	tun_dst = tun_rx_dst(md_size);
	if (!tun_dst)
		return NULL;

	info = &tun_dst->u.tun_info;
	info->mode = IP_TUNNEL_INFO_IPV6;
	ip_tunnel_flags_copy(info->key.tun_flags, flags);
	info->key.tun_id = tunnel_id;
	info->key.tp_src = 0;
	info->key.tp_dst = tp_dst;

	info->key.u.ipv6.src = *saddr;
	info->key.u.ipv6.dst = *daddr;

	info->key.tos = tos;
	info->key.ttl = ttl;
	info->key.label = label;

	return tun_dst;
}

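/* As __ipv6_tun_set_dst(), taking the addresses, traffic class, hop limit and
 * flow label from the skb's outer IPv6 header.
 */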
static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
						    const unsigned long *flags,
						    __be64 tunnel_id,
						    int md_size)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);

	return __ipv6_tun_set_dst(&ip6h->saddr, &ip6h->daddr,
				  ipv6_get_dsfield(ip6h), ip6h->hop_limit,
				  0, ip6_flowlabel(ip6h), flags, tunnel_id,
				  md_size);
}
#endif /* __NET_DST_METADATA_H */