/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS	MAX_SKB_FRAGS

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

struct sk_msg_sg {
	u32 start;
	u32 curr;
	u32 end;
	u32 size;
	u32 copybreak;
	bool copy[MAX_MSG_FRAGS];
	/* The extra element is used for chaining the front and rear
	 * sections when the list becomes partitioned (e.g. end < start).
	 * The crypto APIs require the chaining.
	 */
	struct scatterlist data[MAX_MSG_FRAGS + 1];
};

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg sg;
	void *data;
	void *data_end;
	u32 apply_bytes;
	u32 cork_bytes;
	u32 flags;
	struct sk_buff *skb;
	struct sock *sk_redir;
	struct sock *sk;
	struct list_head list;
};

struct sk_psock_progs {
	struct bpf_prog *msg_parser;
	struct bpf_prog *skb_parser;
	struct bpf_prog *skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
};

struct sk_psock_link {
	struct list_head list;
	struct bpf_map *map;
	void *link_raw;
};

struct sk_psock_parser {
	struct strparser strp;
	bool enabled;
	void (*saved_data_ready)(struct sock *sk);
};

struct sk_psock_work_state {
	struct sk_buff *skb;
	u32 len;
	u32 off;
};

struct sk_psock {
	struct sock *sk;
	struct sock *sk_redir;
	u32 apply_bytes;
	u32 cork_bytes;
	u32 eval;
	struct sk_msg *cork;
	struct sk_psock_progs progs;
	struct sk_psock_parser parser;
	struct sk_buff_head ingress_skb;
	struct list_head ingress_msg;
	unsigned long state;
	struct list_head link;
	spinlock_t link_lock;
	refcount_t refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	struct proto *sk_proto;
	struct sk_psock_work_state work_state;
	struct work_struct work;
	union {
		struct rcu_head rcu;
		struct work_struct gc;
	};
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = MAX_MSG_FRAGS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == MAX_MSG_FRAGS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_clear_meta(struct sk_msg *msg)
{
	memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
}

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != MAX_MSG_FRAGS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, MAX_MSG_FRAGS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length = size;
	dst->sg.size += size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return (msg->sg.end == msg->sg.start) && msg->sg.size;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	if (sk_msg_full(msg))
		return MAX_MSG_FRAGS;

	return msg->sg.end >= msg->sg.start ?
		msg->sg.end - msg->sg.start :
		msg->sg.end + (MAX_MSG_FRAGS - msg->sg.start);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (msg->sg.copy[msg->sg.start]) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	msg->sg.copy[msg->sg.end] = true;
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		msg->sg.copy[i] = copy_state;
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}
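
/*
 * Illustrative sketch only, not part of the upstream API: a hypothetical
 * helper showing how the ring helpers above combine to append one page to
 * an empty sk_msg. sk_msg_example_page_append() is an invented name.
 */
static inline void sk_msg_example_page_append(struct sk_msg *msg,
					      struct page *page, u32 len)
{
	sk_msg_init(msg);			/* empty ring: start == end == 0, size == 0 */
	sk_msg_page_add(msg, page, len, 0);	/* grabs a page ref, sets copy[], advances sg.end */
	WARN_ON(sk_msg_elem_used(msg) != 1);	/* exactly one element is now in use */
	sk_msg_compute_data_pointers(msg);	/* copy[] is set, so data/data_end stay NULL */
}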

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}

static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	list_add_tail(&msg->list, &psock->ingress_msg);
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk->sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);
#if defined(CONFIG_BPF_STREAM_PARSER)
void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link);
#else
static inline void sk_psock_unlink(struct sock *sk,
				   struct sk_psock_link *link)
{
}
#endif

void __sk_psock_purge_ingress_msg(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_update_proto(struct sock *sk,
					 struct sk_psock *psock,
					 struct proto *ops)
{
	psock->saved_unhash = sk->sk_prot->unhash;
	psock->saved_close = sk->sk_prot->close;
	psock->saved_write_space = sk->sk_write_space;

	psock->sk_proto = sk->sk_prot;
	sk->sk_prot = ops;
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	sk->sk_write_space = psock->saved_write_space;

	if (psock->sk_proto) {
		struct inet_connection_sock *icsk = inet_csk(sk);
		bool has_ulp = !!icsk->icsk_ulp_data;

		if (has_ulp)
			tcp_update_ulp(sk, psock->sk_proto);
		else
			sk->sk_prot = psock->sk_proto;
		psock->sk_proto = NULL;
	}
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline struct sk_psock *sk_psock_get_checked(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock) {
		if (sk->sk_prot->recvmsg != tcp_bpf_recvmsg) {
			psock = ERR_PTR(-EBUSY);
			goto out;
		}

		if (!refcount_inc_not_zero(&psock->refcnt))
			psock = ERR_PTR(-EBUSY);
	}
out:
	rcu_read_unlock();
	return psock;
}
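
/*
 * Illustrative sketch only, not part of the upstream API: a hypothetical
 * caller that looks up the psock under RCU and queues an ingress message
 * only while transmit is still enabled. sk_psock_example_queue() is an
 * invented name.
 */
static inline bool sk_psock_example_queue(struct sock *sk, struct sk_msg *msg)
{
	struct sk_psock *psock;
	bool queued = false;

	rcu_read_lock();
	psock = sk_psock(sk);				/* may be NULL if no psock is attached */
	if (psock && sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		sk_psock_queue_msg(psock, msg);		/* append to the ingress_msg list */
		queued = true;
	}
	rcu_read_unlock();
	return queued;
}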

static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
void sk_psock_destroy(struct rcu_head *rcu);
void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}

static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->parser.enabled)
		psock->parser.saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->skb_parser, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

#endif /* _LINUX_SKMSG_H */