1 /*-
2 * Copyright (c) 2020-2023 The FreeBSD Foundation
3 * Copyright (c) 2021-2023 Bjoern A. Zeeb
4 *
5 * This software was developed by Björn Zeeb under sponsorship from
6 * the FreeBSD Foundation.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 /*
31 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
32 * Do not rely on the internals of this implementation. They are highly
33 * likely to change as we will improve the integration to FreeBSD mbufs.
34 */
35
36 #ifndef _LINUXKPI_LINUX_SKBUFF_H
37 #define _LINUXKPI_LINUX_SKBUFF_H
38
39 #include <linux/kernel.h>
40 #include <linux/page.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/netdev_features.h>
43 #include <linux/list.h>
44 #include <linux/gfp.h>
45 #include <linux/compiler.h>
46 #include <linux/spinlock.h>
47 #include <linux/ktime.h>
48
49 /* #define SKB_DEBUG */
#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

/*
 * Debug macros.  Each expansion is wrapped in do { } while(0) so that a
 * caller's "if (x) SKB_TRACE(s); else ..." cannot mis-bind the else to
 * the macro-internal if (classic dangling-else hazard); the non-debug
 * variants below already follow this convention.
 */
#define	SKB_TODO()							\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TODO)			\
			printf("SKB_TODO %s:%d\n", __func__, __LINE__);	\
	} while (0)
#define	SKB_IMPROVE(...)						\
	do {								\
		if (linuxkpi_debug_skb & DSKB_IMPROVE)			\
			printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__); \
	} while (0)
#define	SKB_TRACE(_s)							\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p\n", __func__,	\
			    __LINE__, _s);				\
	} while (0)
#define	SKB_TRACE2(_s, _p)						\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p, %p\n", __func__,	\
			    __LINE__, _s, _p);				\
	} while (0)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p " _fmt "\n",		\
			    __func__, __LINE__, _s, __VA_ARGS__);	\
	} while (0)
#else
#define	SKB_TODO()		do { } while(0)
#define	SKB_IMPROVE(...)	do { } while(0)
#define	SKB_TRACE(_s)		do { } while(0)
#define	SKB_TRACE2(_s, _p)	do { } while(0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while(0)
#endif
80
/* Linux packet classification; only the values LinuxKPI consumers use. */
enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};
86
/* Hardware timestamp container returned by skb_hwtstamps(). */
struct skb_shared_hwtstamps {
	ktime_t			hwtstamp;	/* hardware RX/TX timestamp */
};
90
91 #define NET_SKB_PAD max(CACHE_LINE_SIZE, 32)
92 #define SKB_DATA_ALIGN(_x) roundup2(_x, CACHE_LINE_SIZE)
93
/*
 * Doubly-linked ring of sk_buffs.  The head itself is a ring node: an
 * empty queue has next == prev == (struct sk_buff *)&head, which is why
 * the anonymous struct and the named sk_buff_head_l must lay out next/prev
 * exactly like the first two members of struct sk_buff.
 */
struct sk_buff_head {
	/* XXX TODO */
	union {
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		/* Named variant so helpers can cast head <-> skb linkage. */
		struct sk_buff_head_l {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		} list;
	};
	size_t			qlen;	/* number of queued skbs */
	spinlock_t		lock;	/* protects the ring and qlen */
};
109
/* Values for sk_buff::ip_summed (checksum offload state). */
enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};
116
/* One page fragment attached to an skb (see skb_add_rx_frag()). */
struct skb_frag {
	/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;		/* data offset within the page */
	size_t			size;		/* bytes of data in this frag */
};
typedef	struct skb_frag	skb_frag_t;
124
/* GSO segmentation type recorded in skb_shared_info. */
enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};
129
/* Out-of-line state shared between clones: GSO info and page frags. */
struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;	/* frags[] entries in use */
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};
137
/*
 * LinuxKPI compat sk_buff.  Buffer layout follows Linux:
 * head <= data <= tail <= end; headroom is [head,data), data is
 * [data,tail), tailroom is [tail,end).  The leading next/prev union
 * must stay first so a sk_buff_head can masquerade as a list node.
 */
struct sk_buff {
	/* XXX TODO */
	union {
		/* struct sk_buff_head */
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct list_head	list;
	};
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* ? */
	uint32_t		data_len;	/* ? If we have frags? */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_flags;		/* Internal flags. */
#define	_SKB_FLAGS_SKBEXTFRAG	0x0001
	enum sk_buff_pkt_type	pkt_type;
	uint16_t		mac_header;	/* offset of mac_header */

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int			csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;		/* Head of buffer. */
	uint8_t			*data;		/* Head of data. */
	uint8_t			*tail;		/* End of data. */
	uint8_t			*end;		/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void			(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
186
187 /* -------------------------------------------------------------------------- */
188
189 struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
190 struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
191 struct sk_buff *linuxkpi_build_skb(void *, size_t);
192 void linuxkpi_kfree_skb(struct sk_buff *);
193
194 struct sk_buff *linuxkpi_skb_copy(struct sk_buff *, gfp_t);
195
196 /* -------------------------------------------------------------------------- */
197
/* Allocate an skb with a data buffer of at least the given size. */
static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}
207
/* Allocate an RX skb (the KPI variant reserves NET_SKB_PAD-style headroom). */
static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}
218
/* __dev_alloc_skb() with the non-sleeping allocation class. */
static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}
229
/* Free an skb (and, via the KPI, any attached FreeBSD mbuf state). */
static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}
236
/* Driver-facing free; same as kfree_skb() in this implementation. */
static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}
243
/* Free from any context; no deferral needed here, so plain dev_kfree_skb(). */
static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}
250
/* Free from interrupt context; currently frees inline (see SKB_IMPROVE). */
static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}
258
/* Wrap a caller-provided data buffer in an skb without copying. */
static inline struct sk_buff *
build_skb(void *data, unsigned int fragsz)
{
	struct sk_buff *skb;

	skb = linuxkpi_build_skb(data, fragsz);
	SKB_TRACE(skb);
	return (skb);
}
268
269 /* -------------------------------------------------------------------------- */
270
/*
 * Walk an skb list linked via ->next (NULL-terminated; NOT a
 * sk_buff_head ring).  The previous form used the assignment
 * "(tmp) = (skb)->next" as the second && operand, so the loop exited
 * before processing the LAST element (whose ->next is NULL).  Use the
 * comma operator so the condition depends only on skb != NULL while
 * still caching ->next, which makes it safe to unlink/free "skb".
 */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next;					\
	    ((skb) != NULL) && ((tmp) = (skb)->next, 1);		\
	    (skb) = (tmp))
274
/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	   "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	/* Only legal while the skb holds no data (len 0, data == tail). */
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	/* Advance data and tail together; headroom grows, data stays empty. */
	skb->data += len;
	skb->tail += len;
}
292
293 /*
294 * Remove headroom; return new data pointer; basically make space at the
295 * front to copy data in (manually).
296 */
297 static inline void *
__skb_push(struct sk_buff * skb,size_t len)298 __skb_push(struct sk_buff *skb, size_t len)
299 {
300 SKB_TRACE(skb);
301 KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
302 "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->data));
303 skb->len += len;
304 skb->data -= len;
305 return (skb->data);
306 }
307
/* Public wrapper around __skb_push(); no extra checks. */
static inline void *
skb_push(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_push(skb, len));
}
315
/*
 * Length of the linear data only: total length minus the bytes that
 * are accounted to page frags (data_len).
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}
326
327
/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}
336
/* Return number of bytes available at end of buffer (end - tail). */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}
347
/* Return number of bytes available at the beginning of buffer (data - head). */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}
357
358
/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
__skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	/* Growing past the allocated buffer is a caller bug. */
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	if (len == 0)
		return (s);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
	printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
	    __func__, skb, skb->len, skb->head, skb->data, skb->tail, skb->end,
	    s, len);
#endif
	return (s);
}
386
/* Public wrapper around __skb_put(); no extra checks. */
static inline void *
skb_put(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_put(skb, len));
}
394
395 /* skb_put() + copying data in. */
396 static inline void *
skb_put_data(struct sk_buff * skb,const void * buf,size_t len)397 skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
398 {
399 void *s;
400
401 SKB_TRACE2(skb, buf);
402 s = skb_put(skb, len);
403 if (len == 0)
404 return (s);
405 memcpy(s, buf, len);
406 return (s);
407 }
408
409 /* skb_put() + filling with zeros. */
410 static inline void *
skb_put_zero(struct sk_buff * skb,size_t len)411 skb_put_zero(struct sk_buff *skb, size_t len)
412 {
413 void *s;
414
415 SKB_TRACE(skb);
416 s = skb_put(skb, len);
417 memset(s, '\0', len);
418 return (s);
419 }
420
421 /*
422 * Remove len bytes from beginning of data.
423 *
424 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
425 * we return the advanced data pointer so we don't have to keep a temp, correct?
426 */
427 static inline void *
skb_pull(struct sk_buff * skb,size_t len)428 skb_pull(struct sk_buff *skb, size_t len)
429 {
430
431 SKB_TRACE(skb);
432 #if 0 /* Apparently this doesn't barf... */
433 KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
434 __func__, skb, skb->len, len, skb->data));
435 #endif
436 if (skb->len < len)
437 return (NULL);
438 skb->len -= len;
439 skb->data += len;
440 return (skb->data);
441 }
442
443 /* Reduce skb data to given length or do nothing if smaller already. */
444 static inline void
__skb_trim(struct sk_buff * skb,unsigned int len)445 __skb_trim(struct sk_buff *skb, unsigned int len)
446 {
447
448 SKB_TRACE(skb);
449 if (skb->len < len)
450 return;
451
452 skb->len = len;
453 skb->tail = skb->data + skb->len;
454 }
455
/* Public wrapper around __skb_trim(). */
static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}
462
/* Accessor for the shared info area (stored as a pointer here, not at end). */
static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}
470
/*
 * Attach a page fragment to slot "fragno" and account its size:
 * len/data_len grow by the data size, truesize by the buffer size.
 */
static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
	printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
	    "page %#jx offset %ju size %zu truesize %u\n", __func__,
	    skb, skb->head, skb->data, skb->tail, skb->end, skb->len, fragno,
	    (uintmax_t)(uintptr_t)linux_page_address(page), (uintmax_t)offset,
	    size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	/* NOTE(review): assumes frags are added in order; verify callers. */
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;

	/* XXX TODO EXTEND truesize? */
}
500
501 /* -------------------------------------------------------------------------- */
502
/*
 * Queue iteration: the head is embedded in the ring, so the terminal
 * condition compares against the head cast to an skb (Linux-style).
 * XXX BZ review this one for terminal condition as Linux "queues" are special.
 */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

/* Safe variant: "tmp" caches ->next so "skb" may be unlinked/freed. */
#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
511
/* True if the queue holds no skbs (checks the counter, not the ring). */
static inline bool
skb_queue_empty(struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen == 0);
}
519
/* Reset the ring to empty: head points at itself, counter zeroed. */
static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}
527
/* Public init wrapper; the lock is not initialized here (XXX Linux does). */
static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_head_init(q));
}
534
/*
 * Link "new" between prev and next and bump qlen.  prev/next may be the
 * queue head itself, hence the casts through sk_buff_head_l whose
 * next/prev layout mirrors struct sk_buff's.
 */
static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	new->prev = prev;
	new->next = next;
	((struct sk_buff_head_l *)next)->prev = new;
	((struct sk_buff_head_l *)prev)->next = new;
	q->qlen++;
}
547
/* Insert "new" immediately after "skb" (which may be the head). */
static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, ((struct sk_buff_head_l *)skb)->next, q);
}
556
/* Insert "new" immediately before "skb" (which may be the head). */
static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}
565
/* Append "new" at the tail (i.e. just before the head node). */
static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{

	SKB_TRACE2(q, new);
	__skb_queue_before(q, (struct sk_buff *)q, new);
}
573
/* Public tail-enqueue; locking is the caller's problem here (XXX). */
static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{
	SKB_TRACE2(q, new);
	return (__skb_queue_tail(q, new));
}
580
581 static inline struct sk_buff *
skb_peek(struct sk_buff_head * q)582 skb_peek(struct sk_buff_head *q)
583 {
584 struct sk_buff *skb;
585
586 skb = q->next;
587 SKB_TRACE2(q, skb);
588 if (skb == (struct sk_buff *)q)
589 return (NULL);
590 return (skb);
591 }
592
593 static inline struct sk_buff *
skb_peek_tail(struct sk_buff_head * q)594 skb_peek_tail(struct sk_buff_head *q)
595 {
596 struct sk_buff *skb;
597
598 skb = q->prev;
599 SKB_TRACE2(q, skb);
600 if (skb == (struct sk_buff *)q)
601 return (NULL);
602 return (skb);
603 }
604
605 static inline void
__skb_unlink(struct sk_buff * skb,struct sk_buff_head * head)606 __skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
607 {
608 SKB_TRACE2(skb, head);
609 struct sk_buff *p, *n;;
610
611 head->qlen--;
612 p = skb->prev;
613 n = skb->next;
614 p->next = n;
615 n->prev = p;
616 skb->prev = skb->next = NULL;
617 }
618
/* Public wrapper around __skb_unlink(). */
static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	return (__skb_unlink(skb, head));
}
625
626 static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head * q)627 __skb_dequeue(struct sk_buff_head *q)
628 {
629 struct sk_buff *skb;
630
631 SKB_TRACE(q);
632 skb = q->next;
633 if (skb == (struct sk_buff *)q)
634 return (NULL);
635 if (skb != NULL)
636 __skb_unlink(skb, q);
637 SKB_TRACE(skb);
638 return (skb);
639 }
640
/* Public wrapper around __skb_dequeue(). */
static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}
647
648 static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head * q)649 skb_dequeue_tail(struct sk_buff_head *q)
650 {
651 struct sk_buff *skb;
652
653 skb = skb_peek_tail(q);
654 if (skb != NULL)
655 __skb_unlink(skb, q);
656
657 SKB_TRACE2(q, skb);
658 return (skb);
659 }
660
/* Insert skb at the front (i.e. immediately after the head node). */
static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}
668
/*
 * Insert skb at the front of the queue.  Consistency fix: every other
 * non-"__" wrapper in this file calls its "__" counterpart; this one
 * duplicated __skb_queue_head()'s body instead.  Behavior unchanged.
 */
static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_head(q, skb);
}
676
/* Number of skbs on the queue. */
static inline uint32_t
skb_queue_len(struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (head->qlen);
}
684
/* Lockless snapshot of the queue length (READ_ONCE, may be stale). */
static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}
692
/* Dequeue and free every skb on the queue. */
static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}
702
/* Public wrapper around __skb_queue_purge(). */
static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_purge(q));
}
709
/* Predecessor of skb on the ring; may return the head cast as an skb. */
static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}
718
719 /* -------------------------------------------------------------------------- */
720
/* Deep-copy an skb via the KPI; returns NULL on allocation failure. */
static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *new;

	new = linuxkpi_skb_copy(skb, gfp);
	SKB_TRACE2(skb, new);
	return (new);
}
730
/* XXX TODO: unimplemented stub; Linux would drop a reference/free here. */
static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}
737
/* XXX TODO: unimplemented stub; always returns 0xffff. */
static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}
745
/* XXX TODO: unimplemented stub; always returns -1. */
static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}
753
/* XXX TODO: unimplemented stub; returns (dma_addr_t)-1. */
static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}
762
/* XXX TODO: unimplemented stub; returns (size_t)-1, not the frag size. */
static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}
770
/* Stub iterator: the constant-false condition means the body never runs. */
#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)
773
/* XXX TODO: unimplemented stub (Linux returns int; callers beware). */
static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}
780
/* XXX TODO: unimplemented stub; always reports not-writable (false). */
static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}
788
/* XXX TODO: unimplemented stub; always returns NULL. */
static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}
796
/* Release a page-fragment buffer (e.g. one handed to build_skb()). */
static inline void
skb_free_frag(void *frag)
{

	page_frag_free(frag);
}
803
/* XXX TODO: unimplemented stub; always returns NULL (no segmentation). */
static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}
811
/* Stub: GSO not supported here, so always false (silenced TODO). */
static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}
819
/* XXX TODO: unimplemented stub; Linux would clear skb->next here. */
static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}
826
/*
 * Splice the chain [from->next .. from->prev] in between nodes p and n.
 * p and n may be queue heads, hence the sk_buff_head_l casts.  Callers
 * must fix up qlen and reinitialize "from" themselves.
 */
static inline void
___skb_queue_splice(const struct sk_buff_head *from,
    struct sk_buff *p, struct sk_buff *n)
{
	struct sk_buff *b, *e;

	b = from->next;
	e = from->prev;

	b->prev = p;
	((struct sk_buff_head_l *)p)->next = b;
	e->next = n;
	((struct sk_buff_head_l *)n)->prev = e;
}
841
/* Move all skbs from "from" to the FRONT of "to"; "from" ends up empty. */
static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice(from, (struct sk_buff *)to, to->next);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
855
/* Move all skbs from "from" to the TAIL of "to"; "from" ends up empty. */
static inline void
skb_queue_splice_tail_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice(from, to->prev, (struct sk_buff *)to);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
869
/* Record the current data pointer as the transport (L4) header offset. */
static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}
877
/* Pointer to the transport (L4) header, computed from the stored offset. */
static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}
885
/* Pointer to the network (L3) header, computed from the stored offset. */
static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}
893
894 static inline bool
skb_is_nonlinear(struct sk_buff * skb)895 skb_is_nonlinear(struct sk_buff *skb)
896 {
897 SKB_TRACE(skb);
898 return ((skb->data_len > 0) ? true : false);
899 }
900
/* XXX TODO: unimplemented stub; always fails with ENXIO (positive errno). */
static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (ENXIO);
}
908
/* Pull frag data into the linear buffer; no-op (0) if already linear. */
static inline int
skb_linearize(struct sk_buff *skb)
{

	if (!skb_is_nonlinear(skb))
		return (0);
	return (__skb_linearize(skb));
}
915
/* XXX TODO: unimplemented stub; always fails with -ENXIO (Linux style). */
static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}
923
/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}
932
/* Return the TX queue this skb was mapped to. */
static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}
940
/* XXX TODO: stub; no clone tracking yet, so never reports a cloned header. */
static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}
948
/* Pointer to the link-layer header, computed from the stored offset. */
static inline uint8_t *
skb_mac_header(const struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return (skb->head + skb->mac_header);
}
955
/* Record the current data pointer as the link-layer header offset. */
static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	skb->mac_header = skb->data - skb->head;
}
962
/* Set the link-layer header offset to data + len relative to head. */
static inline void
skb_set_mac_header(struct sk_buff *skb, const size_t len)
{
	SKB_TRACE(skb);
	skb_reset_mac_header(skb);
	skb->mac_header += len;
}
970
/* XXX TODO: unimplemented stub; no hw timestamps stored, returns NULL. */
static inline struct skb_shared_hwtstamps *
skb_hwtstamps(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}
978
/* XXX TODO: unimplemented stub; Linux would detach the owning socket. */
static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}
985
/* XXX TODO: stub; returns the sum unchanged rather than unfolding it. */
static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}
992
/* XXX TODO: unimplemented stub; receive checksum is not updated. */
static __inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}
998
/* Reset tail to data, discarding any data length bookkeeping in tail. */
static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
#ifdef SKB_DOING_OFFSETS_US_NOT
	/* Linux stores tail as an offset; we keep a real pointer. */
	skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
#endif
	skb->tail = skb->data;
	SKB_TRACE(skb);
}
1010
/* XXX TODO: no refcounting yet; returns the skb without taking a reference. */
static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
	return (skb);
}
1018
/* XXX TODO: unimplemented stub; always returns NULL (as if allocation failed). */
static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}
1026
/* Copy len bytes of linear data out of the skb; dst must be large enough. */
static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}
1035
/* XXX TODO: unimplemented stub; always fails with -1. */
static inline int
skb_pad(struct sk_buff *skb, int pad)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}
1044
/* XXX TODO: unimplemented stub; skb is not actually removed from its list. */
static inline void
skb_list_del_init(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	SKB_TODO();
}
1052
/* XXX TODO: unimplemented stub; skb is neither recycled nor freed. */
static inline void
napi_consume_skb(struct sk_buff *skb, int budget)
{

	SKB_TRACE(skb);
	SKB_TODO();
}
1060
/* XXX TODO: unimplemented stub; always returns NULL (cf. build_skb()). */
static inline struct sk_buff *
napi_build_skb(void *data, size_t len)
{

	SKB_TODO();
	return (NULL);
}
1068
/* XXX TODO: unimplemented stub; always returns hash 0. */
static inline uint32_t
skb_get_hash(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0);
}
1076
/* XXX TODO: unimplemented stub; page-pool recycling is not supported. */
static inline void
skb_mark_for_recycle(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}
1083
/* XXX TODO: unimplemented stub; always fails with -1. */
static inline int
skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}
1091
/*
 * Usable data size once the shared-info overhead is subtracted.
 * Fix: parenthesize the whole expansion (macro hygiene); the bare
 * "(_s) - ALIGN(...)" form mis-parses in expressions such as
 * "x * SKB_WITH_OVERHEAD(y)".
 */
#define	SKB_WITH_OVERHEAD(_s)						\
	((_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE))
1094
1095 #endif /* _LINUXKPI_LINUX_SKBUFF_H */
1096