/*-
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 * Copyright (c) 2021-2022 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 *       Do not rely on the internals of this implementation.  They are highly
 *       likely to change as we will improve the integration to FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>

/* #define	SKB_DEBUG */
#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

#define	SKB_TODO()							\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TODO)				\
		printf("SKB_TODO %s:%d\n", __func__, __LINE__);		\
    } while (0)
/* XXX The variadic arguments are currently accepted but not printed. */
#define	SKB_IMPROVE(...)						\
    do {								\
	if (linuxkpi_debug_skb & DSKB_IMPROVE)				\
		printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__);	\
    } while (0)
#define	SKB_TRACE(_s)							\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s);	\
    } while (0)
#define	SKB_TRACE2(_s, _p)						\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__,	\
		    _s, _p);						\
    } while (0)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
    do {								\
	if (linuxkpi_debug_skb & DSKB_TRACE)				\
		printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__,	\
		    __LINE__, _s, __VA_ARGS__);				\
    } while (0)
#else
#define	SKB_TODO()		do { } while (0)
#define	SKB_IMPROVE(...)	do { } while (0)
#define	SKB_TRACE(_s)		do { } while (0)
#define	SKB_TRACE2(_s, _p)	do { } while (0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while (0)
#endif

enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)

struct sk_buff_head {
		/* XXX TODO */
	union {
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct sk_buff_head_l {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		} list;
	};
	size_t			qlen;
	spinlock_t		lock;
};

enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

struct skb_frag {
		/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;
	size_t			size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

struct sk_buff {
	/* XXX TODO */
	union {
		/* struct sk_buff_head */
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;
		};
		struct list_head	list;
	};
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* ? */
	uint32_t		data_len;	/* ? If we have frags? */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_flags;		/* Internal flags. */
#define	_SKB_FLAGS_SKBEXTFRAG	0x0001
	enum sk_buff_pkt_type	pkt_type;

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int		csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void			(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
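
/*
 * Illustrative layout of an skb's linear buffer, derived from the field
 * comments above (a sketch, not authoritative):
 *
 *	head        data                 tail          end
 *	 |           |                    |             |
 *	 v           v                    v             v
 *	 +-----------+--------------------+-------------+
 *	 | headroom  |        data        |  tailroom   |
 *	 +-----------+--------------------+-------------+
 *
 * skb_reserve() grows the headroom, skb_push()/skb_pull() move data,
 * and skb_put() moves tail; for a linear skb, len == tail - data.
 */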

/* -------------------------------------------------------------------------- */

struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_build_skb(void *, size_t);
void linuxkpi_kfree_skb(struct sk_buff *);

struct sk_buff *linuxkpi_skb_copy(struct sk_buff *, gfp_t);

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}

static inline struct sk_buff *
build_skb(void *data, unsigned int fragsz)
{
	struct sk_buff *skb;

	skb = linuxkpi_build_skb(data, fragsz);
	SKB_TRACE(skb);
	return (skb);
}
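
/*
 * Illustrative allocation sketch (hypothetical driver code, not part of
 * this KPI; "buf" and "len" are assumed to come from the caller):
 *
 *	struct sk_buff *skb;
 *
 *	skb = dev_alloc_skb(NET_SKB_PAD + len);
 *	if (skb == NULL)
 *		return (-ENOMEM);
 *	skb_reserve(skb, NET_SKB_PAD);
 *	memcpy(skb_put(skb, len), buf, len);
 *
 * skb_reserve() keeps headroom for headers pushed later; skb_put()
 * extends the data area so the payload can be copied in.
 */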

/* -------------------------------------------------------------------------- */

/*
 * XXX BZ review this one for terminal condition as Linux "queues" are special;
 * the last element (with a NULL next pointer) must be visited as well.
 */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = ((skb) != NULL) ? (skb)->next : NULL; \
	    (skb) != NULL;						\
	    (skb) = (tmp), (tmp) = ((skb) != NULL) ? (skb)->next : NULL)

/* Add headroom; only valid as long as no data has been added yet. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}

/*
 * Remove headroom; return new data pointer; basically make space at the
 * front to copy data in (manually).
 */
static inline void *
__skb_push(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
	skb->len  += len;
	skb->data -= len;
	return (skb->data);
}

static inline void *
skb_push(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_push(skb, len));
}
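
/*
 * Illustrative use of skb_push() (sketch; "struct myhdr" is a
 * hypothetical header type and sufficient headroom is assumed):
 *
 *	struct myhdr *hdr;
 *
 *	hdr = skb_push(skb, sizeof(*hdr));
 *	memset(hdr, 0, sizeof(*hdr));
 */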

/*
 * Return the length of the linear data, i.e. the total length minus
 * any data held in frags.
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}

/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}

/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
__skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	if (len == 0)
		return (s);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
		    __func__, skb, skb->len, skb->head, skb->data, skb->tail,
		    skb->end, s, len);
#endif
	return (s);
}

static inline void *
skb_put(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
	return (__skb_put(skb, len));
}

/* skb_put() + copying data in. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *s;

	SKB_TRACE2(skb, buf);
	s = skb_put(skb, len);
	if (len == 0)
		return (s);
	memcpy(s, buf, len);
	return (s);
}

/* skb_put() + filling with zeros. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	s = skb_put(skb, len);
	memset(s, '\0', len);
	return (s);
}

/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %zu, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}
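
/*
 * Illustrative use of skb_pull() to strip a just-parsed header (sketch;
 * "struct myhdr" is a hypothetical header type):
 *
 *	struct myhdr *hdr;
 *
 *	hdr = (struct myhdr *)skb->data;
 *	if (skb_pull(skb, sizeof(*hdr)) == NULL)
 *		return (-EINVAL);
 */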

/* Reduce skb data to given length or do nothing if smaller already. */
static inline void
__skb_trim(struct sk_buff *skb, unsigned int len)
{

	SKB_TRACE(skb);
	if (skb->len < len)
		return;

	skb->len = len;
	skb->tail = skb->data + skb->len;
}

static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}

static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}

static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
		    "page %#jx offset %ju size %zu truesize %u\n", __func__,
		    skb, skb->head, skb->data, skb->tail, skb->end, skb->len,
		    fragno, (uintmax_t)(uintptr_t)linux_page_address(page),
		    (uintmax_t)offset, size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;

	/* XXX TODO EXTEND truesize? */
}
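
/*
 * Illustrative call (sketch; assumes "page" holds "datalen" bytes of
 * receive data at offset 0 and frag slot 0 is unused):
 *
 *	skb_add_rx_frag(skb, 0, page, 0, datalen, PAGE_SIZE);
 */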

/* -------------------------------------------------------------------------- */

/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
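
/*
 * Illustrative walk (sketch; "q" is an initialized queue and "match()"
 * a hypothetical predicate).  The _safe variant permits unlinking the
 * current element:
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(q, skb, tmp) {
 *		if (match(skb)) {
 *			__skb_unlink(skb, q);
 *			kfree_skb(skb);
 *		}
 *	}
 */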

static inline bool
skb_queue_empty(struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen == 0);
}

static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_head_init(q));
}

static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	new->prev = prev;
	new->next = next;
	((struct sk_buff_head_l *)next)->prev = new;
	((struct sk_buff_head_l *)prev)->next = new;
	q->qlen++;
}

static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, ((struct sk_buff_head_l *)skb)->next, q);
}

static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}

static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{

	SKB_TRACE2(q, new);
	__skb_queue_before(q, (struct sk_buff *)q, new);
}

static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new)
{
	SKB_TRACE2(q, new);
	return (__skb_queue_tail(q, new));
}

static inline struct sk_buff *
skb_peek(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->next;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline struct sk_buff *
skb_peek_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->prev;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline void
__skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	struct sk_buff *p, *n;

	SKB_TRACE2(skb, head);
	head->qlen--;
	p = skb->prev;
	n = skb->next;
	p->next = n;
	n->prev = p;
	skb->prev = skb->next = NULL;
}

static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	return (__skb_unlink(skb, head));
}

static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = q->next;
	if (skb == (struct sk_buff *)q)
		return (NULL);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}

static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);

	SKB_TRACE2(q, skb);
	return (skb);
}

static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_head(q, skb);
}

static inline uint32_t
skb_queue_len(struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (head->qlen);
}

static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}

static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}

static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_purge(q));
}
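
/*
 * Illustrative queue lifecycle (sketch only; "process()" is a
 * hypothetical consumer):
 *
 *	struct sk_buff_head q;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&q);
 *	skb_queue_tail(&q, skb);
 *	while ((skb = skb_dequeue(&q)) != NULL)
 *		process(skb);
 *	skb_queue_purge(&q);
 *
 * skb_queue_purge() frees whatever is still queued at teardown.
 */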

static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *new;

	new = linuxkpi_skb_copy(skb, gfp);
	SKB_TRACE2(skb, new);
	return (new);
}

static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}

/* XXX TODO Stub: this never iterates; frag walking is not implemented. */
#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_free_frag(void *frag)
{

	page_frag_free(frag);
}

static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}

static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
___skb_queue_splice_init(const struct sk_buff_head *from,
    struct sk_buff *p, struct sk_buff *n)
{
	struct sk_buff *b, *e;

	b = from->next;
	e = from->prev;

	b->prev = p;
	((struct sk_buff_head_l *)p)->next = b;
	e->next = n;
	((struct sk_buff_head_l *)n)->prev = e;
}

static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	___skb_queue_splice_init(from, (struct sk_buff *)to, to->next);
	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
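
/*
 * Illustrative splice pattern (sketch): move all skbs to a local queue
 * so they can be processed without holding the source queue's lock;
 * "q" is assumed to be an initialized, shared queue:
 *
 *	struct sk_buff_head local;
 *	struct sk_buff *skb;
 *
 *	__skb_queue_head_init(&local);
 *	spin_lock(&q->lock);
 *	skb_queue_splice_init(q, &local);
 *	spin_unlock(&q->lock);
 *	while ((skb = __skb_dequeue(&local)) != NULL)
 *		kfree_skb(skb);
 */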

static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}

static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}

static inline bool
skb_is_nonlinear(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return ((skb->data_len > 0) ? true : false);
}

static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

static inline int
skb_linearize(struct sk_buff *skb)
{

	return (skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0);
}

static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline uint8_t *
skb_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}

static inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}

static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
#ifdef SKB_DOING_OFFSETS_US_NOT
	skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
#endif
	skb->tail = skb->data;
	SKB_TRACE(skb);
}

static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
	return (skb);
}

static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}

static inline int
skb_pad(struct sk_buff *skb, int pad)
{

	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline void
skb_list_del_init(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
napi_consume_skb(struct sk_buff *skb, int budget)
{

	SKB_TRACE(skb);
	SKB_TODO();
}

#define	SKB_WITH_OVERHEAD(_s)						\
	((_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE))
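
/*
 * Illustrative use (sketch): usable payload of a page-sized buffer once
 * the shared info overhead is subtracted:
 *
 *	datasz = SKB_WITH_OVERHEAD(PAGE_SIZE);
 */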

#endif	/* _LINUXKPI_LINUX_SKBUFF_H */