/*-
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 * Copyright (c) 2021-2022 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
 *       Do not rely on the internals of this implementation.  They are
 *       highly likely to change as we improve the integration with
 *       FreeBSD mbufs.
 */

#ifndef	_LINUXKPI_LINUX_SKBUFF_H
#define	_LINUXKPI_LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/page.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>

/* #define	SKB_DEBUG */
#ifdef SKB_DEBUG
#define	DSKB_TODO	0x01
#define	DSKB_IMPROVE	0x02
#define	DSKB_TRACE	0x10
#define	DSKB_TRACEX	0x20
extern int linuxkpi_debug_skb;

#define	SKB_TODO()							\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TODO)			\
			printf("SKB_TODO %s:%d\n", __func__, __LINE__);	\
	} while (0)
/* NB: the variadic arguments are annotations only and are not printed. */
#define	SKB_IMPROVE(...)						\
	do {								\
		if (linuxkpi_debug_skb & DSKB_IMPROVE)			\
			printf("SKB_IMPROVE %s:%d\n", __func__,		\
			    __LINE__);					\
	} while (0)
#define	SKB_TRACE(_s)							\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p\n", __func__,	\
			    __LINE__, _s);				\
	} while (0)
#define	SKB_TRACE2(_s, _p)						\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p, %p\n", __func__,	\
			    __LINE__, _s, _p);				\
	} while (0)
#define	SKB_TRACE_FMT(_s, _fmt, ...)					\
	do {								\
		if (linuxkpi_debug_skb & DSKB_TRACE)			\
			printf("SKB_TRACE %s:%d %p " _fmt "\n",		\
			    __func__, __LINE__, _s, __VA_ARGS__);	\
	} while (0)
#else
#define	SKB_TODO()		do { } while (0)
#define	SKB_IMPROVE(...)	do { } while (0)
#define	SKB_TRACE(_s)		do { } while (0)
#define	SKB_TRACE2(_s, _p)	do { } while (0)
#define	SKB_TRACE_FMT(_s, ...)	do { } while (0)
#endif

enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

#define	NET_SKB_PAD		max(CACHE_LINE_SIZE, 32)

struct sk_buff_head {
		/* XXX TODO */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	size_t			qlen;
	spinlock_t		lock;
};
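
/*
 * NB: the first two members of struct sk_buff_head match the first two
 * members of struct sk_buff, which lets the head be cast to
 * (struct sk_buff *) and used as the sentinel of a circular doubly-linked
 * list; see, e.g., __skb_queue_head_init() and skb_queue_walk() below.
 */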

enum sk_checksum_flags {
	CHECKSUM_NONE			= 0x00,
	CHECKSUM_UNNECESSARY		= 0x01,
	CHECKSUM_PARTIAL		= 0x02,
	CHECKSUM_COMPLETE		= 0x04,
};

struct skb_frag {
		/* XXX TODO */
	struct page		*page;		/* XXX-BZ These three are a wild guess so far! */
	off_t			offset;
	size_t			size;
};
typedef	struct skb_frag	skb_frag_t;

enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

struct skb_shared_info {
	enum skb_shared_info_gso_type	gso_type;
	uint16_t			gso_size;
	uint16_t			nr_frags;
	struct sk_buff			*frag_list;
	skb_frag_t			frags[64];	/* XXX TODO, 16xpage? */
};

struct sk_buff {
		/* XXX TODO */
	/* struct sk_buff_head */
	struct sk_buff		*next;
	struct sk_buff		*prev;
	int			list;		/* XXX TYPE */
	uint32_t		_alloc_len;	/* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t		len;		/* ? */
	uint32_t		data_len;	/* ? If we have frags? */
	uint32_t		truesize;	/* The total size of all buffers, incl. frags. */
	uint16_t		mac_len;	/* Link-layer header length. */
	__sum16			csum;
	uint16_t		l3hdroff;	/* network header offset from *head */
	uint16_t		l4hdroff;	/* transport header offset from *head */
	uint32_t		priority;
	uint16_t		qmap;		/* queue mapping */
	uint16_t		_spareu16_0;
	enum sk_buff_pkt_type	pkt_type;

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t			cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device	*dev;
	void			*sk;		/* XXX net/sock.h? */

	int		csum_offset, csum_start, ip_summed, protocol;

	uint8_t			*head;			/* Head of buffer. */
	uint8_t			*data;			/* Head of data. */
	uint8_t			*tail;			/* End of data. */
	uint8_t			*end;			/* End of buffer. */

	struct skb_shared_info	*shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void			*m;
	void			(*m_free_func)(void *);

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t			__scratch[0] __aligned(CACHE_LINE_SIZE);
};
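
/*
 * Linear buffer layout (a sketch; assumes an unfragmented skb):
 *
 *	head      data               tail       end
 *	 |         |                  |          |
 *	 v         v                  v          v
 *	 +---------+------------------+----------+
 *	 | headroom|       data       | tailroom |
 *	 +---------+------------------+----------+
 *
 * Invariant: head <= data <= tail <= end.  skb_reserve() advances data and
 * tail (creating headroom), skb_push()/skb_pull() move data, and skb_put()
 * moves tail; for a linear skb, len == tail - data.
 */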

/* -------------------------------------------------------------------------- */

struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t);
void linuxkpi_kfree_skb(struct sk_buff *);

struct sk_buff *linuxkpi_skb_copy(struct sk_buff *, gfp_t);

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
alloc_skb(size_t size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_alloc_skb(size, gfp);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
__dev_alloc_skb(size_t len, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = linuxkpi_dev_alloc_skb(len, gfp);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
dev_alloc_skb(size_t len)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_NOWAIT);
	SKB_IMPROVE();
	SKB_TRACE(skb);
	return (skb);
}

static inline void
kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}

static inline void
dev_kfree_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	kfree_skb(skb);
}

static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}

static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Do we have to defer this?");
	dev_kfree_skb(skb);
}

/* -------------------------------------------------------------------------- */

/* Walk a NULL-terminated skb list; safe against unlinking/freeing "skb". */
#define	skb_list_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next,					\
	    (tmp) = ((skb) != NULL) ? (skb)->next : NULL;		\
	    (skb) != NULL;						\
	    (skb) = (tmp), (tmp) = ((skb) != NULL) ? (skb)->next : NULL)
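
/*
 * Example (a minimal sketch; "list" stands for any object whose ->next
 * points at the first skb of a NULL-terminated chain):
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_list_walk_safe(list, skb, tmp)
 *		kfree_skb(skb);
 */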

/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	skb->data += len;
	skb->tail += len;
}

/*
 * Remove headroom; return the new data pointer; basically make space at
 * the front to copy data in (manually).
 */
static inline void *
skb_push(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
	KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
	    "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->head));
	skb->len  += len;
	skb->data -= len;
	return (skb->data);
}

/*
 * Length of the data on the skb (without any frags)???
 */
static inline size_t
skb_headlen(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->len - skb->data_len);
}

/* Return the end of data (tail pointer). */
static inline uint8_t *
skb_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->tail);
}

/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}

/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}

/*
 * Remove tailroom; return the old tail pointer; basically make space at
 * the end to copy data in (manually).  See also skb_put_data() below.
 */
static inline void *
skb_put(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
	    "len %zu) > end %p, head %p data %p len %u\n", __func__,
	    skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));

	s = skb_tail_pointer(skb);
	if (len == 0)
		return (s);
	skb->tail += len;
	skb->len += len;
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
		    __func__, skb, skb->len, skb->head, skb->data, skb->tail,
		    skb->end, s, len);
#endif
	return (s);
}

/* skb_put() + copying data in. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *s;

	SKB_TRACE2(skb, buf);
	s = skb_put(skb, len);
	if (len == 0)
		return (s);
	memcpy(s, buf, len);
	return (s);
}
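
/*
 * Usage sketch (illustrative only; example_build_frame() and its sizes are
 * hypothetical and not part of this KPI):
 *
 *	static struct sk_buff *
 *	example_build_frame(const void *hdr, size_t hdrlen,
 *	    const void *payload, size_t paylen)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = alloc_skb(hdrlen + paylen, GFP_KERNEL);
 *		if (skb == NULL)
 *			return (NULL);
 *		skb_reserve(skb, hdrlen);
 *		skb_put_data(skb, payload, paylen);
 *		memcpy(skb_push(skb, hdrlen), hdr, hdrlen);
 *		return (skb);
 *	}
 */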

/* skb_put() + filling with zeros. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *s;

	SKB_TRACE(skb);
	s = skb_put(skb, len);
	memset(s, '\0', len);
	return (s);
}

/*
 * Remove len bytes from beginning of data.
 *
 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
 * we return the advanced data pointer so we don't have to keep a temp, correct?
 */
static inline void *
skb_pull(struct sk_buff *skb, size_t len)
{

	SKB_TRACE(skb);
#if 0	/* Apparently this doesn't barf... */
	KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %zu, data %p\n",
	    __func__, skb, skb->len, len, skb->data));
#endif
	if (skb->len < len)
		return (NULL);
	skb->len -= len;
	skb->data += len;
	return (skb->data);
}

/* Reduce skb data to given length or do nothing if smaller already. */
static inline void
__skb_trim(struct sk_buff *skb, unsigned int len)
{

	SKB_TRACE(skb);
	if (skb->len < len)
		return;

	skb->len = len;
	skb->tail = skb->data + skb->len;
}

static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	return (__skb_trim(skb, len));
}

static inline struct skb_shared_info *
skb_shinfo(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->shinfo);
}

static inline void
skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
    off_t offset, size_t size, unsigned int truesize)
{
	struct skb_shared_info *shinfo;

	SKB_TRACE(skb);
#ifdef SKB_DEBUG
	if (linuxkpi_debug_skb & DSKB_TRACEX)
		printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
		    "page %#jx offset %ju size %zu truesize %u\n", __func__,
		    skb, skb->head, skb->data, skb->tail, skb->end, skb->len,
		    fragno, (uintmax_t)(uintptr_t)linux_page_address(page),
		    (uintmax_t)offset, size, truesize);
#endif

	shinfo = skb_shinfo(skb);
	KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
	    "fragno %d too big\n", __func__, skb, fragno));
	shinfo->frags[fragno].page = page;
	shinfo->frags[fragno].offset = offset;
	shinfo->frags[fragno].size = size;
	shinfo->nr_frags = fragno + 1;
	skb->len += size;
	skb->data_len += size;	/* Frag data lives outside the linear area. */
	skb->truesize += truesize;

	/* XXX TODO EXTEND truesize? */
}

/* -------------------------------------------------------------------------- */

/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
#define	skb_queue_walk(_q, skb)						\
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q);	\
	    (skb) = (skb)->next)

#define	skb_queue_walk_safe(_q, skb, tmp)				\
	for ((skb) = (_q)->next, (tmp) = (skb)->next;			\
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
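
/*
 * Example (a minimal sketch; match() is a hypothetical predicate):
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(q, skb, tmp) {
 *		if (match(skb)) {
 *			__skb_unlink(skb, q);
 *			kfree_skb(skb);
 *		}
 *	}
 */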

static inline bool
skb_queue_empty(struct sk_buff_head *q)
{

	SKB_TRACE(q);
	return (q->qlen == 0);
}

static inline void
__skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	q->prev = q->next = (struct sk_buff *)q;
	q->qlen = 0;
}

static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_head_init(q));
}

static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	new->prev = prev;
	new->next = next;
	next->prev = new;
	prev->next = new;
	q->qlen++;
}

static inline void
__skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb, skb->next, q);
}

static inline void
__skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
    struct sk_buff *new)
{

	SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
	__skb_insert(new, skb->prev, skb, q);
}

static inline void
__skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	struct sk_buff *s;

	SKB_TRACE2(q, skb);
	q->qlen++;
	s = (struct sk_buff *)q;
	s->prev->next = skb;
	skb->prev = s->prev;
	skb->next = s;
	s->prev = skb;
}

static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	SKB_TRACE2(q, skb);
	return (__skb_queue_tail(q, skb));
}

static inline struct sk_buff *
skb_peek_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = q->prev;
	SKB_TRACE2(q, skb);
	if (skb == (struct sk_buff *)q)
		return (NULL);
	return (skb);
}

static inline void
__skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	struct sk_buff *p, *n;

	SKB_TRACE2(skb, head);
	head->qlen--;
	p = skb->prev;
	n = skb->next;
	p->next = n;
	n->prev = p;
	skb->prev = skb->next = NULL;
}

static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	return (__skb_unlink(skb, head));
}

static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = q->next;
	if (skb == (struct sk_buff *)q)
		return (NULL);
	if (skb != NULL)
		__skb_unlink(skb, q);
	SKB_TRACE(skb);
	return (skb);
}

static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_dequeue(q));
}

static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	skb = skb_peek_tail(q);
	if (skb != NULL)
		__skb_unlink(skb, q);

	SKB_TRACE2(q, skb);
	return (skb);
}

static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_after(q, (struct sk_buff *)q, skb);
}

static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_head(q, skb);
}

static inline uint32_t
skb_queue_len(struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (head->qlen);
}

static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}

static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}

static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	return (__skb_queue_purge(q));
}

static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	/* XXX what is the q argument good for? */
	return (skb->prev);
}

/* -------------------------------------------------------------------------- */

static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *new;

	new = linuxkpi_skb_copy(skb, gfp);
	SKB_TRACE2(skb, new);
	return (new);
}

static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}

static inline bool
skb_is_nonlinear(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	return (skb->data_len > 0);
}

/* XXX TODO: stub; the condition is always false so no frags are walked. */
#define	skb_walk_frags(_skb, _frag)					\
	for ((_frag) = (_skb); false; (_frag)++)

static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_IMPROVE("Really a TODO but get it away from logging");
	return (false);
}

static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{
	struct sk_buff *b, *e, *n;

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	/* XXX do we need a barrier around this? */
	b = from->next;
	e = from->prev;
	n = to->next;

	b->prev = (struct sk_buff *)to;
	to->next = b;
	e->next = n;
	n->prev = e;

	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}

static inline void
skb_reset_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	skb->l4hdroff = skb->data - skb->head;
}

static inline uint8_t *
skb_transport_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l4hdroff);
}

static inline uint8_t *
skb_network_header(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	return (skb->head + skb->l3hdroff);
}

static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (ENXIO);
}

static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}

/* Not really seen this one but need it as symmetric accessor function. */
static inline void
skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
{

	SKB_TRACE_FMT(skb, "qmap %u", qmap);
	skb->qmap = qmap;
}

static inline uint16_t
skb_get_queue_mapping(struct sk_buff *skb)
{

	SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
	return (skb->qmap);
}

static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

static inline uint8_t *
skb_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

static inline struct sk_buff *
skb_peek(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	SKB_TODO();
	return (NULL);
}

static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}

static inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}

static inline void
skb_reset_tail_pointer(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	/* The tail is a pointer here, not an offset as on Linux. */
	skb->tail = skb->data;
	SKB_TRACE(skb);
}

static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO();	/* XXX refcnt? as in get/put_device? */
	return (skb);
}

static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}

static inline void
skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
{

	SKB_TRACE(skb);
	/* Let us just hope the destination has len space ... */
	memcpy(dst, skb->data, len);
}

#endif	/* _LINUXKPI_LINUX_SKBUFF_H */