/* SPDX-License-Identifier: GPL-2.0 */

/* Stage 5 definitions for creating trace events */

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry
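
/*
 * Rough sketch of how these definitions are consumed (simplified; the
 * authoritative expansion is the DECLARE_EVENT_CLASS() handling in
 * include/trace/trace_events.h, so details here may differ): the
 * TP_STRUCT__entry() body is re-expanded inside a generated helper that
 * walks the dynamic fields and accumulates their sizes, roughly:
 *
 *	static inline int trace_event_get_offsets_<call>(
 *		struct trace_event_data_offsets_<call> *__data_offsets,
 *		... the TP_PROTO() arguments ...)
 *	{
 *		int __data_size = 0;
 *		int __item_length;
 *		struct trace_event_raw_<call> *entry;
 *
 *		<TP_STRUCT__entry() body, expanded with the macros below>
 *
 *		return __data_size;
 *	}
 *
 * That is where the __data_offsets, __data_size, __item_length and entry
 * names referenced by the macros in this file come from.
 */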

/*
 * Fields should never declare an array: e.g. __field(int, arr[5])
 * If they do, it will cause issues in parsing and possibly corrupt the
 * events. To prevent that from happening, test the sizeof() of a pointer
 * to a fictitious type called "struct _test_no_array_##item", which will
 * fail to compile if "item" contains array elements (like "arr[5]").
 *
 * If you hit this, use __array(int, arr, 5) instead.
 */
#undef __field
#define __field(type, item)					\
	{ (void)sizeof(struct _test_no_array_##item *); }
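
/*
 * Illustration (not part of the generated code): with a correct field such
 * as __field(int, foo), the check above expands to the harmless
 *
 *	{ (void)sizeof(struct _test_no_array_foo *); }
 *
 * since the size of a pointer to an incomplete struct type is always known.
 * With a bad declaration such as __field(int, arr[5]), token pasting yields
 *
 *	{ (void)sizeof(struct _test_no_array_arr[5] *); }
 *
 * which is not valid C and stops the build at this point.
 */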

#undef __field_ext
#define __field_ext(type, item, filter_type)			\
	{ (void)sizeof(struct _test_no_array_##item *); }

#undef __field_struct
#define __field_struct(type, item)				\
	{ (void)sizeof(struct _test_no_array_##item *); }

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)		\
	{ (void)sizeof(struct _test_no_array_##item *); }

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__item_length = (len) * sizeof(type);				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= __item_length << 16;			\
	__data_size += __item_length;
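
/*
 * Illustration (a sketch of the encoding, not generated code):
 * __data_offsets->item packs two values into one 32-bit word. The low
 * 16 bits hold the offset of this dynamic field's data from the start of
 * the event record, and the high 16 bits hold its length in bytes. For
 * example, a first dynamic field of 12 bytes whose data lands 32 bytes
 * into the record would be stored as (12 << 16) | 32; consumers recover
 * the two halves by masking the low 16 bits and shifting down the high
 * 16 bits.
 */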

#undef __string
#define __string(item, src) __dynamic_array(char, item,			\
		    strlen((src) ? (const char *)(src) : "(null)") + 1)

#undef __string_len
#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1)

#undef __vstring
#define __vstring(item, fmt, ap) __dynamic_array(char, item,		\
		      __trace_event_vstr_len(fmt, ap))

#undef __rel_dynamic_array
#define __rel_dynamic_array(type, item, len)				\
	__item_length = (len) * sizeof(type);				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data) -	\
			       offsetof(typeof(*entry), __rel_loc_##item) -	\
			       sizeof(u32);				\
	__data_offsets->item |= __item_length << 16;			\
	__data_size += __item_length;
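
/*
 * Illustration (a sketch of the encoding, not generated code): a "rel"
 * dynamic field records its location relative to the field itself rather
 * than to the start of the event. The subtraction above converts the
 * absolute data offset into an offset measured from the byte just past the
 * 32-bit __rel_loc_<item> location word, which is why both
 * offsetof(typeof(*entry), __rel_loc_##item) and sizeof(u32) are taken
 * off. The length still lives in the high 16 bits, exactly as for
 * __dynamic_array().
 */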

#undef __rel_string
#define __rel_string(item, src) __rel_dynamic_array(char, item,			\
		    strlen((src) ? (const char *)(src) : "(null)") + 1)

#undef __rel_string_len
#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, (len) + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * nr_bits bits (e.g. num_possible_cpus() bits for a __cpumask).
 */
#define __bitmask_size_in_bytes_raw(nr_bits)	\
	(((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits)			\
	((__bitmask_size_in_bytes_raw(nr_bits) +		\
	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * nr_bits bits, padded out to the nearest long. This padded size is what
 * is saved in the buffer, so that the recorded length stays consistent
 * with the whole longs that are actually stored.
 */
#define __bitmask_size_in_bytes(nr_bits)				\
	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
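
/*
 * Worked example (assuming a 64-bit kernel, so BITS_PER_LONG / 8 == 8):
 * for nr_bits == 100,
 *
 *	__bitmask_size_in_bytes_raw(100) == (100 + 7) / 8 == 13
 *	__bitmask_size_in_longs(100)     == (13 + 7) / 8  == 2
 *	__bitmask_size_in_bytes(100)     == 2 * 8         == 16
 *
 * so 13 bytes are strictly needed, but 16 bytes (two whole longs) are
 * reserved and recorded.
 */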

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))

#undef __cpumask
#define __cpumask(item) __bitmask(item, nr_cpumask_bits)

#undef __rel_bitmask
#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))

#undef __rel_cpumask
#define __rel_cpumask(item) __rel_bitmask(item, nr_cpumask_bits)

#undef __sockaddr
#define __sockaddr(field, len) __dynamic_array(u8, field, len)

#undef __rel_sockaddr
#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)