1 // SPDX-License-Identifier: GPL-2.0
2 /* Converted from tools/testing/selftests/bpf/verifier/cgroup_skb.c */
3 
4 #include <linux/bpf.h>
5 #include <bpf/bpf_helpers.h>
6 #include "bpf_misc.h"
7 
/*
 * Read every generic __sk_buff field plus the data/data_end packet
 * pointers, then do a bounds-checked 1-byte direct packet read.
 * Privileged: loads fine.  Unprivileged: rejected at the data pointer
 * load (off=76 is offsetof(struct __sk_buff, data) on this ABI, per
 * the expected message below).
 */
SEC("cgroup/skb")
__description("direct packet read test#1 for CGROUP_SKB")
__success __failure_unpriv
__msg_unpriv("invalid bpf_context access off=76 size=4")
__retval(0)
__naked void test_1_for_cgroup_skb(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r4 = *(u32*)(r1 + %[__sk_buff_len]);		\
	r5 = *(u32*)(r1 + %[__sk_buff_pkt_type]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	*(u32*)(r1 + %[__sk_buff_mark]) = r6;		\
	r7 = *(u32*)(r1 + %[__sk_buff_queue_mapping]);	\
	r8 = *(u32*)(r1 + %[__sk_buff_protocol]);	\
	r9 = *(u32*)(r1 + %[__sk_buff_vlan_present]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
	  __imm_const(__sk_buff_protocol, offsetof(struct __sk_buff, protocol)),
	  __imm_const(__sk_buff_queue_mapping, offsetof(struct __sk_buff, queue_mapping)),
	  __imm_const(__sk_buff_vlan_present, offsetof(struct __sk_buff, vlan_present))
	: __clobber_all);
}
42 
/*
 * Read (and, for priority, also write) the second set of __sk_buff
 * fields: vlan_tci, vlan_proto, priority, ingress_ifindex, tc_index,
 * hash.  Expected to load for both privileged and unprivileged users.
 */
SEC("cgroup/skb")
__description("direct packet read test#2 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_2_for_cgroup_skb(void)
{
	asm volatile ("					\
	r4 = *(u32*)(r1 + %[__sk_buff_vlan_tci]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_vlan_proto]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_priority]);	\
	*(u32*)(r1 + %[__sk_buff_priority]) = r6;	\
	r7 = *(u32*)(r1 + %[__sk_buff_ingress_ifindex]);\
	r8 = *(u32*)(r1 + %[__sk_buff_tc_index]);	\
	r9 = *(u32*)(r1 + %[__sk_buff_hash]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_hash, offsetof(struct __sk_buff, hash)),
	  __imm_const(__sk_buff_ingress_ifindex, offsetof(struct __sk_buff, ingress_ifindex)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)),
	  __imm_const(__sk_buff_tc_index, offsetof(struct __sk_buff, tc_index)),
	  __imm_const(__sk_buff_vlan_proto, offsetof(struct __sk_buff, vlan_proto)),
	  __imm_const(__sk_buff_vlan_tci, offsetof(struct __sk_buff, vlan_tci))
	: __clobber_all);
}
67 
/*
 * Read all five cb[] scratch words and napi_id, then write the cb[]
 * words back.  cb[] is readable and writable from cgroup/skb; napi_id
 * is read-only here (the write-to-napi_id case is a separate negative
 * test below).  Expected to load for both priv and unpriv.
 */
SEC("cgroup/skb")
__description("direct packet read test#3 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_3_for_cgroup_skb(void)
{
	asm volatile ("					\
	r4 = *(u32*)(r1 + %[__sk_buff_cb_0]);		\
	r5 = *(u32*)(r1 + %[__sk_buff_cb_1]);		\
	r6 = *(u32*)(r1 + %[__sk_buff_cb_2]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_cb_3]);		\
	r8 = *(u32*)(r1 + %[__sk_buff_cb_4]);		\
	r9 = *(u32*)(r1 + %[__sk_buff_napi_id]);	\
	*(u32*)(r1 + %[__sk_buff_cb_0]) = r4;		\
	*(u32*)(r1 + %[__sk_buff_cb_1]) = r5;		\
	*(u32*)(r1 + %[__sk_buff_cb_2]) = r6;		\
	*(u32*)(r1 + %[__sk_buff_cb_3]) = r7;		\
	*(u32*)(r1 + %[__sk_buff_cb_4]) = r8;		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])),
	  __imm_const(__sk_buff_cb_1, offsetof(struct __sk_buff, cb[1])),
	  __imm_const(__sk_buff_cb_2, offsetof(struct __sk_buff, cb[2])),
	  __imm_const(__sk_buff_cb_3, offsetof(struct __sk_buff, cb[3])),
	  __imm_const(__sk_buff_cb_4, offsetof(struct __sk_buff, cb[4])),
	  __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
	: __clobber_all);
}
96 
/*
 * Read the socket-tuple view of __sk_buff: family, IPv4/IPv6 local and
 * remote addresses (all four 32-bit words of each IPv6 address) and
 * ports.  Expected to load for both privileged and unprivileged users.
 */
SEC("cgroup/skb")
__description("direct packet read test#4 for CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void test_4_for_cgroup_skb(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_family]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_remote_ip4]);	\
	r4 = *(u32*)(r1 + %[__sk_buff_local_ip4]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_0]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_1]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_2]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_3]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_0]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_1]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_2]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_3]);	\
	r7 = *(u32*)(r1 + %[__sk_buff_remote_port]);	\
	r8 = *(u32*)(r1 + %[__sk_buff_local_port]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_family, offsetof(struct __sk_buff, family)),
	  __imm_const(__sk_buff_local_ip4, offsetof(struct __sk_buff, local_ip4)),
	  __imm_const(__sk_buff_local_ip6_0, offsetof(struct __sk_buff, local_ip6[0])),
	  __imm_const(__sk_buff_local_ip6_1, offsetof(struct __sk_buff, local_ip6[1])),
	  __imm_const(__sk_buff_local_ip6_2, offsetof(struct __sk_buff, local_ip6[2])),
	  __imm_const(__sk_buff_local_ip6_3, offsetof(struct __sk_buff, local_ip6[3])),
	  __imm_const(__sk_buff_local_port, offsetof(struct __sk_buff, local_port)),
	  __imm_const(__sk_buff_remote_ip4, offsetof(struct __sk_buff, remote_ip4)),
	  __imm_const(__sk_buff_remote_ip6_0, offsetof(struct __sk_buff, remote_ip6[0])),
	  __imm_const(__sk_buff_remote_ip6_1, offsetof(struct __sk_buff, remote_ip6[1])),
	  __imm_const(__sk_buff_remote_ip6_2, offsetof(struct __sk_buff, remote_ip6[2])),
	  __imm_const(__sk_buff_remote_ip6_3, offsetof(struct __sk_buff, remote_ip6[3])),
	  __imm_const(__sk_buff_remote_port, offsetof(struct __sk_buff, remote_port))
	: __clobber_all);
}
134 
/*
 * Negative test: tc_classid is not accessible from cgroup/skb programs;
 * the verifier must reject the load for both priv and unpriv.
 */
SEC("cgroup/skb")
__description("invalid access of tc_classid for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}
149 
/*
 * Negative test: data_meta is not accessible from cgroup/skb programs;
 * the verifier must reject the load for both priv and unpriv.
 */
SEC("cgroup/skb")
__description("invalid access of data_meta for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void data_meta_for_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_data_meta]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
	: __clobber_all);
}
164 
/*
 * Negative test: flow_keys is not accessible from cgroup/skb programs;
 * the verifier must reject the load for both priv and unpriv.
 */
SEC("cgroup/skb")
__description("invalid access of flow_keys for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void flow_keys_for_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_flow_keys]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_flow_keys, offsetof(struct __sk_buff, flow_keys))
	: __clobber_all);
}
179 
/*
 * Negative test: reading napi_id is allowed (see test#3 above) but
 * writing it back must be rejected as an invalid context access, for
 * both priv and unpriv.
 */
SEC("cgroup/skb")
__description("invalid write access to napi_id for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void napi_id_for_cgroup_skb(void)
{
	asm volatile ("					\
	r9 = *(u32*)(r1 + %[__sk_buff_napi_id]);	\
	*(u32*)(r1 + %[__sk_buff_napi_id]) = r9;	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
	: __clobber_all);
}
195 
/*
 * 8-byte write to tstamp: accepted for privileged programs, but
 * rejected for unprivileged ones (off=152 is offsetof(struct __sk_buff,
 * tstamp) on this ABI, per the expected message below).
 */
SEC("cgroup/skb")
__description("write tstamp from CGROUP_SKB")
__success __failure_unpriv
__msg_unpriv("invalid bpf_context access off=152 size=8")
__retval(0)
__naked void write_tstamp_from_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = 0;						\
	*(u64*)(r1 + %[__sk_buff_tstamp]) = r0;		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}
212 
/*
 * 8-byte read of tstamp: allowed for both privileged and unprivileged
 * programs (contrast with the write case above, which is priv-only).
 */
SEC("cgroup/skb")
__description("read tstamp from CGROUP_SKB")
__success __success_unpriv __retval(0)
__naked void read_tstamp_from_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = *(u64*)(r1 + %[__sk_buff_tstamp]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}
226 
/* License string placed in the "license" ELF section for the BPF loader. */
char _license[] SEC("license") = "GPL";
228