1*b1b63725SEduard Zingerman // SPDX-License-Identifier: GPL-2.0
2*b1b63725SEduard Zingerman /* Converted from tools/testing/selftests/bpf/verifier/cgroup_skb.c */
3*b1b63725SEduard Zingerman
4*b1b63725SEduard Zingerman #include <linux/bpf.h>
5*b1b63725SEduard Zingerman #include <bpf/bpf_helpers.h>
6*b1b63725SEduard Zingerman #include "bpf_misc.h"
7*b1b63725SEduard Zingerman
SEC("cgroup/skb")
__description("direct packet read test#1 for CGROUP_SKB")
__success __failure_unpriv
__msg_unpriv("invalid bpf_context access off=76 size=4")
__retval(0)
/* Read a broad set of __sk_buff context fields (data, data_end, len,
 * pkt_type, mark, queue_mapping, protocol, vlan_present), write mark
 * back, then do a bounds-checked direct packet read.  Must load for a
 * privileged user; for an unprivileged user the verifier must reject
 * the context access at off=76 size=4 (the first load below —
 * presumably the data pointer; unpriv CGROUP_SKB programs may not
 * read it — TODO confirm offset against struct __sk_buff layout).
 */
__naked void test_1_for_cgroup_skb(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r4 = *(u32*)(r1 + %[__sk_buff_len]);		\
	r5 = *(u32*)(r1 + %[__sk_buff_pkt_type]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	*(u32*)(r1 + %[__sk_buff_mark]) = r6;		\
	r7 = *(u32*)(r1 + %[__sk_buff_queue_mapping]);	\
	r8 = *(u32*)(r1 + %[__sk_buff_protocol]);	\
	r9 = *(u32*)(r1 + %[__sk_buff_vlan_present]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
	  __imm_const(__sk_buff_protocol, offsetof(struct __sk_buff, protocol)),
	  __imm_const(__sk_buff_queue_mapping, offsetof(struct __sk_buff, queue_mapping)),
	  __imm_const(__sk_buff_vlan_present, offsetof(struct __sk_buff, vlan_present))
	: __clobber_all);
}
42*b1b63725SEduard Zingerman
SEC("cgroup/skb")
__description("direct packet read test#2 for CGROUP_SKB")
__success __success_unpriv __retval(0)
/* Read vlan_tci, vlan_proto, priority, ingress_ifindex, tc_index and
 * hash from the __sk_buff context, and write priority back.  All of
 * these accesses must be accepted for both privileged and
 * unprivileged loaders of a CGROUP_SKB program.
 */
__naked void test_2_for_cgroup_skb(void)
{
	asm volatile ("					\
	r4 = *(u32*)(r1 + %[__sk_buff_vlan_tci]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_vlan_proto]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_priority]);	\
	*(u32*)(r1 + %[__sk_buff_priority]) = r6;	\
	r7 = *(u32*)(r1 + %[__sk_buff_ingress_ifindex]);\
	r8 = *(u32*)(r1 + %[__sk_buff_tc_index]);	\
	r9 = *(u32*)(r1 + %[__sk_buff_hash]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_hash, offsetof(struct __sk_buff, hash)),
	  __imm_const(__sk_buff_ingress_ifindex, offsetof(struct __sk_buff, ingress_ifindex)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)),
	  __imm_const(__sk_buff_tc_index, offsetof(struct __sk_buff, tc_index)),
	  __imm_const(__sk_buff_vlan_proto, offsetof(struct __sk_buff, vlan_proto)),
	  __imm_const(__sk_buff_vlan_tci, offsetof(struct __sk_buff, vlan_tci))
	: __clobber_all);
}
67*b1b63725SEduard Zingerman
SEC("cgroup/skb")
__description("direct packet read test#3 for CGROUP_SKB")
__success __success_unpriv __retval(0)
/* Read all five cb[] scratch slots plus napi_id, then write each cb[]
 * slot back.  cb[] is read/write for CGROUP_SKB programs; napi_id is
 * readable here (its write is rejected — see the separate
 * "invalid write access to napi_id" test below in upstream ordering).
 * Must succeed for both privileged and unprivileged users.
 */
__naked void test_3_for_cgroup_skb(void)
{
	asm volatile ("					\
	r4 = *(u32*)(r1 + %[__sk_buff_cb_0]);		\
	r5 = *(u32*)(r1 + %[__sk_buff_cb_1]);		\
	r6 = *(u32*)(r1 + %[__sk_buff_cb_2]);		\
	r7 = *(u32*)(r1 + %[__sk_buff_cb_3]);		\
	r8 = *(u32*)(r1 + %[__sk_buff_cb_4]);		\
	r9 = *(u32*)(r1 + %[__sk_buff_napi_id]);	\
	*(u32*)(r1 + %[__sk_buff_cb_0]) = r4;		\
	*(u32*)(r1 + %[__sk_buff_cb_1]) = r5;		\
	*(u32*)(r1 + %[__sk_buff_cb_2]) = r6;		\
	*(u32*)(r1 + %[__sk_buff_cb_3]) = r7;		\
	*(u32*)(r1 + %[__sk_buff_cb_4]) = r8;		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])),
	  __imm_const(__sk_buff_cb_1, offsetof(struct __sk_buff, cb[1])),
	  __imm_const(__sk_buff_cb_2, offsetof(struct __sk_buff, cb[2])),
	  __imm_const(__sk_buff_cb_3, offsetof(struct __sk_buff, cb[3])),
	  __imm_const(__sk_buff_cb_4, offsetof(struct __sk_buff, cb[4])),
	  __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
	: __clobber_all);
}
96*b1b63725SEduard Zingerman
SEC("cgroup/skb")
__description("direct packet read test#4 for CGROUP_SKB")
__success __success_unpriv __retval(0)
/* Read the socket-identity fields of __sk_buff: family, all four
 * words of remote_ip6[]/local_ip6[], remote_ip4/local_ip4, and
 * remote_port/local_port.  All must be readable by CGROUP_SKB
 * programs for both privileged and unprivileged users.
 */
__naked void test_4_for_cgroup_skb(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_family]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_remote_ip4]);	\
	r4 = *(u32*)(r1 + %[__sk_buff_local_ip4]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_0]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_1]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_2]);	\
	r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_3]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_0]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_1]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_2]);	\
	r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_3]);	\
	r7 = *(u32*)(r1 + %[__sk_buff_remote_port]);	\
	r8 = *(u32*)(r1 + %[__sk_buff_local_port]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_family, offsetof(struct __sk_buff, family)),
	  __imm_const(__sk_buff_local_ip4, offsetof(struct __sk_buff, local_ip4)),
	  __imm_const(__sk_buff_local_ip6_0, offsetof(struct __sk_buff, local_ip6[0])),
	  __imm_const(__sk_buff_local_ip6_1, offsetof(struct __sk_buff, local_ip6[1])),
	  __imm_const(__sk_buff_local_ip6_2, offsetof(struct __sk_buff, local_ip6[2])),
	  __imm_const(__sk_buff_local_ip6_3, offsetof(struct __sk_buff, local_ip6[3])),
	  __imm_const(__sk_buff_local_port, offsetof(struct __sk_buff, local_port)),
	  __imm_const(__sk_buff_remote_ip4, offsetof(struct __sk_buff, remote_ip4)),
	  __imm_const(__sk_buff_remote_ip6_0, offsetof(struct __sk_buff, remote_ip6[0])),
	  __imm_const(__sk_buff_remote_ip6_1, offsetof(struct __sk_buff, remote_ip6[1])),
	  __imm_const(__sk_buff_remote_ip6_2, offsetof(struct __sk_buff, remote_ip6[2])),
	  __imm_const(__sk_buff_remote_ip6_3, offsetof(struct __sk_buff, remote_ip6[3])),
	  __imm_const(__sk_buff_remote_port, offsetof(struct __sk_buff, remote_port))
	: __clobber_all);
}
134*b1b63725SEduard Zingerman
SEC("cgroup/skb")
__description("invalid access of tc_classid for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
/* Negative test: tc_classid is a TC-only field; reading it from a
 * CGROUP_SKB program must be rejected by the verifier for both
 * privileged and unprivileged users.
 */
__naked void tc_classid_for_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}
149*b1b63725SEduard Zingerman
SEC("cgroup/skb")
__description("invalid access of data_meta for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
/* Negative test: data_meta is not exposed to CGROUP_SKB programs;
 * reading it must fail verification regardless of privilege.
 */
__naked void data_meta_for_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_data_meta]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
	: __clobber_all);
}
164*b1b63725SEduard Zingerman
SEC("cgroup/skb")
__description("invalid access of flow_keys for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
/* Negative test: flow_keys is only valid in flow-dissector programs;
 * reading it from a CGROUP_SKB program must fail verification
 * regardless of privilege.
 */
__naked void flow_keys_for_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_flow_keys]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_flow_keys, offsetof(struct __sk_buff, flow_keys))
	: __clobber_all);
}
179*b1b63725SEduard Zingerman
SEC("cgroup/skb")
__description("invalid write access to napi_id for CGROUP_SKB")
__failure __msg("invalid bpf_context access")
__failure_unpriv
/* Negative test: napi_id is readable (see direct packet read test#3)
 * but read-only for CGROUP_SKB; the store below must be rejected by
 * the verifier regardless of privilege.
 */
__naked void napi_id_for_cgroup_skb(void)
{
	asm volatile ("					\
	r9 = *(u32*)(r1 + %[__sk_buff_napi_id]);	\
	*(u32*)(r1 + %[__sk_buff_napi_id]) = r9;	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id))
	: __clobber_all);
}
195*b1b63725SEduard Zingerman
SEC("cgroup/skb")
__description("write tstamp from CGROUP_SKB")
__success __failure_unpriv
__msg_unpriv("invalid bpf_context access off=152 size=8")
__retval(0)
/* A 64-bit store to tstamp is allowed for a privileged loader but
 * must be rejected for an unprivileged one with the context-access
 * error at off=152 size=8 (presumably tstamp's offset in
 * struct __sk_buff — TODO confirm against the UAPI header).
 */
__naked void write_tstamp_from_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = 0;						\
	*(u64*)(r1 + %[__sk_buff_tstamp]) = r0;		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}
212*b1b63725SEduard Zingerman
SEC("cgroup/skb")
__description("read tstamp from CGROUP_SKB")
__success __success_unpriv __retval(0)
/* Unlike the write above, a 64-bit read of tstamp must be accepted
 * for both privileged and unprivileged users.
 */
__naked void read_tstamp_from_cgroup_skb(void)
{
	asm volatile ("					\
	r0 = *(u64*)(r1 + %[__sk_buff_tstamp]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}
226*b1b63725SEduard Zingerman
/* GPL license tag required for the kernel to accept these programs. */
char _license[] SEC("license") = "GPL";
228