{
	"check bpf_perf_event_data->sample_period byte load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
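	/* A byte load at the field offset reads the least significant
	 * byte only on little-endian; on big-endian the low byte of
	 * the 64-bit sample_period field sits at offset + 7.
	 */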
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period)),
#else
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period) + 7),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"check bpf_perf_event_data->sample_period half load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
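	/* On big-endian, the low 16 bits of the 64-bit field sit at
	 * offset + 6.
	 */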
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period)),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period) + 6),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"check bpf_perf_event_data->sample_period word load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
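	/* On big-endian, the low 32 bits of the 64-bit field sit at
	 * offset + 4.
	 */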
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period)),
#else
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period) + 4),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"check bpf_perf_event_data->sample_period dword load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
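	/* A full 8-byte load covers the whole field, so no endianness
	 * adjustment is needed.
	 */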
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct bpf_perf_event_data, sample_period)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},