/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order */
#endif
	int				nr_pages;	/* nr of data pages */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position */
	unsigned int			nest;		/* nested writers */
	local_t				events;		/* event limit */
	local_t				wakeup;		/* wakeup stamp */
	local_t				lost;		/* nr records lost */

	long				watermark;	/* wakeup watermark */
	long				aux_watermark;	/* aux wakeup watermark */
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	struct mutex			aux_mutex;
	long				aux_head;	/* aux write position */
	unsigned int			aux_nest;	/* nested aux writers */
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];
};

extern void rb_free(struct perf_buffer *rb);

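/*
 * RCU callback used when the last reference to a buffer is dropped while
 * readers may still be in an RCU read-side section; see ring_buffer_put().
 */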
static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *rb;

	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
	rb_free(rb);
}

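/*
 * An unmapped buffer (nr_pages == 0) stays paused regardless of @pause:
 * with no data pages there is nothing to write into.
 */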
static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);

static inline bool rb_has_aux(struct perf_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

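/* Emit a PERF_RECORD_AUX record for @size bytes of new AUX data at @head. */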
void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif

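/*
 * Under CONFIG_PERF_USE_VMALLOC the buffer is one contiguous allocation and
 * a data_pages[] slot covers 2^page_order pages, so the real page count and
 * data size scale by page_order(); otherwise the order is 0 and these reduce
 * to plain nr_pages arithmetic.
 */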
static inline int data_page_nr(struct perf_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return (unsigned long)rb->aux_nr_pages << PAGE_SHIFT;
}

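/*
 * Core copy loop shared by all output-copy variants. @memcpy_func follows
 * the copy_from_user() convention and returns the number of bytes NOT
 * copied; the loop advances the handle across ring-buffer pages until @len
 * is exhausted or a partial copy occurs, and returns the bytes remaining.
 */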
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

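/*
 * Generate a copy helper with the canonical signature; @memcpy_func is
 * handed the current write position, the source buffer and the chunk size.
 */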
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

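/*
 * Open-coded variant for callers that supply their own perf_copy_f. The
 * source pointer is not advanced here; instead @copy_func receives the
 * running offset (orig_len - len) and resolves the source address itself.
 */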
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

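/*
 * Plain memcpy() cannot fail, so report zero bytes left uncopied to match
 * the copy_from_user()-style convention the copy loop expects.
 */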
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

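/*
 * __output_skip() advances the handle without copying, for regions of a
 * record that are skipped rather than written.
 */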
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

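/*
 * Default user-space copy for architectures that don't override it. Output
 * can run from NMI/IRQ context, so page faults are disabled and a partial
 * copy (non-zero return, bytes not copied) is tolerated by the copy loop.
 */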
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

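/*
 * Per-context recursion guard: one counter slot per interrupt_context_level()
 * (task, softirq, hardirq, NMI). A minimal usage sketch, assuming a u8
 * recursion[PERF_NR_CONTEXTS] array owned by the caller:
 *
 *	int rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;			// already handling an event here
 *	...emit the event...
 *	put_recursion_context(recursion, rctx);
 */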
static inline int get_recursion_context(u8 *recursion)
{
	unsigned char rctx = interrupt_context_level();

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(u8 *recursion, unsigned char rctx)
{
	barrier();
	recursion[rctx]--;
}

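/*
 * Whether the architecture can sample the user-space stack pointer and dump
 * the user stack (PERF_SAMPLE_STACK_USER); stubbed out otherwise.
 */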
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */