GitHub Repository: torvalds/linux
Path: blob/master/kernel/events/internal.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	unsigned int			nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	refcount_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	struct mutex			aux_mutex;
	long				aux_head;
	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	refcount_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	int				aux_in_pause_resume;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];
};
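
/*
 * Note: perf_mmap() only accepts power-of-two data page counts, so
 * "nr_pages - 1" can serve as a wrap-around mask; the copy loop in
 * __DEFINE_OUTPUT_COPY_BODY below relies on exactly that. data_pages[]
 * is a flexible array with one entry per data chunk, each chunk being
 * PAGE_SIZE << page_order(rb) bytes (page_order() is defined below).
 */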

extern void rb_free(struct perf_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *rb;

	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
	rb_free(rb);
}
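
/*
 * Usage sketch (the actual drop path lives in kernel/events/core.c):
 * the final reference drop defers the free past an RCU grace period,
 * roughly:
 *
 *	if (refcount_dec_and_test(&rb->refcount))
 *		call_rcu(&rb->rcu_head, rb_free_rcu);
 */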

static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}
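
/*
 * Note that a buffer without data pages (nr_pages == 0) stays paused
 * regardless of the pause argument, so writers never touch a data-less
 * buffer.
 */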

extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);

static inline bool rb_has_aux(struct perf_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif

static inline int data_page_nr(struct perf_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return (unsigned long)rb->aux_nr_pages << PAGE_SHIFT;
}
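
/*
 * Worked example (illustrative): with PAGE_SHIFT == 12 and the
 * non-vmalloc page_order() of 0, nr_pages == 8 gives perf_data_size()
 * == 8 << 12 == 32 KiB spread over data_page_nr() == 8 discrete pages.
 * Under CONFIG_PERF_USE_VMALLOC the same 32 KiB is typically a single
 * chunk instead: nr_pages == 1 and page_order() == 3.
 */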

#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
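
/*
 * Convention: memcpy_func must return the number of bytes it did NOT
 * copy (0 on complete success), matching __copy_from_user_inatomic();
 * "written = size - written" above turns that into bytes written, so
 * the loop can advance the handle and stop on a partial copy.
 */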

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
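
/*
 * __output_copy() therefore copies a kernel buffer into the ring buffer,
 * wrapping across data_pages[], and returns the number of bytes left
 * uncopied (0 on success).
 */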

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
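
/*
 * The default is usable from event context because pagefault_disable()
 * turns a user-space fault into an immediate failure instead of a sleep:
 * __copy_from_user_inatomic() then reports the bytes left uncopied and
 * __output_copy_user() returns a short copy rather than blocking.
 */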

static inline int get_recursion_context(u8 *recursion)
{
	unsigned char rctx = interrupt_context_level();

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(u8 *recursion, unsigned char rctx)
{
	barrier();
	recursion[rctx]--;
}
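
/*
 * Illustrative pairing (sketch of how callers such as the swevent code
 * in kernel/events/core.c use these):
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;		(already active in this context: drop event)
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 */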

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */