GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/shared/linux.c
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;
int test_verbose;

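/*
 * Test-harness knobs: kmem_cache_set_callback() registers a hook that fires
 * at the start of the next allocation (or bulk free/prefill) after a
 * simulated allocation failure, kmem_cache_set_private() stores the argument
 * passed to that hook, and kmem_cache_set_non_kernel() sets how many
 * allocations without __GFP_DIRECT_RECLAIM may still succeed.
 */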
void kmem_cache_set_callback(struct kmem_cache *cachep, void (*callback)(void *))
{
	cachep->callback = callback;
}

void kmem_cache_set_private(struct kmem_cache *cachep, void *private)
{
	cachep->private = private;
}

void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}

unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}

unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}

unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}

void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}

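/*
 * Userspace stand-in for slab allocation: reuse a node from the cache's
 * free list when one is available, otherwise fall back to
 * malloc()/posix_memalign().  Allocations without __GFP_DIRECT_RECLAIM
 * consume the non_kernel budget and fail (returning NULL) once it is
 * exhausted, arming the registered callback for the next call.
 */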
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		int gfp)
{
	void *p;

	if (cachep->exec_callback) {
		if (cachep->callback)
			cachep->callback(cachep->private);
		cachep->exec_callback = false;
	}

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel) {
			if (cachep->callback)
				cachep->exec_callback = true;
			return NULL;
		}

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align) {
			/* posix_memalign() returns a positive error code on failure */
			if (posix_memalign(&p, cachep->align, cachep->size) != 0)
				return NULL;
		} else {
			p = malloc(cachep->size);
		}

		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}

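/*
 * Either stash the object on the cache's free list (linked through
 * node->parent) for reuse, or poison and free it outright.  Aligned caches
 * and free lists already holding more than ten objects always free
 * immediately.  Caller must hold cachep->lock.
 */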
void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}

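/*
 * Update the allocation counters and hand the object back;
 * kmem_cache_free() below is the wrapper that takes cachep->lock.
 */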
void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	__kmem_cache_free_locked(cachep, objp);
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}

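/*
 * Free an array of objects: run any pending callback first, then return
 * each object under a single hold of cachep->lock.
 */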
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%zu]\n", list, size - 1);

	if (cachep->exec_callback) {
		if (cachep->callback)
			cachep->callback(cachep->private);
		cachep->exec_callback = false;
	}

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}

void kmem_cache_shrink(struct kmem_cache *cachep)
{
}

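/*
 * All-or-nothing bulk allocation: satisfy the request from the free list
 * when it holds enough objects, otherwise allocate each object
 * individually.  If any allocation fails (or the non_kernel budget runs
 * out), everything allocated so far is handed back and 0 is returned;
 * on success the number of objects allocated is returned.
 */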
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
		void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %zu\n", size);

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			if (cachep->align) {
				/* posix_memalign() returns a positive error code on failure */
				if (posix_memalign(&p[i], cachep->align,
						   cachep->size) != 0)
					break;
			} else {
				p[i] = malloc(cachep->size);
				if (!p[i])
					break;
			}
			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	if (i < size) {
		size = i;
		pthread_mutex_lock(&cachep->lock);
		for (i = 0; i < size; i++)
			__kmem_cache_free_locked(cachep, p[i]);
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->callback)
			cachep->exec_callback = true;
		return 0;
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}

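/*
 * Minimal kmem_cache constructor for the userspace build: record the object
 * size, alignment, constructor and sheaf capacity, and start with an empty
 * free list and zeroed counters.  The name and flags arguments are accepted
 * for API compatibility but otherwise unused here.
 */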
struct kmem_cache *
__kmem_cache_create_args(const char *name, unsigned int size,
		struct kmem_cache_args *args,
		unsigned int flags)
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = args->align;
	ret->sheaf_capacity = args->sheaf_capacity;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = args->ctor;
	ret->non_kernel = 0;
	ret->exec_callback = false;
	ret->callback = NULL;
	ret->private = NULL;

	return ret;
}

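/*
 * Allocate a sheaf (an array-backed batch of objects) with capacity at
 * least the cache's sheaf_capacity and fill it via kmem_cache_alloc_bulk().
 * Returns NULL if the sheaf or its objects cannot be allocated.
 */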
struct slab_sheaf *
kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
{
	struct slab_sheaf *sheaf;
	unsigned int capacity;

	if (s->exec_callback) {
		if (s->callback)
			s->callback(s->private);
		s->exec_callback = false;
	}

	capacity = max(size, s->sheaf_capacity);

	sheaf = calloc(1, sizeof(*sheaf) + sizeof(void *) * capacity);
	if (!sheaf)
		return NULL;

	sheaf->cache = s;
	sheaf->capacity = capacity;
	sheaf->size = kmem_cache_alloc_bulk(s, gfp, size, sheaf->objects);
	if (!sheaf->size) {
		free(sheaf);
		return NULL;
	}

	return sheaf;
}

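/*
 * Top up a sheaf so it holds at least @size objects.  If the request
 * exceeds the sheaf's capacity, a larger replacement sheaf is prefilled and
 * the old one returned; otherwise the shortfall is bulk-allocated in place.
 * Returns 0 on success or -ENOMEM.
 */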
int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
		struct slab_sheaf **sheafp, unsigned int size)
{
	struct slab_sheaf *sheaf = *sheafp;
	int refill;

	if (sheaf->size >= size)
		return 0;

	if (size > sheaf->capacity) {
		sheaf = kmem_cache_prefill_sheaf(s, gfp, size);
		if (!sheaf)
			return -ENOMEM;

		kmem_cache_return_sheaf(s, gfp, *sheafp);
		*sheafp = sheaf;
		return 0;
	}

	refill = kmem_cache_alloc_bulk(s, gfp, size - sheaf->size,
				       &sheaf->objects[sheaf->size]);
	if (!refill)
		return -ENOMEM;

	sheaf->size += refill;
	return 0;
}

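/* Give back any objects still held by the sheaf, then free the sheaf itself. */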
void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
		struct slab_sheaf *sheaf)
{
	if (sheaf->size)
		kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]);

	free(sheaf);
}

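/*
 * Pop one object off the sheaf, or return NULL (with a message) once the
 * sheaf has been emptied.
 */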
void *
kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
		struct slab_sheaf *sheaf)
{
	void *obj;

	if (sheaf->size == 0) {
		printf("Nothing left in sheaf!\n");
		return NULL;
	}

	obj = sheaf->objects[--sheaf->size];
	sheaf->objects[sheaf->size] = NULL;

	return obj;
}

/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Testing the bulk allocators without aligned kmem_cache to force the
	 * bulk alloc/free to reuse the freed objects.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}