/*
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sched.h>
#include <stdlib.h>
#include <time.h>

#include <sys/wait.h>
#include <sys/resource.h>

#include "bpf_sys.h"
#include "bpf_util.h"

#define LOCAL_FREE_TARGET	(128)
#define PERCPU_FREE_TARGET	(16)
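/* These free-list batch targets are assumed to mirror the values used by the
 * kernel's LRU map implementation for the common and per-CPU LRU lists; the
 * tests below size their maps and key batches relative to them.
 */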

static int nr_cpus;

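/* Create a test map whose keys and values are u64 (per CPU for the per-CPU
 * map types).
 */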
static int create_map(int map_type, int map_flags, unsigned int size)
{
	int map_fd;

	map_fd = bpf_map_create(map_type, sizeof(unsigned long long),
				sizeof(unsigned long long), size, map_flags);

	if (map_fd == -1)
		perror("bpf_map_create");

	return map_fd;
}

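/* Return 1 if every key in map1 is also present in map0 with the same value
 * (only value[0] is compared), 0 otherwise.
 */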
static int map_subset(int map0, int map1)
{
	unsigned long long next_key = 0;
	unsigned long long value0[nr_cpus], value1[nr_cpus];
	int ret;

	while (!bpf_map_next_key(map1, &next_key, &next_key)) {
		assert(!bpf_map_lookup(map1, &next_key, value1));
		ret = bpf_map_lookup(map0, &next_key, value0);
		if (ret) {
			printf("key:%llu not found from map. %s(%d)\n",
			       next_key, strerror(errno), errno);
			return 0;
		}
		if (value0[0] != value1[0]) {
			printf("key:%llu value0:%llu != value1:%llu\n",
			       next_key, value0[0], value1[0]);
			return 0;
		}
	}
	return 1;
}

static int map_equal(int lru_map, int expected)
{
	return map_subset(lru_map, expected) && map_subset(expected, lru_map);
}

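/* Pin task @pid (0 means the calling task) to the next online CPU, starting
 * from *next_to_try.  Returns 0 on success and -1 when no further CPU could
 * be used; *next_to_try is advanced so repeated calls walk across the CPUs.
 */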
static int sched_next_online(int pid, int *next_to_try)
{
	cpu_set_t cpuset;
	int next = *next_to_try;
	int ret = -1;

	while (next < nr_cpus) {
		CPU_ZERO(&cpuset);
		CPU_SET(next++, &cpuset);
		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
			ret = 0;
			break;
		}
	}

	*next_to_try = next;
	return ret;
}

/* Size of the LRU map is 2
 * Add key=1 (+1 key)
 * Add key=2 (+1 key)
 * Lookup key=1
 * Add key=3
 *   => key=2 will be removed by LRU
 * Iterate the map: only key=1 and key=3 are found
 */
static void test_lru_sanity0(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 2);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* insert key=1 element */

	key = 1;
	assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST));

	/* BPF_NOEXIST means: add new element if it doesn't exist */
	assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST) == -1 &&
	       /* key=1 already exists */
	       errno == EEXIST);

	assert(bpf_map_update(lru_map_fd, &key, value, -1) == -1 &&
	       errno == EINVAL);

	/* insert key=2 element */

	/* check that key=2 is not found */
	key = 2;
	assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* BPF_EXIST means: update existing element */
	assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
	       /* key=2 is not there */
	       errno == ENOENT);

	assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* insert key=3 element */

	/* check that key=3 is not found */
	key = 3;
	assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 &&
	       errno == ENOENT);

	/* check that key=1 can be found and mark the ref bit to
	 * stop LRU from removing key=1
	 */
	key = 1;
	assert(!bpf_map_lookup(lru_map_fd, &key, value));
	assert(value[0] == 1234);

	key = 3;
	assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST));

	/* key=2 has been removed from the LRU */
	key = 2;
	assert(bpf_map_lookup(lru_map_fd, &key, value) == -1);

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Lookup 1 to tgt_free/2
 * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
 *   => 1+tgt_free/2 to LOCAL_FREE_TARGET will be removed by LRU
 */
static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* The per-CPU LRU list (i.e. each CPU has its own LRU
		 * list) does not have a local free list.  It only
		 * evicts old nodes once the LRU list itself runs out
		 * of free nodes, so this test does not apply to
		 * BPF_F_NO_COMMON_LRU.
		 */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Lookup 1 to tgt_free/2 */
	end_key = 1 + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup(lru_map_fd, &key, value));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	/* Insert 1+tgt_free to 2*tgt_free
	 *   => 1+tgt_free/2 to LOCAL_FREE_TARGET will be
	 *      removed by LRU
	 */
	key = 1 + tgt_free;
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 1.5*tgt_free
 * Insert 1 to tgt_free (+tgt_free keys)
 * Update 1 to tgt_free/2
 *   => The original 1 to tgt_free/2 will be removed due to
 *      the LRU shrink process
 * Re-insert 1 to tgt_free/2 again and do a lookup immediately
 * Insert 1+tgt_free to tgt_free*3/2
 * Insert 1+tgt_free*3/2 to tgt_free*5/2
 *   => Keys 1+tgt_free to tgt_free*3/2
 *      will be removed from LRU because they have never
 *      been looked up and their ref bit is not set
 */
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		/* The per-CPU LRU list (i.e. each CPU has its own LRU
		 * list) does not have a local free list.  It only
		 * evicts old nodes once the LRU list itself runs out
		 * of free nodes, so this test does not apply to
		 * BPF_F_NO_COMMON_LRU.
		 */
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free + batch_size;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					map_size * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to tgt_free (+tgt_free keys) */
	end_key = 1 + tgt_free;
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Any bpf_map_update first needs to acquire a new node
	 * from the LRU.
	 *
	 * The local list has run out of free nodes.  It refills
	 * from the global LRU list, which tries to shrink the
	 * inactive list to obtain tgt_free free nodes.
	 *
	 * Hence, the oldest keys 1 to tgt_free/2
	 * are removed from the LRU list.
	 */
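	/* In both branches below, the update must first acquire a free node,
	 * which triggers the shrink described above and evicts key=1 itself.
	 * Hence the BPF_NOEXIST insert for the per-CPU value map succeeds
	 * (and is deleted again to restore the element count), while the
	 * BPF_EXIST update for the common value map is expected to fail
	 * because key=1 is already gone by the time the update looks it up.
	 */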
	key = 1;
	if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_delete(lru_map_fd, &key));
	} else {
		assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST));
	}

	/* Re-insert 1 to tgt_free/2 again and do a lookup
	 * immediately.
	 */
	end_key = 1 + batch_size;
	value[0] = 4321;
	for (key = 1; key < end_key; key++) {
		assert(bpf_map_lookup(lru_map_fd, &key, value));
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_lookup(lru_map_fd, &key, value));
		assert(value[0] == 4321);
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	value[0] = 1234;

	/* Insert 1+tgt_free to tgt_free*3/2 */
	end_key = 1 + tgt_free + batch_size;
	for (key = 1 + tgt_free; key < end_key; key++)
		/* These newly added but not referenced keys will be
		 * gone during the next LRU shrink.
		 */
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
	end_key = key + tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Size of the LRU map is 2*tgt_free
 * It tests the active/inactive list rotation
 * Insert 1 to 2*tgt_free (+2*tgt_free keys)
 * Lookup key 1 to tgt_free*3/2
 * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
 *   => keys 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
 */
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
{
	unsigned long long key, end_key, value[nr_cpus];
	int lru_map_fd, expected_map_fd;
	unsigned int batch_size;
	unsigned int map_size;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	batch_size = tgt_free / 2;
	assert(batch_size * 2 == tgt_free);

	map_size = tgt_free * 2;
	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					map_size * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, map_size);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	/* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
	end_key = 1 + (2 * tgt_free);
	for (key = 1; key < end_key; key++)
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	/* Lookup key 1 to tgt_free*3/2 */
	end_key = tgt_free + batch_size;
	for (key = 1; key < end_key; key++) {
		assert(!bpf_map_lookup(lru_map_fd, &key, value));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	/* Add 1+2*tgt_free to tgt_free*5/2
	 * (+tgt_free/2 keys)
	 */
	key = 2 * tgt_free + 1;
	end_key = key + batch_size;
	for (; key < end_key; key++) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

/* Test deletion */
static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
{
	int lru_map_fd, expected_map_fd;
	unsigned long long key, value[nr_cpus];
	unsigned long long end_key;
	int next_cpu = 0;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	assert(sched_next_online(0, &next_cpu) != -1);

	if (map_flags & BPF_F_NO_COMMON_LRU)
		lru_map_fd = create_map(map_type, map_flags,
					3 * tgt_free * nr_cpus);
	else
		lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
	assert(lru_map_fd != -1);

	expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
				     3 * tgt_free);
	assert(expected_map_fd != -1);

	value[0] = 1234;

	for (key = 1; key <= 2 * tgt_free; key++)
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

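	/* Re-inserting key=1 with BPF_NOEXIST must fail: the map holds
	 * 3*tgt_free elements, so nothing has been evicted yet and key=1
	 * is still present.
	 */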
	key = 1;
	assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));

	for (key = 1; key <= tgt_free; key++) {
		assert(!bpf_map_lookup(lru_map_fd, &key, value));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

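	/* Delete keys tgt_free+1 to 2*tgt_free; a second delete of the same
	 * key must fail because the key is already gone.
	 */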
	for (; key <= 2 * tgt_free; key++) {
		assert(!bpf_map_delete(lru_map_fd, &key));
		assert(bpf_map_delete(lru_map_fd, &key));
	}

	end_key = key + 2 * tgt_free;
	for (; key < end_key; key++) {
		assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
		assert(!bpf_map_update(expected_map_fd, &key, value,
				       BPF_NOEXIST));
	}

	assert(map_equal(lru_map_fd, expected_map_fd));

	close(expected_map_fd);
	close(lru_map_fd);

	printf("Pass\n");
}

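/* Helper for test_lru_sanity5 below: runs in a child process pinned to one
 * CPU and checks that, in a one-element common-LRU map, inserting a new key
 * evicts the key added by the previously tested CPU.
 */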
static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
{
	unsigned long long key, value[nr_cpus];

	/* Ensure the last key inserted by the previous CPU can be found */
	assert(!bpf_map_lookup(map_fd, &last_key, value));

	value[0] = 1234;

	key = last_key + 1;
	assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
	assert(!bpf_map_lookup(map_fd, &key, value));

	/* Cannot find the last key because it was removed by LRU */
	assert(bpf_map_lookup(map_fd, &last_key, value));
}

/* Test map with only one element */
static void test_lru_sanity5(int map_type, int map_flags)
{
	unsigned long long key, value[nr_cpus];
	int next_cpu = 0;
	int map_fd;

	if (map_flags & BPF_F_NO_COMMON_LRU)
		return;

	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
	       map_flags);

	map_fd = create_map(map_type, map_flags, 1);
	assert(map_fd != -1);

	value[0] = 1234;
	key = 0;
	assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));

	while (sched_next_online(0, &next_cpu) != -1) {
		pid_t pid;

		pid = fork();
		if (pid == 0) {
			do_test_lru_sanity5(key, map_fd);
			exit(0);
		} else if (pid == -1) {
			printf("couldn't spawn process to test key:%llu\n",
			       key);
			exit(1);
		} else {
			int status;

			assert(waitpid(pid, &status, 0) == pid);
			assert(status == 0);
			key++;
		}
	}

	close(map_fd);
	/* At least one key should be tested */
	assert(key > 0);

	printf("Pass\n");
}

int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
			   BPF_MAP_TYPE_LRU_PERCPU_HASH};
	int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
	int t, f;

	setbuf(stdout, NULL);

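	/* Map memory is charged against RLIMIT_MEMLOCK, so lift the limit
	 * before creating the test maps.
	 */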
	assert(!setrlimit(RLIMIT_MEMLOCK, &r));

	nr_cpus = bpf_num_possible_cpus();
	assert(nr_cpus != -1);
	printf("nr_cpus:%d\n\n", nr_cpus);

	for (f = 0; f < sizeof(map_flags) / sizeof(*map_flags); f++) {
		unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
			PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;

		for (t = 0; t < sizeof(map_types) / sizeof(*map_types); t++) {
			test_lru_sanity0(map_types[t], map_flags[f]);
			test_lru_sanity1(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity2(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity3(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity4(map_types[t], map_flags[f], tgt_free);
			test_lru_sanity5(map_types[t], map_flags[f]);

			printf("\n");
		}
	}

	return 0;
}