/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
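
/*
 * Example usage, a minimal sketch ('my_key' and my_slow_path() are
 * hypothetical; the API itself is declared in linux/jump_label.h):
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_key);
 *
 *	if (static_branch_unlikely(&my_key))
 *		my_slow_path();
 *
 *	static_branch_enable(&my_key);
 */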

#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
			/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's
 * OK to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 * static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process. At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update(). Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
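	/*
	 * In short, key->enabled is:
	 *	 0: disabled
	 *	-1: the first enable is mid-update; contending enablers
	 *	    serialize on jump_label_mutex below
	 *	>0: enabled; the value is a reference count
	 */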
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);

static void __static_key_slow_dec_cpuslocked(struct static_key *key,
					     unsigned long rate_limit,
					     struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
				  unsigned long rate_limit,
				  struct delayed_work *work)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key, rate_limit, work);
	cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec_cpuslocked(key, 0, NULL);
}

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
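
/*
 * A minimal sketch of deferred use ('my_dkey' is hypothetical; the key
 * itself is assumed to be initialized elsewhere):
 *
 *	static struct static_key_deferred my_dkey;
 *
 *	jump_label_rate_limit(&my_dkey, HZ);	  (once, at init time)
 *	static_key_slow_inc(&my_dkey.key);	  (enable)
 *	static_key_slow_dec_deferred(&my_dkey);	  (the disabling update is
 *						   deferred by the timeout)
 */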

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
	    entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use accessor
 * functions which preserve these bits.
 */
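
/*
 * A sketch of the resulting encoding (bit values per the JUMP_TYPE_*
 * definitions in linux/jump_label.h):
 *
 *	key->entries / key->next:
 *	[ pointer bits ....................... | LINKED | TRUE/FALSE ]
 *	                                          bit 1     bit 0
 */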
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
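
/*
 * Spelling out the XOR in jump_label_type() above (JUMP_LABEL_NOP = 0,
 * JUMP_LABEL_JMP = 1 in enum jump_label_type):
 *
 *	enabled  branch   enabled ^ branch
 *	   0	    0	  JUMP_LABEL_NOP
 *	   0	    1	  JUMP_LABEL_JMP
 *	   1	    0	  JUMP_LABEL_JMP
 *	   1	    1	  JUMP_LABEL_NOP
 */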

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 invalidates module init text sections;
		 * kernel_text_address() verifies we are not in core kernel
		 * init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}
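
/*
 * When a static_key is used by modules other than the one that defines
 * it, each such module's jump_entry range is tracked with one of these
 * nodes, linked off key->next (see static_key_set_mod() below).
 */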
struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next share the same storage via a union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}
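
/*
 * When a module is loaded, jump_label_add_module() links the module's
 * jump_entry table into every key the module uses. A key with several
 * module users ends up reachable through a chain like this (a sketch;
 * most recently added module first, mod == NULL for the core kernel's
 * own entries):
 *
 *	key -> jlm(modB) -> jlm(modA) -> jlm(defining site's entries)
 */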
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	case MODULE_STATE_LIVE:
		jump_label_invalidate_module_init(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addresses between @start and @end
 * overlap with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */