/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

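/*
 * Per-cpu regions shared with the host: the async page fault data the
 * hypervisor writes fault reasons into, and the steal time record it
 * keeps up to date once the corresponding MSRs are programmed.
 */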
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

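/*
 * Tasks that faulted on a page the host has paged out are parked in a
 * small hash table, keyed by the token the host associates with the fault.
 */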
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
        struct hlist_node link;
        wait_queue_head_t wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

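/*
 * Invoked on a PAGE_NOT_PRESENT notification: park the current context
 * until kvm_async_pf_task_wake() reports that the page for @token is in.
 * Contexts that cannot schedule (the idle task, or a nested preempt
 * count) halt instead of sleeping.
 */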
void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DEFINE_WAIT(wait);

        rcu_irq_enter();

        spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                spin_unlock(&b->lock);

                rcu_irq_exit();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) || preempt_count() > 1;
        init_waitqueue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        rcu_irq_exit();
                        native_safe_halt();
                        local_irq_disable();
                        rcu_irq_enter();
                }
        }
        if (!n.halted)
                finish_wait(&n.wq, &wait);

        rcu_irq_exit();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

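/*
 * Wake a single waiter: halted contexts get a reschedule IPI, sleeping
 * tasks are woken through their wait queue.
 */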
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (waitqueue_active(&n->wq))
                wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                spin_unlock(&b->lock);
        }
}

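/*
 * Invoked on a PAGE_READY notification.  If the wake up arrives before
 * the matching PAGE_NOT_PRESENT fault was processed, a dummy entry is
 * left behind so the waiter returns immediately.  A token of ~0 is a
 * broadcast and wakes every waiter parked on this CPU.
 */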
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * async PF was not yet handled.
                 * Add dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while other cpu
                         * handles async PF.
                         */
                        spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_waitqueue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__get_cpu_var(apf_reason).enabled) {
                reason = __get_cpu_var(apf_reason).reason;
                __get_cpu_var(apf_reason).reason = 0;
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

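/*
 * Replacement #PF handler: a genuine fault (reason 0) falls through to
 * do_page_fault(), while the two paravirtual reasons park or wake the
 * task identified by the token the host placed in CR2.
 */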
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
                exit_idle();
                kvm_async_pf_task_wait((u32)read_cr2());
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                exit_idle();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        /*
         * KVM isn't paravirt in the sense of paravirt_enabled.  A KVM
         * guest kernel works like a bare metal kernel with additional
         * features, and paravirt_enabled is about features that are
         * missing.
         */
        pv_info.paravirt_enabled = 0;

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        memset(st, 0, sizeof(*st));

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}

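/*
 * Paravirtual EOI: the host sets KVM_PV_EOI_BIT in this per-cpu word when
 * the pending interrupt can be completed by simply clearing the bit; only
 * if the bit is not set do we fall back to a real APIC EOI write.
 */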
static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /*
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for lock or memory barriers.
         * An optimization barrier is implied in apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
                return;
        apic_write(APIC_EOI, APIC_EOI_ACK);
}

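/*
 * Per-cpu initialisation: hand the host the physical addresses of this
 * CPU's async PF, PV EOI and steal time areas via the respective MSRs,
 * for each feature the host advertises.
 */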
void __cpuinit kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
                __get_cpu_var(apf_reason).enabled = 1;
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __get_cpu_var(kvm_apic_eoi) = 0;
                pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__get_cpu_var(apf_reason).enabled)
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __get_cpu_var(apf_reason).enabled = 0;

        printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
         * New kernel can re-enable when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

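/*
 * Steal clock read, used by the scheduler's steal time accounting.  The
 * host bumps src->version before and after updating the record (odd while
 * an update is in flight), so retry until a consistent snapshot is seen.
 */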
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                rmb();
                steal = src->steal;
                rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

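/*
 * With SMP, the per-cpu registration has to be repeated whenever a CPU
 * comes online, and undone before it goes offline.
 */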
#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
        WARN_ON(kvm_register_clock("primary cpu clock"));
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
}

static void __cpuinit kvm_guest_cpu_online(void *dummy)
{
        kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        int cpu = (unsigned long)hcpu;
        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, &async_page_fault);
}

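/*
 * Main guest setup, run at boot once the KVM hypervisor has been
 * detected: install the paravirt hooks for the features the host
 * advertises through its CPUID feature bits.
 */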
void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvmclock_vsyscall)
                kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        register_cpu_notifier(&kvm_cpu_notifier);
#else
        kvm_guest_cpu_init();
#endif
}

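/*
 * Hook into the generic x86 hypervisor detection; kvm_para_available()
 * checks for the KVM CPUID signature.
 */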
static bool __init kvm_detect(void)
{
        if (!kvm_para_available())
                return false;
        return true;
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
        .name = "KVM",
        .detect = kvm_detect,
        .x2apic_available = kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

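/*
 * The scheduler's steal time accounting is guarded by static keys; enable
 * them once we know this guest has a steal clock.
 */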
static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);