arch/x86/kernel/i387.c
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <linux/module.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/sigcontext.h>
#include <asm/processor.h>
#include <asm/math_emu.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/i387.h>
#include <asm/user.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
#else
# define save_i387_xstate_ia32 save_i387_xstate
# define restore_i387_xstate_ia32 restore_i387_xstate
# define _fpstate_ia32 _fpstate
# define _xstate_ia32 _xstate
# define sig_xstate_ia32_size sig_xstate_size
# define fx_sw_reserved_ia32 fx_sw_reserved
# define user_i387_ia32_struct user_i387_struct
# define user32_fxsr_struct user_fxsr_struct
#endif

#ifdef CONFIG_MATH_EMULATION
# define HAVE_HWFP (boot_cpu_data.hard_math)
#else
# define HAVE_HWFP 1
#endif

static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
unsigned int xstate_size;
unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
static struct i387_fxsave_struct fx_scratch __cpuinitdata;

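/*
 * Query the CPU for the MXCSR bits it actually implements: FXSAVE reports
 * the supported mask in mxcsr_mask, and a reported value of zero means the
 * legacy default mask of 0x0000ffbf applies.
 */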
void __cpuinit mxcsr_feature_mask_init(void)
{
        unsigned long mask = 0;

        clts();
        if (cpu_has_fxsr) {
                memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
                asm volatile("fxsave %0" : "+m" (fx_scratch));
                mask = fx_scratch.mxcsr_mask;
                if (mask == 0)
                        mask = 0x0000ffbf;
        }
        mxcsr_feature_mask &= mask;
        stts();
}

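/*
 * Pick the size of the per-task extended register state area, depending on
 * whether the FPU is emulated and which save mechanism (xsave, fxsave or
 * plain fsave) the CPU provides.
 */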
void __cpuinit init_thread_xstate(void)
{
        if (!HAVE_HWFP) {
                xstate_size = sizeof(struct i387_soft_struct);
                return;
        }

        if (cpu_has_xsave) {
                xsave_cntxt_init();
                return;
        }

        if (cpu_has_fxsr)
                xstate_size = sizeof(struct i387_fxsave_struct);
#ifdef CONFIG_X86_32
        else
                xstate_size = sizeof(struct i387_fsave_struct);
#endif
}

#ifdef CONFIG_X86_64
/*
 * Called at bootup to set up the initial FPU state that is later cloned
 * into all processes.
 */
void __cpuinit fpu_init(void)
{
        unsigned long oldcr0 = read_cr0();

        set_in_cr4(X86_CR4_OSFXSR);
        set_in_cr4(X86_CR4_OSXMMEXCPT);

        write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */

        /*
         * The boot processor sets up the FP and extended state context info.
         */
        if (!smp_processor_id())
                init_thread_xstate();
        xsave_init();

        mxcsr_feature_mask_init();
        /* clean state in init */
        if (cpu_has_xsave)
                current_thread_info()->status = TS_XSAVE;
        else
                current_thread_info()->status = 0;
        clear_used_math();
}
#endif /* CONFIG_X86_64 */

/*
 * The _current_ task is using the FPU for the first time, so initialize it
 * and set the mxcsr to its default value at reset if we support XMM
 * instructions, and then remember that the current task has used the FPU.
 */
int init_fpu(struct task_struct *tsk)
{
        if (tsk_used_math(tsk)) {
                if (HAVE_HWFP && tsk == current)
                        unlazy_fpu(tsk);
                return 0;
        }

        /*
         * Memory allocation at the first usage of the FPU and other state.
         */
        if (!tsk->thread.xstate) {
                tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                      GFP_KERNEL);
                if (!tsk->thread.xstate)
                        return -ENOMEM;
        }

#ifdef CONFIG_X86_32
        if (!HAVE_HWFP) {
                memset(tsk->thread.xstate, 0, xstate_size);
                finit_task(tsk);
                set_stopped_child_used_math(tsk);
                return 0;
        }
#endif

        if (cpu_has_fxsr) {
                struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;

                memset(fx, 0, xstate_size);
                fx->cwd = 0x37f;
                if (cpu_has_xmm)
                        fx->mxcsr = MXCSR_DEFAULT;
        } else {
                struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
                memset(fp, 0, xstate_size);
                fp->cwd = 0xffff037fu;
                fp->swd = 0xffff0000u;
                fp->twd = 0xffffffffu;
                fp->fos = 0xffff0000u;
        }
        /*
         * Only the device not available exception or ptrace can call init_fpu.
         */
        set_stopped_child_used_math(tsk);
        return 0;
}

/*
 * The xstateregs_active() routine is the same as the fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the
 * feature capabilities supported by the xsave.
 */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
        return tsk_used_math(target) ? regset->n : 0;
}

int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
        return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
}

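/*
 * The xfpregs_{get|set} regset handlers operate directly on the raw fxsave
 * image of the traced task.
 */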
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
{
        int ret;

        if (!cpu_has_fxsr)
                return -ENODEV;

        ret = init_fpu(target);
        if (ret)
                return ret;

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.xstate->fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        int ret;

        if (!cpu_has_fxsr)
                return -ENODEV;

        ret = init_fpu(target);
        if (ret)
                return ret;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.xstate->fxsave, 0, -1);

        /*
         * mxcsr reserved bits must be masked to zero for security reasons.
         */
        target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;

        /*
         * update the header bits in the xsave header, indicating the
         * presence of FP and SSE state.
         */
        if (cpu_has_xsave)
                target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

        return ret;
}

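/*
 * The xstateregs_{get|set} regset handlers export and import the full xsave
 * area of the traced task; on the read side the software reserved bytes are
 * stamped first so that user space can interpret the layout.
 */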
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
{
        int ret;

        if (!cpu_has_xsave)
                return -ENODEV;

        ret = init_fpu(target);
        if (ret)
                return ret;

        /*
         * Copy the 48 bytes defined by the software first into the xstate
         * memory layout in the thread struct, so that we can copy the entire
         * xstateregs to the user using one user_regset_copyout().
         */
        memcpy(&target->thread.xstate->fxsave.sw_reserved,
               xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));

        /*
         * Copy the xstate memory layout.
         */
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.xstate->xsave, 0, -1);
        return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        int ret;
        struct xsave_hdr_struct *xsave_hdr;

        if (!cpu_has_xsave)
                return -ENODEV;

        ret = init_fpu(target);
        if (ret)
                return ret;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.xstate->xsave, 0, -1);

        /*
         * mxcsr reserved bits must be masked to zero for security reasons.
         */
        target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;

        xsave_hdr = &target->thread.xstate->xsave.xsave_hdr;

        xsave_hdr->xstate_bv &= pcntxt_mask;
        /*
         * These bits must be zero.
         */
        xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;

        return ret;
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

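/*
 * Collapse the classic i387 tag word (two bits per register) into the FXSR
 * "abridged" form (one bit per register: 1 = in use, 0 = empty).
 */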
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
        unsigned int tmp; /* to avoid 16 bit prefixes in the code */

        /* Transform each pair of bits into 01 (valid) or 00 (empty) */
        tmp = ~twd;
        tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
        /* and move the valid bits to the lower byte. */
        tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
        tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
        tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

        return tmp;
}

#define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID 0
#define FP_EXP_TAG_ZERO 1
#define FP_EXP_TAG_SPECIAL 2
#define FP_EXP_TAG_EMPTY 3

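/*
 * Expand the abridged FXSR tag byte back into the full i387 tag word by
 * classifying each in-use register (valid, zero or special) from its saved
 * contents; empty slots keep the 11 tag.
 */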
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
        struct _fpxreg *st;
        u32 tos = (fxsave->swd >> 11) & 7;
        u32 twd = (unsigned long) fxsave->twd;
        u32 tag;
        u32 ret = 0xffff0000u;
        int i;

        for (i = 0; i < 8; i++, twd >>= 1) {
                if (twd & 0x1) {
                        st = FPREG_ADDR(fxsave, (i - tos) & 7);

                        switch (st->exponent & 0x7fff) {
                        case 0x7fff:
                                tag = FP_EXP_TAG_SPECIAL;
                                break;
                        case 0x0000:
                                if (!st->significand[0] &&
                                    !st->significand[1] &&
                                    !st->significand[2] &&
                                    !st->significand[3])
                                        tag = FP_EXP_TAG_ZERO;
                                else
                                        tag = FP_EXP_TAG_SPECIAL;
                                break;
                        default:
                                if (st->significand[3] & 0x8000)
                                        tag = FP_EXP_TAG_VALID;
                                else
                                        tag = FP_EXP_TAG_SPECIAL;
                                break;
                        }
                } else {
                        tag = FP_EXP_TAG_EMPTY;
                }
                ret |= tag << (2 * i);
        }
        return ret;
}

/*
 * FXSR floating point environment conversions.
 */

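/*
 * Translate the fxsave image of @tsk into the legacy 32-bit
 * user_i387_ia32_struct layout used by ptrace and ia32 signal frames.
 */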
static void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
        struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
        struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
        struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
        int i;

        env->cwd = fxsave->cwd | 0xffff0000u;
        env->swd = fxsave->swd | 0xffff0000u;
        env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
        env->fip = fxsave->rip;
        env->foo = fxsave->rdp;
        if (tsk == current) {
                /*
                 * This should actually be ds/cs at the time of the FPU
                 * exception, but that information is not available in
                 * 64-bit mode.
                 */
                asm("mov %%ds, %[fos]" : [fos] "=r" (env->fos));
                asm("mov %%cs, %[fcs]" : [fcs] "=r" (env->fcs));
        } else {
                struct pt_regs *regs = task_pt_regs(tsk);

                env->fos = 0xffff0000 | tsk->thread.ds;
                env->fcs = regs->cs;
        }
#else
        env->fip = fxsave->fip;
        env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
        env->foo = fxsave->foo;
        env->fos = fxsave->fos;
#endif

        for (i = 0; i < 8; ++i)
                memcpy(&to[i], &from[i], sizeof(to[0]));
}

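/*
 * The reverse direction: rebuild the fxsave image of @tsk from a legacy
 * user_i387_ia32_struct environment.
 */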
static void convert_to_fxsr(struct task_struct *tsk,
                            const struct user_i387_ia32_struct *env)
{
        struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
        struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
        struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
        int i;

        fxsave->cwd = env->cwd;
        fxsave->swd = env->swd;
        fxsave->twd = twd_i387_to_fxsr(env->twd);
        fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
        fxsave->rip = env->fip;
        fxsave->rdp = env->foo;
        /* cs and ds ignored */
#else
        fxsave->fip = env->fip;
        fxsave->fcs = (env->fcs & 0xffff);
        fxsave->foo = env->foo;
        fxsave->fos = env->fos;
#endif

        for (i = 0; i < 8; ++i)
                memcpy(&to[i], &from[i], sizeof(from[0]));
}

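/*
 * The fpregs_{get|set} regset handlers present the legacy i387 register
 * view, converting to and from the fxsave format when the CPU uses it.
 */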
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
               unsigned int pos, unsigned int count,
               void *kbuf, void __user *ubuf)
{
        struct user_i387_ia32_struct env;
        int ret;

        ret = init_fpu(target);
        if (ret)
                return ret;

        if (!HAVE_HWFP)
                return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

        if (!cpu_has_fxsr) {
                return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                           &target->thread.xstate->fsave, 0,
                                           -1);
        }

        if (kbuf && pos == 0 && count == sizeof(env)) {
                convert_from_fxsr(kbuf, target);
                return 0;
        }

        convert_from_fxsr(&env, target);

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
               unsigned int pos, unsigned int count,
               const void *kbuf, const void __user *ubuf)
{
        struct user_i387_ia32_struct env;
        int ret;

        ret = init_fpu(target);
        if (ret)
                return ret;

        if (!HAVE_HWFP)
                return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

        if (!cpu_has_fxsr) {
                return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.xstate->fsave, 0, -1);
        }

        if (pos > 0 || count < sizeof(env))
                convert_from_fxsr(&env, target);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
        if (!ret)
                convert_to_fxsr(target, &env);

        /*
         * update the header bit in the xsave header, indicating the
         * presence of FP.
         */
        if (cpu_has_xsave)
                target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
        return ret;
}

/*
 * Signal frame handlers.
 */

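/*
 * Three user-space frame formats are produced and consumed here: a plain
 * fsave image, an fsave-style environment followed by the fxsave image
 * (tagged with X86_FXSR_MAGIC), and the full xsave layout terminated by
 * FP_XSTATE_MAGIC2.
 */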
static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
{
        struct task_struct *tsk = current;
        struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;

        fp->status = fp->swd;
        if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
                return -1;
        return 1;
}

static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
{
        struct task_struct *tsk = current;
        struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
        struct user_i387_ia32_struct env;
        int err = 0;

        convert_from_fxsr(&env, tsk);
        if (__copy_to_user(buf, &env, sizeof(env)))
                return -1;

        err |= __put_user(fx->swd, &buf->status);
        err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
        if (err)
                return -1;

        if (__copy_to_user(&buf->_fxsr_env[0], fx, xstate_size))
                return -1;
        return 1;
}

static int save_i387_xsave(void __user *buf)
{
        struct task_struct *tsk = current;
        struct _fpstate_ia32 __user *fx = buf;
        int err = 0;

        /*
         * For legacy compatibility, we always set the FP/SSE bits in the
         * bit vector while saving the state to the user context. This lets
         * us capture, during sigreturn, any changes to the FP/SSE state made
         * by legacy applications which don't touch xstate_bv in the xsave
         * header.
         *
         * xsave-aware applications can change the xstate_bv in the xsave
         * header as well as change any contents in the memory layout.
         * xrestore as part of sigreturn will capture all the changes.
         */
        tsk->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

        if (save_i387_fxsave(fx) < 0)
                return -1;

        err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved_ia32,
                             sizeof(struct _fpx_sw_bytes));
        err |= __put_user(FP_XSTATE_MAGIC2,
                          (__u32 __user *) (buf + sig_xstate_ia32_size
                                            - FP_XSTATE_MAGIC2_SIZE));
        if (err)
                return -1;

        return 1;
}

int save_i387_xstate_ia32(void __user *buf)
{
        struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;
        struct task_struct *tsk = current;

        if (!used_math())
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, sig_xstate_ia32_size))
                return -EACCES;
        /*
         * This will cause a "finit" to be triggered by the next
         * attempted FPU operation by the 'current' process.
         */
        clear_used_math();

        if (!HAVE_HWFP) {
                return fpregs_soft_get(current, NULL,
                                       0, sizeof(struct user_i387_ia32_struct),
                                       NULL, fp) ? -1 : 1;
        }

        unlazy_fpu(tsk);

        if (cpu_has_xsave)
                return save_i387_xsave(fp);
        if (cpu_has_fxsr)
                return save_i387_fxsave(fp);
        else
                return save_i387_fsave(fp);
}

static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
{
        struct task_struct *tsk = current;

        return __copy_from_user(&tsk->thread.xstate->fsave, buf,
                                sizeof(struct i387_fsave_struct));
}

static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
                               unsigned int size)
{
        struct task_struct *tsk = current;
        struct user_i387_ia32_struct env;
        int err;

        err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
                               size);
        /* mxcsr reserved bits must be masked to zero for security reasons */
        tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
        if (err || __copy_from_user(&env, buf, sizeof(env)))
                return 1;
        convert_to_fxsr(tsk, &env);

        return 0;
}

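/*
 * Restore from an xsave-format signal frame: validate the software reserved
 * bytes first and fall back to a plain FXSR restore (fx_only) when they are
 * missing or inconsistent.
 */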
static int restore_i387_xsave(void __user *buf)
{
        struct _fpx_sw_bytes fx_sw_user;
        struct _fpstate_ia32 __user *fx_user =
                        ((struct _fpstate_ia32 __user *) buf);
        struct i387_fxsave_struct __user *fx =
                (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
        struct xsave_hdr_struct *xsave_hdr =
                                &current->thread.xstate->xsave.xsave_hdr;
        u64 mask;
        int err;

        if (check_for_xstate(fx, buf, &fx_sw_user))
                goto fx_only;

        mask = fx_sw_user.xstate_bv;

        err = restore_i387_fxsave(buf, fx_sw_user.xstate_size);

        xsave_hdr->xstate_bv &= pcntxt_mask;
        /*
         * These bits must be zero.
         */
        xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;

        /*
         * Init the state that is not present in the memory layout
         * and enabled by the OS.
         */
        mask = ~(pcntxt_mask & ~mask);
        xsave_hdr->xstate_bv &= mask;

        return err;
fx_only:
        /*
         * Couldn't find the extended state information in the memory
         * layout. Restore the FP/SSE and init the other extended state
         * enabled by the OS.
         */
        xsave_hdr->xstate_bv = XSTATE_FPSSE;
        return restore_i387_fxsave(buf, sizeof(struct i387_fxsave_struct));
}

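/*
 * Top-level ia32 sigreturn path: a NULL buf simply drops any FPU state,
 * otherwise the state area is allocated on demand via init_fpu() and the
 * frame is restored with whichever mechanism the CPU supports.
 */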
int restore_i387_xstate_ia32(void __user *buf)
{
        int err;
        struct task_struct *tsk = current;
        struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;

        if (HAVE_HWFP)
                clear_fpu(tsk);

        if (!buf) {
                if (used_math()) {
                        clear_fpu(tsk);
                        clear_used_math();
                }

                return 0;
        } else
                if (!access_ok(VERIFY_READ, buf, sig_xstate_ia32_size))
                        return -EACCES;

        if (!used_math()) {
                err = init_fpu(tsk);
                if (err)
                        return err;
        }

        if (HAVE_HWFP) {
                if (cpu_has_xsave)
                        err = restore_i387_xsave(buf);
                else if (cpu_has_fxsr)
                        err = restore_i387_fxsave(fp, sizeof(struct
                                                          i387_fxsave_struct));
                else
                        err = restore_i387_fsave(fp);
        } else {
                err = fpregs_soft_set(current, NULL,
                                      0, sizeof(struct user_i387_ia32_struct),
                                      NULL, fp) != 0;
        }
        set_used_math();

        return err;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
{
        struct task_struct *tsk = current;
        int fpvalid;

        fpvalid = !!used_math();
        if (fpvalid)
                fpvalid = !fpregs_get(tsk, NULL,
                                      0, sizeof(struct user_i387_ia32_struct),
                                      fpu, NULL);

        return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */