Merge tag 'v3.10.90' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / arm64 / kernel / signal32.c
1 /*
2 * Based on arch/arm/kernel/signal.c
3 *
4 * Copyright (C) 1995-2009 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 * Modified by Will Deacon <will.deacon@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include <linux/compat.h>
22 #include <linux/signal.h>
23 #include <linux/syscalls.h>
24 #include <linux/ratelimit.h>
25
26 #include <asm/fpsimd.h>
27 #include <asm/signal32.h>
28 #include <asm/uaccess.h>
29 #include <asm/unistd.h>
30
/*
 * AArch32 view of the sigcontext written to the user signal stack.
 * This mirrors struct sigcontext from arch/arm and is user ABI:
 * do not reorder, resize or remove fields.
 */
struct compat_sigcontext {
	/* We always set these two fields to 0 */
	compat_ulong_t			trap_no;
	compat_ulong_t			error_code;

	compat_ulong_t			oldmask;	/* low 32 bits of the old signal mask */
	compat_ulong_t			arm_r0;
	compat_ulong_t			arm_r1;
	compat_ulong_t			arm_r2;
	compat_ulong_t			arm_r3;
	compat_ulong_t			arm_r4;
	compat_ulong_t			arm_r5;
	compat_ulong_t			arm_r6;
	compat_ulong_t			arm_r7;
	compat_ulong_t			arm_r8;
	compat_ulong_t			arm_r9;
	compat_ulong_t			arm_r10;
	compat_ulong_t			arm_fp;		/* r11 */
	compat_ulong_t			arm_ip;		/* r12 */
	compat_ulong_t			arm_sp;		/* r13 */
	compat_ulong_t			arm_lr;		/* r14 */
	compat_ulong_t			arm_pc;		/* r15 */
	compat_ulong_t			arm_cpsr;	/* taken from/restored to pstate */
	compat_ulong_t			fault_address;
};
56
/*
 * AArch32 view of struct ucontext. User ABI: layout must match the
 * 32-bit ARM definition exactly.
 */
struct compat_ucontext {
	compat_ulong_t			uc_flags;
	compat_uptr_t			uc_link;
	compat_stack_t			uc_stack;
	struct compat_sigcontext	uc_mcontext;
	compat_sigset_t			uc_sigmask;
	/* Pad uc_sigmask out to 32 ints, as on 32-bit ARM. */
	int		__unused[32 - (sizeof (compat_sigset_t) / sizeof (int))];
	/* Coprocessor (VFP) state lives here; see struct compat_aux_sigframe. */
	compat_ulong_t	uc_regspace[128] __attribute__((__aligned__(8)));
};
66
/*
 * VFP state saved into uc_regspace, tagged with a magic/size header so
 * userspace (and sigreturn) can identify and validate it. User ABI.
 */
struct compat_vfp_sigframe {
	compat_ulong_t	magic;		/* must be VFP_MAGIC */
	compat_ulong_t	size;		/* must be VFP_STORAGE_SIZE */
	struct compat_user_vfp {
		compat_u64	fpregs[32];	/* D0-D31 */
		compat_ulong_t	fpscr;		/* combined fpsr | fpcr view */
	} ufp;
	struct compat_user_vfp_exc {
		/* Exception registers are faked up on save; see
		 * compat_preserve_vfp_context(). */
		compat_ulong_t	fpexc;
		compat_ulong_t	fpinst;
		compat_ulong_t	fpinst2;
	} ufp_exc;
} __attribute__((__aligned__(8)));

#define VFP_MAGIC		0x56465001
#define VFP_STORAGE_SIZE	sizeof(struct compat_vfp_sigframe)
83
/*
 * Auxiliary context placed in uc_regspace: currently just the VFP state,
 * terminated by a zero "end" word.
 */
struct compat_aux_sigframe {
	struct compat_vfp_sigframe	vfp;

	/* Something that isn't a valid magic number for any coprocessor. */
	unsigned long			end_magic;
} __attribute__((__aligned__(8)));

/* Frame pushed onto the user stack for a classic (non-RT) signal. */
struct compat_sigframe {
	struct compat_ucontext	uc;
	compat_ulong_t		retcode[2];
};

/* Frame pushed for an RT signal: siginfo followed by the basic frame. */
struct compat_rt_sigframe {
	struct compat_siginfo info;
	struct compat_sigframe sig;
};
100
/* Signals that can never be masked out. */
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/*
 * For ARM syscalls, the syscall number has to be loaded into r7.
 * We do not support an OABI userspace.
 *
 * These are the A32 encodings of "mov r7, #nr" and "svc #nr" (the svc
 * immediate is ignored for EABI but makes the disassembly readable).
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | __NR_compat_sigreturn)
#define SVC_SYS_SIGRETURN	(0xef000000 | __NR_compat_sigreturn)
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | __NR_compat_rt_sigreturn)
#define SVC_SYS_RT_SIGRETURN	(0xef000000 | __NR_compat_rt_sigreturn)

/*
 * For Thumb syscalls, we also pass the syscall number via r7. We therefore
 * need two 16-bit instructions packed into one 32-bit word:
 * "movs r7, #nr" (0x27xx) in the low half, "svc #nr" (0xdfxx) in the high.
 */
#define SVC_THUMB_SIGRETURN	(((0xdf00 | __NR_compat_sigreturn) << 16) | \
				 0x2700 | __NR_compat_sigreturn)
#define SVC_THUMB_RT_SIGRETURN	(((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \
				 0x2700 | __NR_compat_rt_sigreturn)
120
const compat_ulong_t aarch32_sigret_code[6] = {
	/*
	 * AArch32 sigreturn code.
	 * We don't construct an OABI SWI - instead we just set the imm24 field
	 * to the EABI syscall number so that we create a sane disassembly.
	 *
	 * Layout (indexed by compat_setup_return()): words 0-2 are the
	 * sigreturn trampoline (ARM mov/svc pair, then the combined Thumb
	 * pair), words 3-5 are the same triplet for rt_sigreturn.
	 */
	MOV_R7_NR_SIGRETURN,    SVC_SYS_SIGRETURN,    SVC_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN,
};
130
131 static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
132 {
133 compat_sigset_t cset;
134
135 cset.sig[0] = set->sig[0] & 0xffffffffull;
136 cset.sig[1] = set->sig[0] >> 32;
137
138 return copy_to_user(uset, &cset, sizeof(*uset));
139 }
140
141 static inline int get_sigset_t(sigset_t *set,
142 const compat_sigset_t __user *uset)
143 {
144 compat_sigset_t s32;
145
146 if (copy_from_user(&s32, uset, sizeof(*uset)))
147 return -EFAULT;
148
149 set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
150 return 0;
151 }
152
/*
 * Convert a native siginfo_t to its compat layout and copy it to
 * userspace. Only the fields relevant to from->si_code's class are
 * written, to avoid leaking kernel stack padding. Returns 0 on
 * success, non-zero on fault.
 */
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 * This routine must convert siginfo from 64bit to 32bit as well
	 * at the same time.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	if (from->si_code < 0)
		/* Negative si_code: userspace-originated, copy the raw pad. */
		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad,
				      SI_PAD_SIZE);
	else switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_int, &to->si_int);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		/* Narrow the 64-bit fault address to a compat pointer. */
		err |= __put_user((compat_uptr_t)(unsigned long)from->si_addr,
				  &to->si_addr);
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitely for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_int, &to->si_int);
		break;
#ifdef __ARCH_SIGSYS
	case __SI_SYS:
		err |= __put_user((compat_uptr_t)(unsigned long)
				from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}
229
/*
 * Copy a compat siginfo from userspace into a native siginfo_t.
 * The preamble (signo/errno/code) and the union payload are copied
 * raw — no per-field widening is done here. Returns 0 on success,
 * -EFAULT on fault.
 */
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
	if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
	    copy_from_user(to->_sifields._pad,
			   from->_sifields._pad, SI_PAD_SIZE))
		return -EFAULT;

	return 0;
}
239
240 /*
241 * VFP save/restore code.
242 *
243 * We have to be careful with endianness, since the fpsimd context-switch
244 * code operates on 128-bit (Q) register values whereas the compat ABI
245 * uses an array of 64-bit (D) registers. Consequently, we need to swap
246 * the two halves of each Q register when running on a big-endian CPU.
247 */
/*
 * One 128-bit FPSIMD Q register, viewed either raw or as two 64-bit
 * halves. The hi/lo ordering is swapped on big-endian so that "lo"
 * always names the architecturally-lower D register of the pair.
 */
union __fpsimd_vreg {
	__uint128_t raw;
	struct {
#ifdef __AARCH64EB__
		u64 hi;
		u64 lo;
#else
		u64 lo;
		u64 hi;
#endif
	};
};
260
/*
 * Write the current task's FPSIMD state to the user signal frame in
 * the AArch32 VFP format (D0-D31 + fpscr, plus faked-up exception
 * registers). Returns 0 on success, -EFAULT on any fault.
 */
static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{
	struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
	compat_ulong_t magic = VFP_MAGIC;
	compat_ulong_t size = VFP_STORAGE_SIZE;
	compat_ulong_t fpscr, fpexc;
	int i, err = 0;

	/*
	 * Save the hardware registers to the fpsimd_state structure.
	 * Note that this also saves V16-31, which aren't visible
	 * in AArch32.
	 */
	fpsimd_preserve_current_state();

	/* Place structure header on the stack */
	__put_user_error(magic, &frame->magic, err);
	__put_user_error(size, &frame->size, err);

	/*
	 * Now copy the FP registers. Since the registers are packed,
	 * we can copy the prefix we want (V0-V15) as it is.
	 *
	 * Each 128-bit V register provides two consecutive 64-bit
	 * AArch32 D registers, endian-swapped via union __fpsimd_vreg.
	 */
	for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
		union __fpsimd_vreg vreg = {
			.raw = fpsimd->vregs[i >> 1],
		};

		__put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
		__put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
	}

	/* Create an AArch32 fpscr from the fpsr and the fpcr. */
	fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
		(fpsimd->fpcr & VFP_FPSCR_CTRL_MASK);
	__put_user_error(fpscr, &frame->ufp.fpscr, err);

	/*
	 * The exception register aren't available so we fake up a
	 * basic FPEXC and zero everything else.
	 */
	fpexc = (1 << 30);	/* FPEXC.EN: "VFP enabled" */
	__put_user_error(fpexc, &frame->ufp_exc.fpexc, err);
	__put_user_error(0, &frame->ufp_exc.fpinst, err);
	__put_user_error(0, &frame->ufp_exc.fpinst2, err);

	return err ? -EFAULT : 0;
}
309
/*
 * Validate and read the AArch32 VFP state back from the user signal
 * frame and load it into the hardware. Returns 0 on success, -EFAULT
 * on fault, -EINVAL if the magic/size header doesn't match.
 */
static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
{
	struct fpsimd_state fpsimd;
	compat_ulong_t magic = VFP_MAGIC;
	compat_ulong_t size = VFP_STORAGE_SIZE;
	compat_ulong_t fpscr;
	int i, err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	/* Copy the FP registers into the start of the fpsimd_state. */
	for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
		union __fpsimd_vreg vreg;

		__get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
		__get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
		fpsimd.vregs[i >> 1] = vreg.raw;
	}

	/* Extract the fpsr and the fpcr from the fpscr */
	__get_user_error(fpscr, &frame->ufp.fpscr, err);
	fpsimd.fpsr = fpscr & VFP_FPSCR_STAT_MASK;
	fpsimd.fpcr = fpscr & VFP_FPSCR_CTRL_MASK;

	/*
	 * We don't need to touch the exception register, so
	 * reload the hardware state.
	 */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}
349
/*
 * Restore the task's signal mask, AArch32 registers and VFP state from
 * a user signal frame. Returns 0 on success, non-zero if any read
 * faulted or the restored register state is invalid.
 */
static int compat_restore_sigframe(struct pt_regs *regs,
				   struct compat_sigframe __user *sf)
{
	int err;
	sigset_t set;
	struct compat_aux_sigframe __user *aux;

	err = get_sigset_t(&set, &sf->uc.uc_sigmask);
	if (err == 0) {
		/* SIGKILL/SIGSTOP may never be blocked. */
		sigdelsetmask(&set, ~_BLOCKABLE);
		set_current_blocked(&set);
	}

	/* AArch32 r0-r12 map onto the low 13 AArch64 registers. */
	__get_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);

	/*
	 * Avoid compat_sys_sigreturn() restarting.
	 */
	regs->syscallno = ~0UL;

	/* Refuse register state userspace isn't allowed to set. */
	err |= !valid_user_regs(&regs->user_regs);

	aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
	if (err == 0)
		err |= compat_restore_vfp_context(&aux->vfp);

	return err;
}
394
/*
 * sigreturn syscall for AArch32 tasks: tear down the classic signal
 * frame at the current compat SP. On any inconsistency the task is
 * sent SIGSEGV.
 */
asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
{
	struct compat_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here. If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->compat_sp & 7)
		goto badframe;

	frame = (struct compat_sigframe __user *)regs->compat_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (compat_restore_sigframe(regs, frame))
		goto badframe;

	/* Return the restored r0 so it isn't clobbered by the syscall. */
	return regs->regs[0];

badframe:
	if (show_unhandled_signals)
		pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
				    current->comm, task_pid_nr(current), __func__,
				    regs->pc, regs->sp);
	force_sig(SIGSEGV, current);
	return 0;
}
428
/*
 * rt_sigreturn syscall for AArch32 tasks: like compat_sys_sigreturn()
 * but for the RT frame, which additionally carries siginfo and the
 * alternate signal stack settings to restore.
 */
asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
{
	struct compat_rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here. If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->compat_sp & 7)
		goto badframe;

	frame = (struct compat_rt_sigframe __user *)regs->compat_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (compat_restore_sigframe(regs, &frame->sig))
		goto badframe;

	/* Restore the alternate signal stack recorded in the frame. */
	if (compat_restore_altstack(&frame->sig.uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	if (show_unhandled_signals)
		pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
				    current->comm, task_pid_nr(current), __func__,
				    regs->pc, regs->sp);
	force_sig(SIGSEGV, current);
	return 0;
}
465
466 static void __user *compat_get_sigframe(struct k_sigaction *ka,
467 struct pt_regs *regs,
468 int framesize)
469 {
470 compat_ulong_t sp = regs->compat_sp;
471 void __user *frame;
472
473 /*
474 * This is the X/Open sanctioned signal stack switching.
475 */
476 if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
477 sp = current->sas_ss_sp + current->sas_ss_size;
478
479 /*
480 * ATPCS B01 mandates 8-byte alignment
481 */
482 frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7));
483
484 /*
485 * Check that we can actually write to the signal frame.
486 */
487 if (!access_ok(VERIFY_WRITE, frame, framesize))
488 frame = NULL;
489
490 return frame;
491 }
492
493 static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
494 compat_ulong_t __user *rc, void __user *frame,
495 int usig)
496 {
497 compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
498 compat_ulong_t retcode;
499 compat_ulong_t spsr = regs->pstate & ~PSR_f;
500 int thumb;
501
502 /* Check if the handler is written for ARM or Thumb */
503 thumb = handler & 1;
504
505 if (thumb) {
506 spsr |= COMPAT_PSR_T_BIT;
507 spsr &= ~COMPAT_PSR_IT_MASK;
508 } else {
509 spsr &= ~COMPAT_PSR_T_BIT;
510 }
511
512 if (ka->sa.sa_flags & SA_RESTORER) {
513 retcode = ptr_to_compat(ka->sa.sa_restorer);
514 } else {
515 /* Set up sigreturn pointer */
516 unsigned int idx = thumb << 1;
517
518 if (ka->sa.sa_flags & SA_SIGINFO)
519 idx += 3;
520
521 retcode = AARCH32_VECTORS_BASE +
522 AARCH32_KERN_SIGRET_CODE_OFFSET +
523 (idx << 2) + thumb;
524 }
525
526 regs->regs[0] = usig;
527 regs->compat_sp = ptr_to_compat(frame);
528 regs->compat_lr = retcode;
529 regs->pc = handler;
530 regs->pstate = spsr;
531 }
532
/*
 * Fill in the ucontext part of a signal frame: the AArch32 register
 * snapshot, fault address, signal mask and the auxiliary VFP state.
 * Returns 0 on success, non-zero if any user write faulted.
 */
static int compat_setup_sigframe(struct compat_sigframe __user *sf,
				 struct pt_regs *regs, sigset_t *set)
{
	struct compat_aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);

	/* trap_no/error_code are always reported as zero on arm64. */
	__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
	/* Legacy field: low 32 bits of the old signal mask. */
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= put_sigset_t(&sf->uc.uc_sigmask, set);

	/* VFP state is stashed in uc_regspace, terminated by end_magic = 0. */
	aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;

	if (err == 0)
		err |= compat_preserve_vfp_context(&aux->vfp);
	__put_user_error(0, &aux->end_magic, err);

	return err;
}
572
573 /*
574 * 32-bit signal handling routines called from signal.c
575 */
/*
 * Build an RT signal frame on the user stack and redirect execution to
 * the handler, with r1/r2 pointing at the siginfo and ucontext as the
 * AArch32 rt ABI expects. Returns 0 on success, non-zero on failure
 * (including an unwritable stack).
 */
int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
			  sigset_t *set, struct pt_regs *regs)
{
	struct compat_rt_sigframe __user *frame;
	int err = 0;

	frame = compat_get_sigframe(ka, regs, sizeof(*frame));

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user32(&frame->info, info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(0, &frame->sig.uc.uc_link, err);

	/* Record the current altstack settings for rt_sigreturn. */
	err |= __compat_save_altstack(&frame->sig.uc.uc_stack, regs->compat_sp);

	err |= compat_setup_sigframe(&frame->sig, regs, set);

	if (err == 0) {
		compat_setup_return(regs, ka, frame->sig.retcode, frame, usig);
		/* r1 = &siginfo, r2 = &ucontext for the 3-argument handler. */
		regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info;
		regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc;
	}

	return err;
}
604
/*
 * Build a classic (non-RT) signal frame on the user stack and redirect
 * execution to the handler. Returns 0 on success, non-zero on failure.
 */
int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set,
		       struct pt_regs *regs)
{
	struct compat_sigframe __user *frame;
	int err = 0;

	frame = compat_get_sigframe(ka, regs, sizeof(*frame));

	if (!frame)
		return 1;

	/*
	 * Magic uc_flags value, as on 32-bit ARM — presumably marks the
	 * frame as a non-RT one; TODO confirm against arch/arm.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= compat_setup_sigframe(frame, regs, set);
	if (err == 0)
		compat_setup_return(regs, ka, frame->retcode, frame, usig);

	return err;
}
624
/*
 * Arrange for the AArch32 restart_syscall to run: the syscall number
 * is passed in r7 per the EABI convention.
 */
void compat_setup_restart_syscall(struct pt_regs *regs)
{
	regs->regs[7] = __NR_compat_restart_syscall;
}