[IA64] regset: 64-bit support
arch/ia64/kernel/ptrace.c
1/*
2 * Kernel support for the ptrace() and syscall tracing interfaces.
3 *
4 * Copyright (C) 1999-2005 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 * Copyright (C) 2006 Intel Co
7 * 2006-08-12 - IA64 Native Utrace implementation support added by
8 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
9 *
10 * Derived from the x86 and Alpha versions.
11 */
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/slab.h>
15#include <linux/mm.h>
16#include <linux/errno.h>
17#include <linux/ptrace.h>
18#include <linux/smp_lock.h>
19#include <linux/user.h>
20#include <linux/security.h>
21#include <linux/audit.h>
22#include <linux/signal.h>
23#include <linux/regset.h>
24#include <linux/elf.h>
25
26#include <asm/pgtable.h>
27#include <asm/processor.h>
28#include <asm/ptrace_offsets.h>
29#include <asm/rse.h>
30#include <asm/system.h>
31#include <asm/uaccess.h>
32#include <asm/unwind.h>
33#ifdef CONFIG_PERFMON
34#include <asm/perfmon.h>
35#endif
36
37#include "entry.h"
38
39/*
40 * Bits in the PSR that we allow ptrace() to change:
41 * be, up, ac, mfl, mfh (the user mask; five bits total)
42 * db (debug breakpoint fault; one bit)
43 * id (instruction debug fault disable; one bit)
44 * dd (data debug fault disable; one bit)
45 * ri (restart instruction; two bits)
46 * is (instruction set; one bit)
47 */
48#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
49 | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
50
51#define MASK(nbits) ((1UL << (nbits)) - 1) /* mask with NBITS bits set */
52#define PFM_MASK MASK(38)
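/*
 * Worked example (editor's note): MASK(3) == 0x7. PFM_MASK therefore
 * covers the low 38 bits of cr.ifs, which is where the 38-bit CFM
 * (sof, sol, sor and the rotating-register bases) lives.
 */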
53
54#define PTRACE_DEBUG 0
55
56#if PTRACE_DEBUG
57# define dprintk(format...) printk(format)
58# define inline
59#else
60# define dprintk(format...)
61#endif
62
63/* Return TRUE if PT was created due to kernel-entry via a system-call. */
64
65static inline int
66in_syscall (struct pt_regs *pt)
67{
68 return (long) pt->cr_ifs >= 0;
69}
70
71/*
72 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
73 * bitset where bit i is set iff the NaT bit of register i is set.
74 */
75unsigned long
76ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
77{
78# define GET_BITS(first, last, unat) \
79 ({ \
80 unsigned long bit = ia64_unat_pos(&pt->r##first); \
81 unsigned long nbits = (last - first + 1); \
82 unsigned long mask = MASK(nbits) << first; \
83 unsigned long dist; \
84 if (bit < first) \
85 dist = 64 + bit - first; \
86 else \
87 dist = bit - first; \
88 ia64_rotr(unat, dist) & mask; \
89 })
90 unsigned long val;
91
92 /*
93 * Registers that are stored consecutively in struct pt_regs
94 * can be handled in parallel. If the register order in
95 * struct_pt_regs changes, this code MUST be updated.
96 */
97 val = GET_BITS( 1, 1, scratch_unat);
98 val |= GET_BITS( 2, 3, scratch_unat);
99 val |= GET_BITS(12, 13, scratch_unat);
100 val |= GET_BITS(14, 14, scratch_unat);
101 val |= GET_BITS(15, 15, scratch_unat);
102 val |= GET_BITS( 8, 11, scratch_unat);
103 val |= GET_BITS(16, 31, scratch_unat);
104 return val;
105
106# undef GET_BITS
107}
108
109/*
110 * Set the NaT bits for the scratch registers according to NAT and
111 * return the resulting unat (assuming the scratch registers are
112 * stored in PT).
113 */
114unsigned long
115ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
116{
117# define PUT_BITS(first, last, nat) \
118 ({ \
119 unsigned long bit = ia64_unat_pos(&pt->r##first); \
120 unsigned long nbits = (last - first + 1); \
121 unsigned long mask = MASK(nbits) << first; \
122 long dist; \
123 if (bit < first) \
124 dist = 64 + bit - first; \
125 else \
126 dist = bit - first; \
127 ia64_rotl(nat & mask, dist); \
128 })
129 unsigned long scratch_unat;
130
131 /*
132 * Registers that are stored consecutively in struct pt_regs
133 * can be handled in parallel. If the register order in
134 * struct_pt_regs changes, this code MUST be updated.
135 */
136 scratch_unat = PUT_BITS( 1, 1, nat);
137 scratch_unat |= PUT_BITS( 2, 3, nat);
138 scratch_unat |= PUT_BITS(12, 13, nat);
139 scratch_unat |= PUT_BITS(14, 14, nat);
140 scratch_unat |= PUT_BITS(15, 15, nat);
141 scratch_unat |= PUT_BITS( 8, 11, nat);
142 scratch_unat |= PUT_BITS(16, 31, nat);
143
144 return scratch_unat;
145
146# undef PUT_BITS
147}
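/*
 * Illustrative property (editor's note, not in the original source): for
 * the scratch registers handled above (r1-r3, r8-r31),
 *	nat == ia64_get_scratch_nat_bits(pt, ia64_put_scratch_nat_bits(pt, nat))
 * which is what lets the ptrace/regset paths below treat the NaT bits as
 * a simple user-visible 64-bit bitset.
 */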
148
149#define IA64_MLX_TEMPLATE 0x2
150#define IA64_MOVL_OPCODE 6
151
152void
153ia64_increment_ip (struct pt_regs *regs)
154{
155 unsigned long w0, ri = ia64_psr(regs)->ri + 1;
156
157 if (ri > 2) {
158 ri = 0;
159 regs->cr_iip += 16;
160 } else if (ri == 2) {
161 get_user(w0, (char __user *) regs->cr_iip + 0);
162 if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
163 /*
164 * rfi'ing to slot 2 of an MLX bundle causes
165 * an illegal operation fault. We don't want
166 * that to happen...
167 */
168 ri = 0;
169 regs->cr_iip += 16;
170 }
171 }
172 ia64_psr(regs)->ri = ri;
173}
174
175void
176ia64_decrement_ip (struct pt_regs *regs)
177{
178 unsigned long w0, ri = ia64_psr(regs)->ri - 1;
179
180 if (ia64_psr(regs)->ri == 0) {
181 regs->cr_iip -= 16;
182 ri = 2;
183 get_user(w0, (char __user *) regs->cr_iip + 0);
184 if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
185 /*
186 * rfi'ing to slot 2 of an MLX bundle causes
187 * an illegal operation fault. We don't want
188 * that to happen...
189 */
190 ri = 1;
191 }
192 }
193 ia64_psr(regs)->ri = ri;
194}
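/*
 * Background for the two helpers above: an IA-64 "instruction pointer" is
 * a 16-byte-aligned bundle address (cr.iip) plus a slot number 0..2
 * (psr.ri), so stepping the IP means bumping psr.ri and rolling over into
 * cr.iip += 16 once the bundle is exhausted (with the MLX slot-2 special
 * case handled above).
 */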
195
196/*
197 * This routine is used to read the RNaT bits that are stored on the
198 * kernel backing store. Since, in general, the alignments of the user
199 * and kernel backing stores are different, this is not completely trivial. In
200 * essence, we need to construct the user RNAT based on up to two
201 * kernel RNAT values and/or the RNAT value saved in the child's
202 * pt_regs.
203 *
204 * user rbs
205 *
206 * +--------+ <-- lowest address
207 * | slot62 |
208 * +--------+
209 * | rnat | 0x....1f8
210 * +--------+
211 * | slot00 | \
212 * +--------+ |
213 * | slot01 | > child_regs->ar_rnat
214 * +--------+ |
215 * | slot02 | / kernel rbs
216 * +--------+ +--------+
217 * <- child_regs->ar_bspstore | slot61 | <-- krbs
218 * +- - - - + +--------+
219 * | slot62 |
220 * +- - - - + +--------+
221 * | rnat |
222 * +- - - - + +--------+
223 * vrnat | slot00 |
224 * +- - - - + +--------+
225 * = =
226 * +--------+
227 * | slot00 | \
228 * +--------+ |
229 * | slot01 | > child_stack->ar_rnat
230 * +--------+ |
231 * | slot02 | /
232 * +--------+
233 * <--- child_stack->ar_bspstore
234 *
235 * The way to think of this code is as follows: bit 0 in the user rnat
236 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
237 * values. The kernel rnat value holding this bit is stored in the
238 * variable rnat0. rnat1 is loaded with the kernel rnat value that
239 * forms the upper bits of the user rnat value.
240 *
241 * Boundary cases:
242 *
243 * o when reading the rnat "below" the first rnat slot on the kernel
244 * backing store, rnat0/rnat1 are set to 0 and the low order bits are
245 * merged in from pt->ar_rnat.
246 *
247 * o when reading the rnat "above" the last rnat slot on the kernel
248 * backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
249 */
250static unsigned long
251get_rnat (struct task_struct *task, struct switch_stack *sw,
252 unsigned long *krbs, unsigned long *urnat_addr,
253 unsigned long *urbs_end)
254{
255 unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
256 unsigned long umask = 0, mask, m;
257 unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
258 long num_regs, nbits;
259 struct pt_regs *pt;
260
261 pt = task_pt_regs(task);
262 kbsp = (unsigned long *) sw->ar_bspstore;
263 ubspstore = (unsigned long *) pt->ar_bspstore;
264
265 if (urbs_end < urnat_addr)
266 nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
267 else
268 nbits = 63;
269 mask = MASK(nbits);
270 /*
271 * First, figure out which bit number slot 0 in user-land maps
272 * to in the kernel rnat. Do this by figuring out how many
273 * register slots we're beyond the user's backingstore and
274 * then computing the equivalent address in kernel space.
275 */
276 num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
277 slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
278 shift = ia64_rse_slot_num(slot0_kaddr);
279 rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
280 rnat0_kaddr = rnat1_kaddr - 64;
281
282 if (ubspstore + 63 > urnat_addr) {
283 /* some bits need to be merged in from pt->ar_rnat */
284 umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
285 urnat = (pt->ar_rnat & umask);
286 mask &= ~umask;
287 if (!mask)
288 return urnat;
289 }
290
291 m = mask << shift;
292 if (rnat0_kaddr >= kbsp)
293 rnat0 = sw->ar_rnat;
294 else if (rnat0_kaddr > krbs)
295 rnat0 = *rnat0_kaddr;
296 urnat |= (rnat0 & m) >> shift;
297
298 m = mask >> (63 - shift);
299 if (rnat1_kaddr >= kbsp)
300 rnat1 = sw->ar_rnat;
301 else if (rnat1_kaddr > krbs)
302 rnat1 = *rnat1_kaddr;
303 urnat |= (rnat1 & m) << (63 - shift);
304 return urnat;
305}
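/*
 * For reference (see <asm/rse.h>): the backing store places one RNaT
 * collection slot after every 63 register slots, at addresses whose bits
 * 3..8 are all ones, e.g.
 *	ia64_rse_is_rnat_slot(addr) == ((((unsigned long) addr >> 3) & 0x3f) == 0x3f)
 * which is the layout the slot/shift arithmetic above relies on.
 */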
306
307/*
308 * The reverse of get_rnat.
309 */
310static void
311put_rnat (struct task_struct *task, struct switch_stack *sw,
312 unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
313 unsigned long *urbs_end)
314{
315 unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
316 unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
317 long num_regs, nbits;
318 struct pt_regs *pt;
319 unsigned long cfm, *urbs_kargs;
320
321 pt = task_pt_regs(task);
322 kbsp = (unsigned long *) sw->ar_bspstore;
323 ubspstore = (unsigned long *) pt->ar_bspstore;
324
325 urbs_kargs = urbs_end;
326 if (in_syscall(pt)) {
327 /*
328 * If entered via syscall, don't allow user to set rnat bits
329 * for syscall args.
330 */
331 cfm = pt->cr_ifs;
332 urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
333 }
334
335 if (urbs_kargs >= urnat_addr)
336 nbits = 63;
337 else {
338 if ((urnat_addr - 63) >= urbs_kargs)
339 return;
340 nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
341 }
342 mask = MASK(nbits);
343
344 /*
345 * First, figure out which bit number slot 0 in user-land maps
346 * to in the kernel rnat. Do this by figuring out how many
347 * register slots we're beyond the user's backingstore and
348 * then computing the equivalent address in kernel space.
349 */
350 num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
351 slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
352 shift = ia64_rse_slot_num(slot0_kaddr);
353 rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
354 rnat0_kaddr = rnat1_kaddr - 64;
355
356 if (ubspstore + 63 > urnat_addr) {
357 /* some bits need to be placed in pt->ar_rnat: */
358 umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
359 pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
360 mask &= ~umask;
361 if (!mask)
362 return;
363 }
364 /*
365 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
366 * rnat slot is ignored, so we don't have to clear it here.
367 */
368 rnat0 = (urnat << shift);
369 m = mask << shift;
370 if (rnat0_kaddr >= kbsp)
371 sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
372 else if (rnat0_kaddr > krbs)
373 *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
374
375 rnat1 = (urnat >> (63 - shift));
376 m = mask >> (63 - shift);
377 if (rnat1_kaddr >= kbsp)
378 sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
379 else if (rnat1_kaddr > krbs)
380 *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
381}
382
383static inline int
384on_kernel_rbs (unsigned long addr, unsigned long bspstore,
385 unsigned long urbs_end)
386{
387 unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
388 urbs_end);
389 return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
390}
391
392/*
393 * Read a word from the user-level backing store of task CHILD. ADDR
394 * is the user-level address to read the word from, VAL a pointer to
395 * the return value, and USER_BSP gives the end of the user-level
396 * backing store (i.e., it's the address that would be in ar.bsp after
397 * the user executed a "cover" instruction).
398 *
399 * This routine takes care of accessing the kernel register backing
400 * store for those registers that got spilled there. It also takes
401 * care of calculating the appropriate RNaT collection words.
402 */
403long
404ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
405 unsigned long user_rbs_end, unsigned long addr, long *val)
406{
407 unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
408 struct pt_regs *child_regs;
409 size_t copied;
410 long ret;
411
412 urbs_end = (long *) user_rbs_end;
413 laddr = (unsigned long *) addr;
414 child_regs = task_pt_regs(child);
415 bspstore = (unsigned long *) child_regs->ar_bspstore;
416 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
417 if (on_kernel_rbs(addr, (unsigned long) bspstore,
418 (unsigned long) urbs_end))
419 {
420 /*
421 * Attempt to read the RBS in an area that's actually
422 * on the kernel RBS => read the corresponding bits in
423 * the kernel RBS.
424 */
425 rnat_addr = ia64_rse_rnat_addr(laddr);
426 ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
427
428 if (laddr == rnat_addr) {
429 /* return NaT collection word itself */
430 *val = ret;
431 return 0;
432 }
433
434 if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
435 /*
436 * It is implementation dependent whether the
437 * data portion of a NaT value gets saved on a
438 * st8.spill or RSE spill (e.g., see EAS 2.6,
439 * 4.4.4.6 Register Spill and Fill). To get
440 * consistent behavior across all possible
441 * IA-64 implementations, we return zero in
442 * this case.
443 */
444 *val = 0;
445 return 0;
446 }
447
448 if (laddr < urbs_end) {
449 /*
450 * The desired word is on the kernel RBS and
451 * is not a NaT.
452 */
453 regnum = ia64_rse_num_regs(bspstore, laddr);
454 *val = *ia64_rse_skip_regs(krbs, regnum);
455 return 0;
456 }
457 }
458 copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
459 if (copied != sizeof(ret))
460 return -EIO;
461 *val = ret;
462 return 0;
463}
464
465long
466ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
467 unsigned long user_rbs_end, unsigned long addr, long val)
468{
469 unsigned long *bspstore, *krbs, regnum, *laddr;
470 unsigned long *urbs_end = (long *) user_rbs_end;
471 struct pt_regs *child_regs;
472
473 laddr = (unsigned long *) addr;
474 child_regs = task_pt_regs(child);
475 bspstore = (unsigned long *) child_regs->ar_bspstore;
476 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
477 if (on_kernel_rbs(addr, (unsigned long) bspstore,
478 (unsigned long) urbs_end))
479 {
480 /*
481 * Attempt to write the RBS in an area that's actually
482 * on the kernel RBS => write the corresponding bits
483 * in the kernel RBS.
484 */
485 if (ia64_rse_is_rnat_slot(laddr))
486 put_rnat(child, child_stack, krbs, laddr, val,
487 urbs_end);
488 else {
489 if (laddr < urbs_end) {
490 regnum = ia64_rse_num_regs(bspstore, laddr);
491 *ia64_rse_skip_regs(krbs, regnum) = val;
492 }
493 }
494 } else if (access_process_vm(child, addr, &val, sizeof(val), 1)
495 != sizeof(val))
496 return -EIO;
497 return 0;
498}
499
500/*
501 * Calculate the address of the end of the user-level register backing
502 * store. This is the address that would have been stored in ar.bsp
503 * if the user had executed a "cover" instruction right before
504 * entering the kernel. If CFMP is not NULL, it is used to return the
505 * "current frame mask" that was active at the time the kernel was
506 * entered.
507 */
508unsigned long
509ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
510 unsigned long *cfmp)
511{
512 unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
513 long ndirty;
514
515 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
516 bspstore = (unsigned long *) pt->ar_bspstore;
517 ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
518
519 if (in_syscall(pt))
520 ndirty += (cfm & 0x7f);
521 else
522 cfm &= ~(1UL << 63); /* clear valid bit */
523
524 if (cfmp)
525 *cfmp = cfm;
526 return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
527}
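/*
 * A note on the loadrs arithmetic above (editor's gloss, not text from
 * the original source): pt->loadrs keeps the value in ar.rsc.loadrs
 * format, i.e. the dirty-byte count shifted left by 16, so
 * (pt->loadrs >> 19) is the number of dirty 8-byte slots on the kernel
 * RBS, assuming the usual kernel-entry convention.
 */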
528
529/*
530 * Synchronize (i.e., write) the RSE backing store living in kernel
531 * space to the VM of the CHILD task. SW and PT are the pointers to
532 * the switch_stack and pt_regs structures, respectively.
533 * USER_RBS_END is the user-level address at which the backing store
534 * ends.
535 */
536long
537ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
538 unsigned long user_rbs_start, unsigned long user_rbs_end)
539{
540 unsigned long addr, val;
541 long ret;
542
543 /* now copy word for word from kernel rbs to user rbs: */
544 for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
545 ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
546 if (ret < 0)
547 return ret;
548 if (access_process_vm(child, addr, &val, sizeof(val), 1)
549 != sizeof(val))
550 return -EIO;
551 }
552 return 0;
553}
554
555static long
556ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
557 unsigned long user_rbs_start, unsigned long user_rbs_end)
558{
559 unsigned long addr, val;
560 long ret;
561
562 /* now copy word for word from user rbs to kernel rbs: */
563 for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
564 if (access_process_vm(child, addr, &val, sizeof(val), 0)
565 != sizeof(val))
566 return -EIO;
567
568 ret = ia64_poke(child, sw, user_rbs_end, addr, val);
569 if (ret < 0)
570 return ret;
571 }
572 return 0;
573}
574
575typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
576 unsigned long, unsigned long);
577
578static void do_sync_rbs(struct unw_frame_info *info, void *arg)
579{
580 struct pt_regs *pt;
581 unsigned long urbs_end;
582 syncfunc_t fn = arg;
583
584 if (unw_unwind_to_user(info) < 0)
585 return;
586 pt = task_pt_regs(info->task);
587 urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
588
589 fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
590}
591
592/*
593 * When a thread is stopped (ptraced), the debugger might change the thread's
594 * user stack (by writing its memory directly), and we must not let the RSE
595 * state stored in the kernel override the user stack (user space's RSE is
596 * newer than the kernel's in that case). To work around the issue, we copy the
597 * kernel RSE to the user RSE before the task is stopped, so the user RSE has
598 * up-to-date data. We then copy the user RSE back to the kernel after the task
599 * is resumed from the traced stop, and the kernel will use the newer RSE to
600 * return to user mode. TIF_RESTORE_RSE is the flag that indicates the synchronization is needed.
601 */
602void ia64_ptrace_stop(void)
603{
604 if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
605 return;
606 tsk_set_notify_resume(current);
607 unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
608}
609
610/*
611 * This is called to read back the register backing store.
612 */
613void ia64_sync_krbs(void)
614{
615 clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
616 tsk_clear_notify_resume(current);
617
618 unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
619}
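/*
 * Roughly, the two paths pair up as follows: ia64_ptrace_stop() runs on
 * the way into a ptrace stop and flushes the kernel RBS out to user
 * memory, while ia64_sync_krbs() runs on resume (under TIF_RESTORE_RSE)
 * and pulls any debugger modifications back into the kernel RBS.
 */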
620
621/*
622 * After PTRACE_ATTACH, a thread's register backing store area in user
623 * space is assumed to contain correct data whenever the thread is
624 * stopped. arch_ptrace_stop takes care of this on tracing stops.
625 * But if the child was already stopped for job control when we attach
626 * to it, then it might not ever get into ptrace_stop by the time we
627 * want to examine the user memory containing the RBS.
628 */
629void
630ptrace_attach_sync_user_rbs (struct task_struct *child)
631{
632 int stopped = 0;
633 struct unw_frame_info info;
634
635 /*
636 * If the child is in TASK_STOPPED, we need to change that to
637 * TASK_TRACED momentarily while we operate on it. This ensures
638 * that the child won't be woken up and return to user mode while
639 * we are doing the sync. (It can only be woken up for SIGKILL.)
640 */
641
642 read_lock(&tasklist_lock);
643 if (child->signal) {
644 spin_lock_irq(&child->sighand->siglock);
645 if (child->state == TASK_STOPPED &&
646 !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
647 tsk_set_notify_resume(child);
648
649 child->state = TASK_TRACED;
650 stopped = 1;
651 }
652 spin_unlock_irq(&child->sighand->siglock);
653 }
654 read_unlock(&tasklist_lock);
655
656 if (!stopped)
657 return;
658
659 unw_init_from_blocked_task(&info, child);
660 do_sync_rbs(&info, ia64_sync_user_rbs);
661
662 /*
663 * Now move the child back into TASK_STOPPED if it should be in a
664 * job control stop, so that SIGCONT can be used to wake it up.
665 */
666 read_lock(&tasklist_lock);
667 if (child->signal) {
668 spin_lock_irq(&child->sighand->siglock);
669 if (child->state == TASK_TRACED &&
670 (child->signal->flags & SIGNAL_STOP_STOPPED)) {
671 child->state = TASK_STOPPED;
672 }
673 spin_unlock_irq(&child->sighand->siglock);
674 }
675 read_unlock(&tasklist_lock);
676}
677
678static inline int
679thread_matches (struct task_struct *thread, unsigned long addr)
680{
681 unsigned long thread_rbs_end;
682 struct pt_regs *thread_regs;
683
684 if (ptrace_check_attach(thread, 0) < 0)
685 /*
686 * If the thread is not in an attachable state, we'll
687 * ignore it. The net effect is that if ADDR happens
688 * to overlap with the portion of the thread's
689 * register backing store that is currently residing
690 * on the thread's kernel stack, then ptrace() may end
691 * up accessing a stale value. But if the thread
692 * isn't stopped, that's a problem anyhow, so we're
693 * doing as well as we can...
694 */
695 return 0;
696
697 thread_regs = task_pt_regs(thread);
698 thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
699 if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
700 return 0;
701
702 return 1; /* looks like we've got a winner */
703}
704
705/*
706 * Write f32-f127 back to task->thread.fph if it has been modified.
707 */
708inline void
709ia64_flush_fph (struct task_struct *task)
710{
711 struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
712
713 /*
714 * Prevent migrating this task while
715 * we're fiddling with the FPU state
716 */
717 preempt_disable();
718 if (ia64_is_local_fpu_owner(task) && psr->mfh) {
719 psr->mfh = 0;
720 task->thread.flags |= IA64_THREAD_FPH_VALID;
721 ia64_save_fpu(&task->thread.fph[0]);
722 }
723 preempt_enable();
724}
725
726/*
727 * Sync the fph state of the task so that it can be manipulated
728 * through thread.fph. If necessary, f32-f127 are written back to
729 * thread.fph or, if the fph state hasn't been used before, thread.fph
730 * is cleared to zeroes. Also, access to f32-f127 is disabled to
731 * ensure that the task picks up the state from thread.fph when it
732 * executes again.
733 */
734void
735ia64_sync_fph (struct task_struct *task)
736{
737 struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
738
739 ia64_flush_fph(task);
740 if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
741 task->thread.flags |= IA64_THREAD_FPH_VALID;
742 memset(&task->thread.fph, 0, sizeof(task->thread.fph));
743 }
744 ia64_drop_fpu(task);
745 psr->dfh = 1;
746}
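/*
 * Callers follow the convention visible in access_uarea() below: use
 * ia64_flush_fph() before reading fph (so thread.fph is current) and
 * ia64_sync_fph() before writing it (so the task reloads the modified
 * state from thread.fph).
 */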
747
748static int
749access_fr (struct unw_frame_info *info, int regnum, int hi,
750 unsigned long *data, int write_access)
751{
752 struct ia64_fpreg fpval;
753 int ret;
754
755 ret = unw_get_fr(info, regnum, &fpval);
756 if (ret < 0)
757 return ret;
758
759 if (write_access) {
760 fpval.u.bits[hi] = *data;
761 ret = unw_set_fr(info, regnum, fpval);
762 } else
763 *data = fpval.u.bits[hi];
764 return ret;
765}
766
767/*
768 * Change the machine-state of CHILD such that it will return via the normal
769 * kernel exit-path, rather than the syscall-exit path.
770 */
771static void
772convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
773 unsigned long cfm)
774{
775 struct unw_frame_info info, prev_info;
776 unsigned long ip, sp, pr;
777
778 unw_init_from_blocked_task(&info, child);
779 while (1) {
780 prev_info = info;
781 if (unw_unwind(&info) < 0)
782 return;
783
784 unw_get_sp(&info, &sp);
785 if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
786 < IA64_PT_REGS_SIZE) {
787 dprintk("ptrace.%s: ran off the top of the kernel "
788 "stack\n", __func__);
789 return;
790 }
791 if (unw_get_pr (&prev_info, &pr) < 0) {
792 unw_get_rp(&prev_info, &ip);
793 dprintk("ptrace.%s: failed to read "
794 "predicate register (ip=0x%lx)\n",
795 __func__, ip);
796 return;
797 }
798 if (unw_is_intr_frame(&info)
799 && (pr & (1UL << PRED_USER_STACK)))
800 break;
801 }
802
803 /*
804 * Note: at the time of this call, the target task is blocked
805 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
806 * (aka, "pLvSys") we redirect execution from
807 * .work_pending_syscall_end to .work_processed_kernel.
808 */
809 unw_get_pr(&prev_info, &pr);
810 pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
811 pr |= (1UL << PRED_NON_SYSCALL);
812 unw_set_pr(&prev_info, pr);
813
814 pt->cr_ifs = (1UL << 63) | cfm;
815 /*
816 * Clear the memory that is NOT written on syscall-entry to
817 * ensure we do not leak kernel-state to user when execution
818 * resumes.
819 */
820 pt->r2 = 0;
821 pt->r3 = 0;
822 pt->r14 = 0;
823 memset(&pt->r16, 0, 16*8); /* clear r16-r31 */
824 memset(&pt->f6, 0, 6*16); /* clear f6-f11 */
825 pt->b7 = 0;
826 pt->ar_ccv = 0;
827 pt->ar_csd = 0;
828 pt->ar_ssd = 0;
829}
830
831static int
832access_nat_bits (struct task_struct *child, struct pt_regs *pt,
833 struct unw_frame_info *info,
834 unsigned long *data, int write_access)
835{
836 unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
837 char nat = 0;
838
839 if (write_access) {
840 nat_bits = *data;
841 scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
842 if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
843 dprintk("ptrace: failed to set ar.unat\n");
844 return -1;
845 }
846 for (regnum = 4; regnum <= 7; ++regnum) {
847 unw_get_gr(info, regnum, &dummy, &nat);
848 unw_set_gr(info, regnum, dummy,
849 (nat_bits >> regnum) & 1);
850 }
851 } else {
852 if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
853 dprintk("ptrace: failed to read ar.unat\n");
854 return -1;
855 }
856 nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
857 for (regnum = 4; regnum <= 7; ++regnum) {
858 unw_get_gr(info, regnum, &dummy, &nat);
859 nat_bits |= (nat != 0) << regnum;
860 }
861 *data = nat_bits;
862 }
863 return 0;
864}
865
866static int
867access_uarea (struct task_struct *child, unsigned long addr,
868 unsigned long *data, int write_access)
869{
870 unsigned long *ptr, regnum, urbs_end, cfm;
871 struct switch_stack *sw;
872 struct pt_regs *pt;
873# define pt_reg_addr(pt, reg) ((void *) \
874 ((unsigned long) (pt) \
875 + offsetof(struct pt_regs, reg)))
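/* For example, for addr in PT_R8..PT_R11 the code below computes
 * pt_reg_addr(pt, r8) + (addr - PT_R8), i.e. the saved-register slot
 * inside pt_regs that the user-visible uarea offset names. */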
876
877
878 pt = task_pt_regs(child);
879 sw = (struct switch_stack *) (child->thread.ksp + 16);
880
881 if ((addr & 0x7) != 0) {
882 dprintk("ptrace: unaligned register address 0x%lx\n", addr);
883 return -1;
884 }
885
886 if (addr < PT_F127 + 16) {
887 /* accessing fph */
888 if (write_access)
889 ia64_sync_fph(child);
890 else
891 ia64_flush_fph(child);
892 ptr = (unsigned long *)
893 ((unsigned long) &child->thread.fph + addr);
894 } else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) {
895 /* scratch registers untouched by kernel (saved in pt_regs) */
896 ptr = pt_reg_addr(pt, f10) + (addr - PT_F10);
897 } else if (addr >= PT_F12 && addr < PT_F15 + 16) {
898 /*
899 * Scratch registers untouched by kernel (saved in
900 * switch_stack).
901 */
902 ptr = (unsigned long *) ((long) sw
903 + (addr - PT_NAT_BITS - 32));
904 } else if (addr < PT_AR_LC + 8) {
905 /* preserved state: */
906 struct unw_frame_info info;
907 char nat = 0;
908 int ret;
909
910 unw_init_from_blocked_task(&info, child);
911 if (unw_unwind_to_user(&info) < 0)
912 return -1;
913
914 switch (addr) {
915 case PT_NAT_BITS:
916 return access_nat_bits(child, pt, &info,
917 data, write_access);
918
919 case PT_R4: case PT_R5: case PT_R6: case PT_R7:
920 if (write_access) {
921 /* read NaT bit first: */
922 unsigned long dummy;
923
924 ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4,
925 &dummy, &nat);
926 if (ret < 0)
927 return ret;
928 }
929 return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data,
930 &nat, write_access);
931
932 case PT_B1: case PT_B2: case PT_B3:
933 case PT_B4: case PT_B5:
934 return unw_access_br(&info, (addr - PT_B1)/8 + 1, data,
935 write_access);
936
937 case PT_AR_EC:
938 return unw_access_ar(&info, UNW_AR_EC, data,
939 write_access);
940
941 case PT_AR_LC:
942 return unw_access_ar(&info, UNW_AR_LC, data,
943 write_access);
944
945 default:
946 if (addr >= PT_F2 && addr < PT_F5 + 16)
947 return access_fr(&info, (addr - PT_F2)/16 + 2,
948 (addr & 8) != 0, data,
949 write_access);
950 else if (addr >= PT_F16 && addr < PT_F31 + 16)
951 return access_fr(&info,
952 (addr - PT_F16)/16 + 16,
953 (addr & 8) != 0,
954 data, write_access);
955 else {
956 dprintk("ptrace: rejecting access to register "
957 "address 0x%lx\n", addr);
958 return -1;
959 }
960 }
961 } else if (addr < PT_F9+16) {
962 /* scratch state */
963 switch (addr) {
964 case PT_AR_BSP:
965 /*
966 * By convention, we use PT_AR_BSP to refer to
967 * the end of the user-level backing store.
968 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
969 * to get the real value of ar.bsp at the time
970 * the kernel was entered.
971 *
972 * Furthermore, when changing the contents of
973 * PT_AR_BSP (or PT_CFM) while the task is
974 * blocked in a system call, convert the state
975 * so that the non-system-call exit
976 * path is used. This ensures that the proper
977 * state will be picked up when resuming
978 * execution. However, it *also* means that
979 * once we write PT_AR_BSP/PT_CFM, it won't be
980 * possible to modify the syscall arguments of
981 * the pending system call any longer. This
982 * shouldn't be an issue because modifying
983 * PT_AR_BSP/PT_CFM generally implies that
984 * we're either abandoning the pending system
985 * call or that we defer its re-execution
986 * (e.g., due to GDB doing an inferior
987 * function call).
988 */
989 urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
990 if (write_access) {
991 if (*data != urbs_end) {
992 if (in_syscall(pt))
993 convert_to_non_syscall(child,
994 pt,
995 cfm);
996 /*
997 * Simulate user-level write
998 * of ar.bsp:
999 */
1000 pt->loadrs = 0;
1001 pt->ar_bspstore = *data;
1002 }
1003 } else
1004 *data = urbs_end;
1005 return 0;
1006
1007 case PT_CFM:
1008 urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
1009 if (write_access) {
1010 if (((cfm ^ *data) & PFM_MASK) != 0) {
1011 if (in_syscall(pt))
1012 convert_to_non_syscall(child,
1013 pt,
1014 cfm);
1015 pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
1016 | (*data & PFM_MASK));
1017 }
1018 } else
1019 *data = cfm;
1020 return 0;
1021
1022 case PT_CR_IPSR:
1023 if (write_access) {
1024 unsigned long tmp = *data;
1025 /* psr.ri==3 is a reserved value: SDM 2:25 */
1026 if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
1027 tmp &= ~IA64_PSR_RI;
1028 pt->cr_ipsr = ((tmp & IPSR_MASK)
1029 | (pt->cr_ipsr & ~IPSR_MASK));
1030 } else
1031 *data = (pt->cr_ipsr & IPSR_MASK);
1032 return 0;
1033
1034 case PT_AR_RSC:
1035 if (write_access)
1036 pt->ar_rsc = *data | (3 << 2); /* force PL3 */
1037 else
1038 *data = pt->ar_rsc;
1039 return 0;
1040
1041 case PT_AR_RNAT:
1042 ptr = pt_reg_addr(pt, ar_rnat);
1043 break;
1044 case PT_R1:
1045 ptr = pt_reg_addr(pt, r1);
1046 break;
1047 case PT_R2: case PT_R3:
1048 ptr = pt_reg_addr(pt, r2) + (addr - PT_R2);
1049 break;
1050 case PT_R8: case PT_R9: case PT_R10: case PT_R11:
1051 ptr = pt_reg_addr(pt, r8) + (addr - PT_R8);
1052 break;
1053 case PT_R12: case PT_R13:
1054 ptr = pt_reg_addr(pt, r12) + (addr - PT_R12);
1055 break;
1056 case PT_R14:
1057 ptr = pt_reg_addr(pt, r14);
1058 break;
1059 case PT_R15:
1060 ptr = pt_reg_addr(pt, r15);
1061 break;
1062 case PT_R16: case PT_R17: case PT_R18: case PT_R19:
1063 case PT_R20: case PT_R21: case PT_R22: case PT_R23:
1064 case PT_R24: case PT_R25: case PT_R26: case PT_R27:
1065 case PT_R28: case PT_R29: case PT_R30: case PT_R31:
1066 ptr = pt_reg_addr(pt, r16) + (addr - PT_R16);
1067 break;
1068 case PT_B0:
1069 ptr = pt_reg_addr(pt, b0);
1070 break;
1071 case PT_B6:
1072 ptr = pt_reg_addr(pt, b6);
1073 break;
1074 case PT_B7:
1075 ptr = pt_reg_addr(pt, b7);
1076 break;
1077 case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8:
1078 case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8:
1079 ptr = pt_reg_addr(pt, f6) + (addr - PT_F6);
1080 break;
1081 case PT_AR_BSPSTORE:
1082 ptr = pt_reg_addr(pt, ar_bspstore);
1083 break;
1084 case PT_AR_UNAT:
1085 ptr = pt_reg_addr(pt, ar_unat);
1086 break;
1087 case PT_AR_PFS:
1088 ptr = pt_reg_addr(pt, ar_pfs);
1089 break;
1090 case PT_AR_CCV:
1091 ptr = pt_reg_addr(pt, ar_ccv);
1092 break;
1093 case PT_AR_FPSR:
1094 ptr = pt_reg_addr(pt, ar_fpsr);
1095 break;
1096 case PT_CR_IIP:
1097 ptr = pt_reg_addr(pt, cr_iip);
1098 break;
1099 case PT_PR:
1100 ptr = pt_reg_addr(pt, pr);
1101 break;
1102 /* scratch register */
1103
1104 default:
1105 /* disallow accessing anything else... */
1106 dprintk("ptrace: rejecting access to register "
1107 "address 0x%lx\n", addr);
1108 return -1;
1109 }
1110 } else if (addr <= PT_AR_SSD) {
1111 ptr = pt_reg_addr(pt, ar_csd) + (addr - PT_AR_CSD);
1112 } else {
1113 /* access debug registers */
1114
1115 if (addr >= PT_IBR) {
1116 regnum = (addr - PT_IBR) >> 3;
1117 ptr = &child->thread.ibr[0];
1118 } else {
1119 regnum = (addr - PT_DBR) >> 3;
1120 ptr = &child->thread.dbr[0];
1121 }
1122
1123 if (regnum >= 8) {
1124 dprintk("ptrace: rejecting access to register "
1125 "address 0x%lx\n", addr);
1126 return -1;
1127 }
1128#ifdef CONFIG_PERFMON
1129 /*
1130 * Check if debug registers are used by perfmon. This
1131 * test must be done once we know that we can do the
1132 * operation, i.e. the arguments are all valid, but
1133 * before we start modifying the state.
1134 *
1135 * Perfmon needs to keep a count of how many processes
1136 * are trying to modify the debug registers for system
1137 * wide monitoring sessions.
1138 *
1139 * We also include read access here, because they may
1140 * cause the PMU-installed debug register state
1141 * (dbr[], ibr[]) to be reset. The two arrays are also
1142 * used by perfmon, but we do not use
1143 * IA64_THREAD_DBG_VALID. The registers are restored
1144 * by the PMU context switch code.
1145 */
1146 if (pfm_use_debug_registers(child)) return -1;
1147#endif
1148
1149 if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
1150 child->thread.flags |= IA64_THREAD_DBG_VALID;
1151 memset(child->thread.dbr, 0,
1152 sizeof(child->thread.dbr));
1153 memset(child->thread.ibr, 0,
1154 sizeof(child->thread.ibr));
1155 }
1156
1157 ptr += regnum;
1158
1159 if ((regnum & 1) && write_access) {
1160 /* don't let the user set kernel-level breakpoints: */
1161 *ptr = *data & ~(7UL << 56);
1162 return 0;
1163 }
1164 }
1165 if (write_access)
1166 *ptr = *data;
1167 else
1168 *data = *ptr;
1169 return 0;
1170}
1171
1172static long
1173ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
1174{
1175 unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
1176 struct unw_frame_info info;
1177 struct ia64_fpreg fpval;
1178 struct switch_stack *sw;
1179 struct pt_regs *pt;
1180 long ret, retval = 0;
1181 char nat = 0;
1182 int i;
1183
1184 if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
1185 return -EIO;
1186
1187 pt = task_pt_regs(child);
1188 sw = (struct switch_stack *) (child->thread.ksp + 16);
1189 unw_init_from_blocked_task(&info, child);
1190 if (unw_unwind_to_user(&info) < 0) {
1191 return -EIO;
1192 }
1193
1194 if (((unsigned long) ppr & 0x7) != 0) {
1195 dprintk("ptrace:unaligned register address %p\n", ppr);
1196 return -EIO;
1197 }
1198
1199 if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
1200 || access_uarea(child, PT_AR_EC, &ec, 0) < 0
1201 || access_uarea(child, PT_AR_LC, &lc, 0) < 0
1202 || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
1203 || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
1204 || access_uarea(child, PT_CFM, &cfm, 0)
1205 || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
1206 return -EIO;
1207
1208 /* control regs */
1209
1210 retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
1211 retval |= __put_user(psr, &ppr->cr_ipsr);
1212
1213 /* app regs */
1214
1215 retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1216 retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
1217 retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1218 retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1219 retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1220 retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1221
1222 retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
1223 retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
1224 retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
1225 retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
1226 retval |= __put_user(cfm, &ppr->cfm);
1227
1228 /* gr1-gr3 */
1229
1230 retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
1231 retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
1232
1233 /* gr4-gr7 */
1234
1235 for (i = 4; i < 8; i++) {
1236 if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
1237 return -EIO;
1238 retval |= __put_user(val, &ppr->gr[i]);
1239 }
1240
1241 /* gr8-gr11 */
1242
1243 retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
1244
1245 /* gr12-gr15 */
1246
1247 retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
1248 retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
1249 retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
1250
1251 /* gr16-gr31 */
1252
1253 retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
1254
1255 /* b0 */
1256
1257 retval |= __put_user(pt->b0, &ppr->br[0]);
1258
1259 /* b1-b5 */
1260
1261 for (i = 1; i < 6; i++) {
1262 if (unw_access_br(&info, i, &val, 0) < 0)
1263 return -EIO;
1264 __put_user(val, &ppr->br[i]);
1265 }
1266
1267 /* b6-b7 */
1268
1269 retval |= __put_user(pt->b6, &ppr->br[6]);
1270 retval |= __put_user(pt->b7, &ppr->br[7]);
1271
1272 /* fr2-fr5 */
1273
1274 for (i = 2; i < 6; i++) {
1275 if (unw_get_fr(&info, i, &fpval) < 0)
1276 return -EIO;
1277 retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
1278 }
1279
1280 /* fr6-fr11 */
1281
1282 retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
1283 sizeof(struct ia64_fpreg) * 6);
1284
1285 /* fp scratch regs(12-15) */
1286
1287 retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
1288 sizeof(struct ia64_fpreg) * 4);
1289
1290 /* fr16-fr31 */
1291
1292 for (i = 16; i < 32; i++) {
1293 if (unw_get_fr(&info, i, &fpval) < 0)
1294 return -EIO;
1295 retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
1296 }
1297
1298 /* fph */
1299
1300 ia64_flush_fph(child);
1301 retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
1302 sizeof(ppr->fr[32]) * 96);
1303
1304 /* preds */
1305
1306 retval |= __put_user(pt->pr, &ppr->pr);
1307
1308 /* nat bits */
1309
1310 retval |= __put_user(nat_bits, &ppr->nat);
1311
1312 ret = retval ? -EIO : 0;
1313 return ret;
1314}
1315
1316static long
1317ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
1318{
1319 unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
1320 struct unw_frame_info info;
1321 struct switch_stack *sw;
1322 struct ia64_fpreg fpval;
1323 struct pt_regs *pt;
1324 long ret, retval = 0;
1325 int i;
1326
1327 memset(&fpval, 0, sizeof(fpval));
1328
1329 if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
1330 return -EIO;
1331
1332 pt = task_pt_regs(child);
1333 sw = (struct switch_stack *) (child->thread.ksp + 16);
1334 unw_init_from_blocked_task(&info, child);
1335 if (unw_unwind_to_user(&info) < 0) {
1336 return -EIO;
1337 }
1338
1339 if (((unsigned long) ppr & 0x7) != 0) {
1340 dprintk("ptrace:unaligned register address %p\n", ppr);
1341 return -EIO;
1342 }
1343
1344 /* control regs */
1345
1346 retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
1347 retval |= __get_user(psr, &ppr->cr_ipsr);
1348
1349 /* app regs */
1350
1351 retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1352 retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
1353 retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1354 retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1355 retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1356 retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1357
1358 retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
1359 retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
1360 retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
1361 retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
1362 retval |= __get_user(cfm, &ppr->cfm);
1363
1364 /* gr1-gr3 */
1365
1366 retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
1367 retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
1368
1369 /* gr4-gr7 */
1370
1371 for (i = 4; i < 8; i++) {
1372 retval |= __get_user(val, &ppr->gr[i]);
1373 /* NaT bit will be set via PT_NAT_BITS: */
1374 if (unw_set_gr(&info, i, val, 0) < 0)
1375 return -EIO;
1376 }
1377
1378 /* gr8-gr11 */
1379
1380 retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
1381
1382 /* gr12-gr15 */
1383
1384 retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
1385 retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
1386 retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
1387
1388 /* gr16-gr31 */
1389
1390 retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
1391
1392 /* b0 */
1393
1394 retval |= __get_user(pt->b0, &ppr->br[0]);
1395
1396 /* b1-b5 */
1397
1398 for (i = 1; i < 6; i++) {
1399 retval |= __get_user(val, &ppr->br[i]);
1400 unw_set_br(&info, i, val);
1401 }
1402
1403 /* b6-b7 */
1404
1405 retval |= __get_user(pt->b6, &ppr->br[6]);
1406 retval |= __get_user(pt->b7, &ppr->br[7]);
1407
1408 /* fr2-fr5 */
1409
1410 for (i = 2; i < 6; i++) {
1411 retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
1412 if (unw_set_fr(&info, i, fpval) < 0)
1413 return -EIO;
1414 }
1415
1416 /* fr6-fr11 */
1417
1418 retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
1419 sizeof(ppr->fr[6]) * 6);
1420
1421 /* fp scratch regs(12-15) */
1422
1423 retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
1424 sizeof(ppr->fr[12]) * 4);
1425
1426 /* fr16-fr31 */
1427
1428 for (i = 16; i < 32; i++) {
1429 retval |= __copy_from_user(&fpval, &ppr->fr[i],
1430 sizeof(fpval));
1431 if (unw_set_fr(&info, i, fpval) < 0)
1432 return -EIO;
1433 }
1434
1435 /* fph */
1436
1437 ia64_sync_fph(child);
1438 retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
1439 sizeof(ppr->fr[32]) * 96);
1440
1441 /* preds */
1442
1443 retval |= __get_user(pt->pr, &ppr->pr);
1444
1445 /* nat bits */
1446
1447 retval |= __get_user(nat_bits, &ppr->nat);
1448
1449 retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
1450 retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
1451 retval |= access_uarea(child, PT_AR_EC, &ec, 1);
1452 retval |= access_uarea(child, PT_AR_LC, &lc, 1);
1453 retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
1454 retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
1455 retval |= access_uarea(child, PT_CFM, &cfm, 1);
1456 retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
1457
1458 ret = retval ? -EIO : 0;
1459 return ret;
1460}
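/*
 * Illustrative user-space consumer of the two handlers above (editor's
 * sketch, error handling omitted; ia64 layout per <asm/ptrace.h>):
 *
 *	struct pt_all_user_regs regs;
 *
 *	ptrace(PTRACE_GETREGS, pid, 0, (unsigned long) &regs);
 *	regs.gr[8] = 0;		// e.g. patch the syscall return value
 *	ptrace(PTRACE_SETREGS, pid, 0, (unsigned long) &regs);
 */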
1461
1462void
1463user_enable_single_step (struct task_struct *child)
1464{
1465 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1466
1467 set_tsk_thread_flag(child, TIF_SINGLESTEP);
1468 child_psr->ss = 1;
1469}
1470
1471void
1472user_enable_block_step (struct task_struct *child)
1473{
1474 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1475
1476 set_tsk_thread_flag(child, TIF_SINGLESTEP);
1477 child_psr->tb = 1;
1478}
1479
1480void
1481user_disable_single_step (struct task_struct *child)
1482{
1483 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1484
1485 /* make sure the single step/taken-branch trap bits are not set: */
1486 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1487 child_psr->ss = 0;
1488 child_psr->tb = 0;
1489}
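/*
 * psr.ss raises a Single Step trap after every instruction, while psr.tb
 * traps only on taken branches ("block stepping"); both variants share
 * the TIF_SINGLESTEP bookkeeping above, so user_disable_single_step()
 * clears them together.
 */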
1490
1491/*
1492 * Called by kernel/ptrace.c when detaching..
1493 *
1494 * Make sure the single step bit is not set.
1495 */
1496void
1497ptrace_disable (struct task_struct *child)
1498{
1499 user_disable_single_step(child);
1500}
1501
1502long
1503arch_ptrace (struct task_struct *child, long request, long addr, long data)
1504 {
1505 switch (request) {
1506 case PTRACE_PEEKTEXT:
1507 case PTRACE_PEEKDATA:
1508 /* read word at location addr */
1509 if (access_process_vm(child, addr, &data, sizeof(data), 0)
1510 != sizeof(data))
1511 return -EIO;
1512 /* ensure return value is not mistaken for error code */
1513 force_successful_syscall_return();
1514 return data;
1515
1516 /* PTRACE_POKETEXT and PTRACE_POKEDATA is handled
1517 * by the generic ptrace_request().
1518 */
1519
1520 case PTRACE_PEEKUSR:
1521 /* read the word at addr in the USER area */
1522 if (access_uarea(child, addr, &data, 0) < 0)
1523 return -EIO;
1524 /* ensure return value is not mistaken for error code */
1525 force_successful_syscall_return();
1526 return data;
1527
1528 case PTRACE_POKEUSR:
1529 /* write the word at addr in the USER area */
1530 if (access_uarea(child, addr, &data, 1) < 0)
1531 return -EIO;
1532 return 0;
1533
1534 case PTRACE_OLD_GETSIGINFO:
1535 /* for backwards-compatibility */
1536 return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
1537
1538 case PTRACE_OLD_SETSIGINFO:
1539 /* for backwards-compatibility */
1540 return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
1541
1542 case PTRACE_GETREGS:
1543 return ptrace_getregs(child,
1544 (struct pt_all_user_regs __user *) data);
1545
1546 case PTRACE_SETREGS:
1547 return ptrace_setregs(child,
1548 (struct pt_all_user_regs __user *) data);
1549
1550 default:
1551 return ptrace_request(child, request, addr, data);
1552 }
1553}
1554
1555
1556 static void
1557syscall_trace (void)
1558{
1559 /*
1560 * The 0x80 provides a way for the tracing parent to
1561 * distinguish between a syscall stop and SIGTRAP delivery.
1562 */
1563 ptrace_notify(SIGTRAP
1564 | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
1565
1566 /*
1567 * This isn't the same as continuing with a signal, but it
1568 * will do for normal use. strace only continues with a
1569 * signal if the stopping signal is not SIGTRAP. -brl
1570 */
1571 if (current->exit_code) {
1572 send_sig(current->exit_code, current, 1);
1573 current->exit_code = 0;
1574 }
1575}
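/*
 * With PT_TRACESYSGOOD set, the tracer sees WSTOPSIG(status) ==
 * (SIGTRAP | 0x80) for these stops, which is how tools like strace tell
 * syscall stops apart from genuine SIGTRAPs.
 */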
1576
1577/* "asmlinkage" so the input arguments are preserved... */
1578
1579asmlinkage void
1580syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1581 long arg4, long arg5, long arg6, long arg7,
1582 struct pt_regs regs)
1583{
1584 if (test_thread_flag(TIF_SYSCALL_TRACE)
1585 && (current->ptrace & PT_PTRACED))
1586 syscall_trace();
1587
1588 /* copy user rbs to kernel rbs */
1589 if (test_thread_flag(TIF_RESTORE_RSE))
1590 ia64_sync_krbs();
1591
1592 if (unlikely(current->audit_context)) {
1593 long syscall;
1594 int arch;
1595
1596 if (IS_IA32_PROCESS(&regs)) {
1597 syscall = regs.r1;
1598 arch = AUDIT_ARCH_I386;
1599 } else {
1600 syscall = regs.r15;
1601 arch = AUDIT_ARCH_IA64;
1602 }
1603
1604 audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
1605 }
1606
1607}
1608
1609/* "asmlinkage" so the input arguments are preserved... */
1610
1611asmlinkage void
1612syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1613 long arg4, long arg5, long arg6, long arg7,
1614 struct pt_regs regs)
1615{
1616 if (unlikely(current->audit_context)) {
1617 int success = AUDITSC_RESULT(regs.r10);
1618 long result = regs.r8;
1619
1620 if (success != AUDITSC_SUCCESS)
1621 result = -result;
1622 audit_syscall_exit(success, result);
1623 }
1624
1625 if ((test_thread_flag(TIF_SYSCALL_TRACE)
1626 || test_thread_flag(TIF_SINGLESTEP))
1627 && (current->ptrace & PT_PTRACED))
1628 syscall_trace();
1629
1630 /* copy user rbs to kernel rbs */
1631 if (test_thread_flag(TIF_RESTORE_RSE))
1632 ia64_sync_krbs();
1633}
1634
1635/* Utrace implementation starts here */
1636struct regset_get {
1637 void *kbuf;
1638 void __user *ubuf;
1639};
1640
1641struct regset_set {
1642 const void *kbuf;
1643 const void __user *ubuf;
1644};
1645
1646struct regset_getset {
1647 struct task_struct *target;
1648 const struct user_regset *regset;
1649 union {
1650 struct regset_get get;
1651 struct regset_set set;
1652 } u;
1653 unsigned int pos;
1654 unsigned int count;
1655 int ret;
1656};
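/*
 * The do_*regs_get/set unwinder callbacks below take one of these as
 * their argument: it carries the regset request (target, buffers,
 * pos/count) across the unwind callback boundary and returns the
 * outcome in ->ret.
 */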
1657
1658static int
1659access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
1660 unsigned long addr, unsigned long *data, int write_access)
1661{
1662 struct pt_regs *pt;
1663 unsigned long *ptr = NULL;
1664 int ret;
1665 char nat = 0;
1666
1667 pt = task_pt_regs(target);
1668 switch (addr) {
1669 case ELF_GR_OFFSET(1):
1670 ptr = &pt->r1;
1671 break;
1672 case ELF_GR_OFFSET(2):
1673 case ELF_GR_OFFSET(3):
1674 ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
1675 break;
1676 case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
1677 if (write_access) {
1678 /* read NaT bit first: */
1679 unsigned long dummy;
1680
1681 ret = unw_get_gr(info, addr/8, &dummy, &nat);
1682 if (ret < 0)
1683 return ret;
1684 }
1685 return unw_access_gr(info, addr/8, data, &nat, write_access);
1686 case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
1687 ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
1688 break;
1689 case ELF_GR_OFFSET(12):
1690 case ELF_GR_OFFSET(13):
1691 ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
1692 break;
1693 case ELF_GR_OFFSET(14):
1694 ptr = &pt->r14;
1695 break;
1696 case ELF_GR_OFFSET(15):
1697 ptr = &pt->r15;
1698 }
1699 if (write_access)
1700 *ptr = *data;
1701 else
1702 *data = *ptr;
1703 return 0;
1704}
1705
1706static int
1707access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
1708 unsigned long addr, unsigned long *data, int write_access)
1709{
1710 struct pt_regs *pt;
1711 unsigned long *ptr = NULL;
1712
1713 pt = task_pt_regs(target);
1714 switch (addr) {
1715 case ELF_BR_OFFSET(0):
1716 ptr = &pt->b0;
1717 break;
1718 case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
1719 return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
1720 data, write_access);
1721 case ELF_BR_OFFSET(6):
1722 ptr = &pt->b6;
1723 break;
1724 case ELF_BR_OFFSET(7):
1725 ptr = &pt->b7;
1726 }
1727 if (write_access)
1728 *ptr = *data;
1729 else
1730 *data = *ptr;
1731 return 0;
1732}
1733
1734static int
1735access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
1736 unsigned long addr, unsigned long *data, int write_access)
1737{
1738 struct pt_regs *pt;
1739 unsigned long cfm, urbs_end;
1740 unsigned long *ptr = NULL;
1741
1742 pt = task_pt_regs(target);
1743 if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
1744 switch (addr) {
1745 case ELF_AR_RSC_OFFSET:
1746 /* force PL3 */
1747 if (write_access)
1748 pt->ar_rsc = *data | (3 << 2);
1749 else
1750 *data = pt->ar_rsc;
1751 return 0;
1752 case ELF_AR_BSP_OFFSET:
1753 /*
1754 * By convention, we use PT_AR_BSP to refer to
1755 * the end of the user-level backing store.
1756 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
1757 * to get the real value of ar.bsp at the time
1758 * the kernel was entered.
1759 *
1760 * Furthermore, when changing the contents of
1761 * PT_AR_BSP (or PT_CFM) while the task is
1762 * blocked in a system call, convert the state
1763 * so that the non-system-call exit
1764 * path is used. This ensures that the proper
1765 * state will be picked up when resuming
1766 * execution. However, it *also* means that
1767 * once we write PT_AR_BSP/PT_CFM, it won't be
1768 * possible to modify the syscall arguments of
1769 * the pending system call any longer. This
1770 * shouldn't be an issue because modifying
1771 * PT_AR_BSP/PT_CFM generally implies that
1772 * we're either abandoning the pending system
1773 * call or that we defer its re-execution
1774 * (e.g., due to GDB doing an inferior
1775 * function call).
1776 */
1777 urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1778 if (write_access) {
1779 if (*data != urbs_end) {
1780 if (in_syscall(pt))
1781 convert_to_non_syscall(target,
1782 pt,
1783 cfm);
1784 /*
1785 * Simulate user-level write
1786 * of ar.bsp:
1787 */
1788 pt->loadrs = 0;
1789 pt->ar_bspstore = *data;
1790 }
1791 } else
1792 *data = urbs_end;
1793 return 0;
1794 case ELF_AR_BSPSTORE_OFFSET:
1795 ptr = &pt->ar_bspstore;
1796 break;
1797 case ELF_AR_RNAT_OFFSET:
1798 ptr = &pt->ar_rnat;
1799 break;
1800 case ELF_AR_CCV_OFFSET:
1801 ptr = &pt->ar_ccv;
1802 break;
1803 case ELF_AR_UNAT_OFFSET:
1804 ptr = &pt->ar_unat;
1805 break;
1806 case ELF_AR_FPSR_OFFSET:
1807 ptr = &pt->ar_fpsr;
1808 break;
1809 case ELF_AR_PFS_OFFSET:
1810 ptr = &pt->ar_pfs;
1811 break;
1812 case ELF_AR_LC_OFFSET:
1813 return unw_access_ar(info, UNW_AR_LC, data,
1814 write_access);
1815 case ELF_AR_EC_OFFSET:
1816 return unw_access_ar(info, UNW_AR_EC, data,
1817 write_access);
1818 case ELF_AR_CSD_OFFSET:
1819 ptr = &pt->ar_csd;
1820 break;
1821 case ELF_AR_SSD_OFFSET:
1822 ptr = &pt->ar_ssd;
1823 }
1824 } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
1825 switch (addr) {
1826 case ELF_CR_IIP_OFFSET:
1827 ptr = &pt->cr_iip;
1828 break;
1829 case ELF_CFM_OFFSET:
1830 urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1831 if (write_access) {
1832 if (((cfm ^ *data) & PFM_MASK) != 0) {
1833 if (in_syscall(pt))
1834 convert_to_non_syscall(target,
1835 pt,
1836 cfm);
1837 pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
1838 | (*data & PFM_MASK));
1839 }
1840 } else
1841 *data = cfm;
1842 return 0;
1843 case ELF_CR_IPSR_OFFSET:
1844 if (write_access) {
1845 unsigned long tmp = *data;
1846 /* psr.ri==3 is a reserved value: SDM 2:25 */
1847 if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
1848 tmp &= ~IA64_PSR_RI;
1849 pt->cr_ipsr = ((tmp & IPSR_MASK)
1850 | (pt->cr_ipsr & ~IPSR_MASK));
1851 } else
1852 *data = (pt->cr_ipsr & IPSR_MASK);
1853 return 0;
1854 }
1855 } else if (addr == ELF_NAT_OFFSET)
1856 return access_nat_bits(target, pt, info,
1857 data, write_access);
1858 else if (addr == ELF_PR_OFFSET)
1859 ptr = &pt->pr;
1860 else
1861 return -1;
1862
1863 if (write_access)
1864 *ptr = *data;
1865 else
1866 *data = *ptr;
1867
1868 return 0;
1869}
1870
1871static int
1872access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
1873 unsigned long addr, unsigned long *data, int write_access)
1874{
1875 if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
1876 return access_elf_gpreg(target, info, addr, data, write_access);
1877 else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
1878 return access_elf_breg(target, info, addr, data, write_access);
1879 else
1880 return access_elf_areg(target, info, addr, data, write_access);
1881}
1882
1883void do_gpregs_get(struct unw_frame_info *info, void *arg)
1884{
1885 struct pt_regs *pt;
1886 struct regset_getset *dst = arg;
1887 elf_greg_t tmp[16];
1888 unsigned int i, index, min_copy;
1889
1890 if (unw_unwind_to_user(info) < 0)
1891 return;
1892
1893 /*
1894 * coredump format:
1895 * r0-r31
1896 * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
1897 * predicate registers (p0-p63)
1898 * b0-b7
1899 * ip cfm user-mask
1900 * ar.rsc ar.bsp ar.bspstore ar.rnat
1901 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
1902 */
1903
1904
1905 /* Skip r0 */
1906 if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1907 dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1908 &dst->u.get.kbuf,
1909 &dst->u.get.ubuf,
1910 0, ELF_GR_OFFSET(1));
1911 if (dst->ret || dst->count == 0)
1912 return;
1913 }
1914
1915 /* gr1 - gr15 */
1916 if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1917 index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1918 min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
1919 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
1920 for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1921 index++)
1922 if (access_elf_reg(dst->target, info, i,
1923 &tmp[index], 0) < 0) {
1924 dst->ret = -EIO;
1925 return;
1926 }
1927 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1928 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1929 ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1930 if (dst->ret || dst->count == 0)
1931 return;
1932 }
1933
1934 /* r16-r31 */
1935 if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1936 pt = task_pt_regs(dst->target);
1937 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1938 &dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
1939 ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1940 if (dst->ret || dst->count == 0)
1941 return;
1942 }
1943
1944 /* nat, pr, b0 - b7 */
1945 if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1946 index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1947 min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
1948 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
1949 for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1950 index++)
1951 if (access_elf_reg(dst->target, info, i,
1952 &tmp[index], 0) < 0) {
1953 dst->ret = -EIO;
1954 return;
1955 }
1956 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1957 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1958 ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1959 if (dst->ret || dst->count == 0)
1960 return;
1961 }
1962
1963 /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1964 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1965 */
1966 if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1967 index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1968 min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
1969 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
1970 for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1971 index++)
1972 if (access_elf_reg(dst->target, info, i,
1973 &tmp[index], 0) < 0) {
1974 dst->ret = -EIO;
1975 return;
1976 }
1977 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1978 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1979 ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1980 }
1981}
1982
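/*
 * do_gpregs_set() is the mirror image of do_gpregs_get(): each chunk
 * is first copied in from the tracer's buffer into tmp[] and then
 * written back one elf_greg_t at a time through access_elf_reg(),
 * except for r16-r31, which land directly in pt_regs.  Since
 * user_regset_copyin() advances dst->pos, the write-back loops run
 * from the saved start offset up to the new dst->pos.
 */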
1983void do_gpregs_set(struct unw_frame_info *info, void *arg)
1984{
1985 struct pt_regs *pt;
1986 struct regset_getset *dst = arg;
1987 elf_greg_t tmp[16];
1988 unsigned int i, index;
1989
1990 if (unw_unwind_to_user(info) < 0)
1991 return;
1992
1993 /* Skip r0 */
1994 if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1995 dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1996 &dst->u.set.kbuf,
1997 &dst->u.set.ubuf,
1998 0, ELF_GR_OFFSET(1));
1999 if (dst->ret || dst->count == 0)
2000 return;
2001 }
2002
2003 /* gr1-gr15 */
2004 if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
2005 i = dst->pos;
2006 index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
2007 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2008 &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
2009 ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
2010 if (dst->ret)
2011 return;
2012 for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
2013 if (access_elf_reg(dst->target, info, i,
2014 &tmp[index], 1) < 0) {
2015 dst->ret = -EIO;
2016 return;
2017 }
2018 if (dst->count == 0)
2019 return;
2020 }
2021
2022 /* gr16-gr31 */
2023 if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
2024 pt = task_pt_regs(dst->target);
2025 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2026 &dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
2027 ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
2028 if (dst->ret || dst->count == 0)
2029 return;
2030 }
2031
2032 /* nat, pr, b0 - b7 */
2033 if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
2034 i = dst->pos;
2035 index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
2036 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2037 &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
2038 ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
2039 if (dst->ret)
2040 return;
2041 for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
2042 if (access_elf_reg(dst->target, info, i,
2043 &tmp[index], 1) < 0) {
2044 dst->ret = -EIO;
2045 return;
2046 }
2047 if (dst->count == 0)
2048 return;
2049 }
2050
2051 /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
2052 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
2053 */
2054 if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
2055 i = dst->pos;
2056 index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
2057 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2058 &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
2059 ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
2060 if (dst->ret)
2061 return;
2062 for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
2063 if (access_elf_reg(dst->target, info, i,
2064 &tmp[index], 1) < 0) {
2065 dst->ret = -EIO;
2066 return;
2067 }
2068 }
2069}
2070
2071 #define ELF_FP_OFFSET(i) ((i) * sizeof(elf_fpreg_t))
2072
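/*
 * do_fpregs_get() follows the same chunked pattern for NT_PRFPREG:
 * the slots for the read-only constants fr0/fr1 are zero-filled,
 * fr2-fr31 are fetched through the unwinder, and the high partition
 * fr32-fr127 is copied from thread.fph after ia64_flush_fph(), or
 * zero-filled if the thread never used the high FP registers
 * (IA64_THREAD_FPH_VALID clear).
 */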
2073void do_fpregs_get(struct unw_frame_info *info, void *arg)
2074{
2075 struct regset_getset *dst = arg;
2076 struct task_struct *task = dst->target;
2077 elf_fpreg_t tmp[30];
2078 int index, min_copy, i;
2079
2080 if (unw_unwind_to_user(info) < 0)
2081 return;
2082
2083 /* Skip pos 0 and 1 */
2084 if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
2085 dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
2086 &dst->u.get.kbuf,
2087 &dst->u.get.ubuf,
2088 0, ELF_FP_OFFSET(2));
2089 if (dst->count == 0 || dst->ret)
2090 return;
2091 }
2092
2093 /* fr2-fr31 */
2094 if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
2095 index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
2096
2097 min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
2098 dst->pos + dst->count);
2099 for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
2100 index++)
2101 if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
2102 &tmp[index])) {
2103 dst->ret = -EIO;
2104 return;
2105 }
2106 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2107 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
2108 ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
2109 if (dst->count == 0 || dst->ret)
2110 return;
2111 }
2112
2113 /* fph */
2114 if (dst->count > 0) {
2115 ia64_flush_fph(dst->target);
2116 if (task->thread.flags & IA64_THREAD_FPH_VALID)
2117 dst->ret = user_regset_copyout(
2118 &dst->pos, &dst->count,
2119 &dst->u.get.kbuf, &dst->u.get.ubuf,
2120 &dst->target->thread.fph,
2121 ELF_FP_OFFSET(32), -1);
2122 else
2123 /* Zero fill instead. */
2124 dst->ret = user_regset_copyout_zero(
2125 &dst->pos, &dst->count,
2126 &dst->u.get.kbuf, &dst->u.get.ubuf,
2127 ELF_FP_OFFSET(32), -1);
2128 }
2129}
2130
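/*
 * do_fpregs_set(): an elf_fpreg_t is 16 bytes, so a request whose
 * start or end offset is not 16-byte aligned supplies only half of
 * the first or last register.  The untouched half is read back with
 * unw_get_fr() and merged into tmp[] before the full register is
 * stored with unw_set_fr().  For example (illustrative offsets), a
 * write starting at ELF_FP_OFFSET(2) + 8 keeps bits[0] of fr2 and
 * replaces only bits[1].
 */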
2131void do_fpregs_set(struct unw_frame_info *info, void *arg)
2132{
2133 struct regset_getset *dst = arg;
2134 elf_fpreg_t fpreg, tmp[30];
2135 int index, start, end;
2136
2137 if (unw_unwind_to_user(info) < 0)
2138 return;
2139
2140 /* Skip pos 0 and 1 */
2141 if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
2142 dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
2143 &dst->u.set.kbuf,
2144 &dst->u.set.ubuf,
2145 0, ELF_FP_OFFSET(2));
2146 if (dst->count == 0 || dst->ret)
2147 return;
2148 }
2149
2150 /* fr2-fr31 */
2151 if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
2152 start = dst->pos;
2153 end = min(((unsigned int)ELF_FP_OFFSET(32)),
2154 dst->pos + dst->count);
2155 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2156 &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
2157 ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
2158 if (dst->ret)
2159 return;
2160
2161 if (start & 0xF) { /* only write high part */
2162 if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
2163 &fpreg)) {
2164 dst->ret = -EIO;
2165 return;
2166 }
2167 tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
2168 = fpreg.u.bits[0];
2169 start &= ~0xFUL;
2170 }
2171 if (end & 0xF) { /* only write low part */
2172 if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
2173 &fpreg)) {
2174 dst->ret = -EIO;
2175 return;
2176 }
2177 tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
2178 = fpreg.u.bits[1];
2179 end = (end + 0xF) & ~0xFUL;
2180 }
2181
2182 for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
2183 index = start / sizeof(elf_fpreg_t);
2184 if (unw_set_fr(info, index, tmp[index - 2])) {
2185 dst->ret = -EIO;
2186 return;
2187 }
2188 }
2189 if (dst->ret || dst->count == 0)
2190 return;
2191 }
2192
2193 /* fph */
2194 if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
2195 ia64_sync_fph(dst->target);
2196 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2197 &dst->u.set.kbuf,
2198 &dst->u.set.ubuf,
2199 &dst->target->thread.fph,
2200 ELF_FP_OFFSET(32), -1);
2201 }
2202}
2203
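/*
 * do_regset_call() runs one of the do_*_get/set/writeback helpers in
 * the proper unwind context: the current task must be unwound from a
 * live frame via unw_init_running(), whereas a stopped tracee is
 * unwound from its blocked state with unw_init_from_blocked_task().
 * The regset_getset cookie carries pos/count and the user/kernel
 * buffers in, and the result code out through info.ret.
 */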
2204static int
2205do_regset_call(void (*call)(struct unw_frame_info *, void *),
2206 struct task_struct *target,
2207 const struct user_regset *regset,
2208 unsigned int pos, unsigned int count,
2209 const void *kbuf, const void __user *ubuf)
2210{
2211 struct regset_getset info = { .target = target, .regset = regset,
2212 .pos = pos, .count = count,
2213 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
2214 .ret = 0 };
2215
2216 if (target == current)
2217 unw_init_running(call, &info);
2218 else {
2219 struct unw_frame_info ufi;
2220 memset(&ufi, 0, sizeof(ufi));
2221 unw_init_from_blocked_task(&ufi, target);
2222 (*call)(&ufi, &info);
2223 }
2224
2225 return info.ret;
2226}
2227
2228static int
2229gpregs_get(struct task_struct *target,
2230 const struct user_regset *regset,
2231 unsigned int pos, unsigned int count,
2232 void *kbuf, void __user *ubuf)
2233{
2234 return do_regset_call(do_gpregs_get, target, regset, pos, count,
2235 kbuf, ubuf);
2236}
2237
2238static int gpregs_set(struct task_struct *target,
2239 const struct user_regset *regset,
2240 unsigned int pos, unsigned int count,
2241 const void *kbuf, const void __user *ubuf)
2242{
2243 return do_regset_call(do_gpregs_set, target, regset, pos, count,
2244 kbuf, ubuf);
2245}
2246
2247static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
2248{
2249 do_sync_rbs(info, ia64_sync_user_rbs);
2250}
2251
2252/*
2253 * This is called to write back the register backing store.
2254 * ptrace does this before it stops, so that a tracer reading the user
2255 * memory after the thread stops will get the current register data.
2256 */
2257static int
2258gpregs_writeback(struct task_struct *target,
2259 const struct user_regset *regset,
2260 int now)
2261{
2262 if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
2263 return 0;
2264 tsk_set_notify_resume(target);
2265 return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
2266 NULL, NULL);
2267}
2268
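/*
 * fpregs_active() tells the regset core how many NT_PRFPREG slots
 * are worth dumping: all 128 when the high FP partition is valid for
 * this thread, otherwise only fr0-fr31.
 */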
2269static int
2270fpregs_active(struct task_struct *target, const struct user_regset *regset)
2271{
2272 return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
2273}
2274
2275static int fpregs_get(struct task_struct *target,
2276 const struct user_regset *regset,
2277 unsigned int pos, unsigned int count,
2278 void *kbuf, void __user *ubuf)
2279{
2280 return do_regset_call(do_fpregs_get, target, regset, pos, count,
2281 kbuf, ubuf);
2282}
2283
2284static int fpregs_set(struct task_struct *target,
2285 const struct user_regset *regset,
2286 unsigned int pos, unsigned int count,
2287 const void *kbuf, const void __user *ubuf)
2288{
2289 return do_regset_call(do_fpregs_set, target, regset, pos, count,
2290 kbuf, ubuf);
2291}
2292
2293static const struct user_regset native_regsets[] = {
2294 {
2295 .core_note_type = NT_PRSTATUS,
2296 .n = ELF_NGREG,
2297 .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
2298 .get = gpregs_get, .set = gpregs_set,
2299 .writeback = gpregs_writeback
2300 },
2301 {
2302 .core_note_type = NT_PRFPREG,
2303 .n = ELF_NFPREG,
2304 .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
2305 .get = fpregs_get, .set = fpregs_set, .active = fpregs_active
2306 },
2307};
2308
2309static const struct user_regset_view user_ia64_view = {
2310 .name = "ia64",
2311 .e_machine = EM_IA_64,
2312 .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2313};
2314
2315const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
2316{
2317 return &user_ia64_view;
2318}