166df5bab769cc7990b10bb802151995c8ccf3df
[GitHub/LineageOS/android_kernel_samsung_universal7580.git] / arch / parisc / kernel / entry.S
1 /*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 * Copyright (C) 1999,2000 Philipp Rumpf
6 * Copyright (C) 1999 SuSE GmbH Nuernberg
7 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25 #include <linux/config.h>
26 #include <asm/asm-offsets.h>
27
28 /* we have the following possibilities to act on an interruption:
29 * - handle in assembly and use shadowed registers only
30 * - save registers to kernel stack and handle in assembly or C */
31
32
33 #include <asm/psw.h>
34 #include <asm/assembly.h> /* for LDREG/STREG defines */
35 #include <asm/pgtable.h>
36 #include <asm/signal.h>
37 #include <asm/unistd.h>
38 #include <asm/thread_info.h>
39
40 #ifdef CONFIG_64BIT
41 #define CMPIB cmpib,*
42 #define CMPB cmpb,*
43 #define COND(x) *x
44
45 .level 2.0w
46 #else
47 #define CMPIB cmpib,
48 #define CMPB cmpb,
49 #define COND(x) x
50
51 .level 2.0
52 #endif
53
54 .import pa_dbit_lock,data
55
56 /* space_to_prot macro creates a prot id from a space id */
57
58 #if (SPACEID_SHIFT) == 0
/* Space ids start at bit 0: shift the space id left into the
 * protection-id position, zero-filling the result (depd,z). */
59 .macro space_to_prot spc prot
60 depd,z \spc,62,31,\prot
61 .endm
62 #else
/* Space ids are pre-shifted by SPACEID_SHIFT: extract the id
 * field instead of shifting it up. */
63 .macro space_to_prot spc prot
64 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
65 .endm
66 #endif
67
68 /* Switch to virtual mapping, trashing only %r1 */
/* Hand-builds interruption-return state: zeroes the kernel space
 * registers, loads KERNEL_PSW into %ipsw, points the IIA queues at
 * local label 4: below, and rfir's so execution continues there
 * with translation enabled.  %r29 (pt_regs pointer) survives the
 * rfir and is converted to a virtual address on the way. */
69 .macro virt_map
70 /* pcxt_ssm_bug */
71 rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */
72 mtsp %r0, %sr4
73 mtsp %r0, %sr5
74 mfsp %sr7, %r1
75 or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
76 mtsp %r1, %sr3
77 tovirt_r1 %r29
78 load32 KERNEL_PSW, %r1
79
80 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
81 mtsp %r0, %sr6
82 mtsp %r0, %sr7
/* writing %cr17 twice pushes through both slots of the two-deep
 * instruction-address space queue */
83 mtctl %r0, %cr17 /* Clear IIASQ tail */
84 mtctl %r0, %cr17 /* Clear IIASQ head */
85 mtctl %r1, %ipsw
86 load32 4f, %r1
87 mtctl %r1, %cr18 /* Set IIAOQ tail */
88 ldo 4(%r1), %r1
89 mtctl %r1, %cr18 /* Set IIAOQ head */
90 rfir
91 nop
92 4:
93 .endm
94
95 /*
96 * The "get_stack" macros are responsible for determining the
97 * kernel stack value.
98 *
99 * For Faults:
100 * If sr7 == 0
101 * Already using a kernel stack, so call the
102 * get_stack_use_r30 macro to push a pt_regs structure
103 * on the stack, and store registers there.
104 * else
105 * Need to set up a kernel stack, so call the
106 * get_stack_use_cr30 macro to set up a pointer
107 * to the pt_regs structure contained within the
108 * task pointer pointed to by cr30. Set the stack
109 * pointer to point to the end of the task structure.
110 *
111 * For Interrupts:
112 * If sr7 == 0
113 * Already using a kernel stack, check to see if r30
114 * is already pointing to the per processor interrupt
115 * stack. If it is, call the get_stack_use_r30 macro
116 * to push a pt_regs structure on the stack, and store
117 * registers there. Otherwise, call get_stack_use_cr31
118 * to get a pointer to the base of the interrupt stack
119 * and push a pt_regs structure on that stack.
120 * else
121 * Need to set up a kernel stack, so call the
122 * get_stack_use_cr30 macro to set up a pointer
123 * to the pt_regs structure contained within the
124 * task pointer pointed to by cr30. Set the stack
125 * pointer to point to the end of the task structure.
126 * N.B: We don't use the interrupt stack for the
127 * first interrupt from userland, because signals/
128 * resched's are processed when returning to userland,
129 * and we can sleep in those cases.
130 *
131 * Note that we use shadowed registers for temps until
132 * we can save %r26 and %r29. %r26 is used to preserve
133 * %r8 (a shadowed register) which temporarily contained
134 * either the fault type ("code") or the eirr. We need
135 * to use a non-shadowed register to carry the value over
136 * the rfir in virt_map. We use %r26 since this value winds
137 * up being passed as the argument to either do_cpu_irq_mask
138 * or handle_interruption. %r29 is used to hold a pointer
139 * to the register save area, and once again, it needs
140 * be a non-shadowed register so that it survives the rfir.
141 *
142 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
143 */
144
145 .macro get_stack_use_cr30
146
147 /* we save the registers in the task struct */
148
/* %cr30 holds the thread_info pointer (see TI_TASK use below).
 * On exit: %r29 = physical address of the task's pt_regs save
 * area (with %r30/%r29/%r26 already saved into it), and %r30 =
 * new kernel stack pointer (thread_info + THREAD_SZ_ALGN). */
149 mfctl %cr30, %r1
150 tophys %r1,%r9
151 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
152 tophys %r1,%r9
153 ldo TASK_REGS(%r9),%r9
154 STREG %r30, PT_GR30(%r9)
155 STREG %r29,PT_GR29(%r9)
156 STREG %r26,PT_GR26(%r9)
157 copy %r9,%r29
158 mfctl %cr30, %r1
159 ldo THREAD_SZ_ALGN(%r1), %r30
160 .endm
161
162 .macro get_stack_use_r30
163
164 /* we put a struct pt_regs on the stack and save the registers there */
165
/* Already on a kernel stack: push a PT_SZ_ALGN-sized pt_regs
 * frame and leave %r29 pointing (physical) at it. */
166 tophys %r30,%r9
167 STREG %r30,PT_GR30(%r9)
168 ldo PT_SZ_ALGN(%r30),%r30
169 STREG %r29,PT_GR29(%r9)
170 STREG %r26,PT_GR26(%r9)
171 copy %r9,%r29
172 .endm
173
174 .macro rest_stack
/* Undo get_stack_use_*: restore %r1 and %r30, and restore %r29
 * last of all -- it is the pointer we are restoring through. */
175 LDREG PT_GR1(%r29), %r1
176 LDREG PT_GR30(%r29),%r30
177 LDREG PT_GR29(%r29),%r29
178 .endm
179
180 /* default interruption handler
181 * (calls traps.c:handle_interruption) */
182 .macro def code
183 b intr_save
184 ldi \code, %r8 /* delay slot: trap number travels in shadowed %r8 */
185 .align 32 /* each interruption vector entry is 32 bytes */
186 .endm
187
188 /* Interrupt interruption handler
189 * (calls irq.c:do_cpu_irq_mask) */
190 .macro extint code
191 b intr_extint
192 mfsp %sr7,%r16 /* delay slot: %r16 == 0 means we came from kernel space */
193 .align 32
194 .endm
195
196 .import os_hpmc, code
197
198 /* HPMC handler */
/* High Priority Machine Check entry.  NOTE(review): the trailing
 * .word triple (checksum/address/length) looks like a descriptor
 * consumed by firmware; per the comments both the leading nop and
 * the checksum are filled in ("patched") at boot. */
199 .macro hpmc code
200 nop /* must be a NOP, will be patched later */
201 load32 PA(os_hpmc), %r3
202 bv,n 0(%r3)
203 nop
204 .word 0 /* checksum (will be patched) */
205 .word PA(os_hpmc) /* address of handler */
206 .word 0 /* length of handler */
207 .endm
208
209 /*
210 * Performance Note: Instructions will be moved up into
211 * this part of the code later on, once we are sure
212 * that the tlb miss handlers are close to final form.
213 */
214
215 /* Register definitions for tlb miss handler macros */
216
217 va = r8 /* virtual address for which the trap occurred */
218 spc = r24 /* space for which the trap occurred */
219
220 #ifndef CONFIG_64BIT
221
222 /*
223 * itlb miss interruption handler (parisc 1.1 - 32 bit)
224 */
225
226 .macro itlb_11 code
227
/* Faulting space/offset come from the instruction address queues
 * (pcsq/pcoq); the second mfctl sits in the branch delay slot. */
228 mfctl %pcsq, spc
229 b itlb_miss_11
230 mfctl %pcoq, va
231
232 .align 32
233 .endm
234 #endif
235
236 /*
237 * itlb miss interruption handler (parisc 2.0)
238 */
239
240 .macro itlb_20 code
241 mfctl %pcsq, spc
242 #ifdef CONFIG_64BIT
243 b itlb_miss_20w
244 #else
245 b itlb_miss_20
246 #endif
/* delay slot: faulting instruction-address offset */
247 mfctl %pcoq, va
248
249 .align 32
250 .endm
251
252 #ifndef CONFIG_64BIT
253 /*
254 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
255 *
256 * Note: naitlb misses will be treated
257 * as an ordinary itlb miss for now.
258 * However, note that naitlb misses
259 * have the faulting address in the
260 * IOR/ISR.
261 */
262
263 .macro naitlb_11 code
264
/* Non-access itlb miss: the address is in isr/ior, then we reuse
 * the ordinary itlb miss path (see FIXME below). */
265 mfctl %isr,spc
266 b itlb_miss_11
267 mfctl %ior,va
268 /* FIXME: If user causes a naitlb miss, the priv level may not be in
269 * lower bits of va, where the itlb miss handler is expecting them
270 */
271
272 .align 32
273 .endm
274 #endif
275
276 /*
277 * naitlb miss interruption handler (parisc 2.0)
278 *
279 * Note: naitlb misses will be treated
280 * as an ordinary itlb miss for now.
281 * However, note that naitlb misses
282 * have the faulting address in the
283 * IOR/ISR.
284 */
285
286 .macro naitlb_20 code
287
/* Non-access itlb miss (PA 2.0): address from isr/ior, dispatched
 * to the ordinary itlb miss path (see FIXME below). */
288 mfctl %isr,spc
289 #ifdef CONFIG_64BIT
290 b itlb_miss_20w
291 #else
292 b itlb_miss_20
293 #endif
/* delay slot */
294 mfctl %ior,va
295 /* FIXME: If user causes a naitlb miss, the priv level may not be in
296 * lower bits of va, where the itlb miss handler is expecting them
297 */
298
299 .align 32
300 .endm
301
302 #ifndef CONFIG_64BIT
303 /*
304 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
305 */
306
307 .macro dtlb_11 code
308
/* Data TLB miss: faulting space/offset are in isr/ior. */
309 mfctl %isr, spc
310 b dtlb_miss_11
311 mfctl %ior, va
312
313 .align 32
314 .endm
315 #endif
316
317 /*
318 * dtlb miss interruption handler (parisc 2.0)
319 */
320
321 .macro dtlb_20 code
322
/* Data TLB miss (PA 2.0): faulting space/offset are in isr/ior. */
323 mfctl %isr, spc
324 #ifdef CONFIG_64BIT
325 b dtlb_miss_20w
326 #else
327 b dtlb_miss_20
328 #endif
/* delay slot */
329 mfctl %ior, va
330
331 .align 32
332 .endm
333
334 #ifndef CONFIG_64BIT
335 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
336
337 .macro nadtlb_11 code
338
/* Non-access data TLB miss dispatch; address in isr/ior. */
339 mfctl %isr,spc
340 b nadtlb_miss_11
341 mfctl %ior,va
342
343 .align 32
344 .endm
345 #endif
346
347 /* nadtlb miss interruption handler (parisc 2.0) */
348
349 .macro nadtlb_20 code
350
/* Non-access data TLB miss dispatch (PA 2.0); address in isr/ior. */
351 mfctl %isr,spc
352 #ifdef CONFIG_64BIT
353 b nadtlb_miss_20w
354 #else
355 b nadtlb_miss_20
356 #endif
/* delay slot */
357 mfctl %ior,va
358
359 .align 32
360 .endm
361
362 #ifndef CONFIG_64BIT
363 /*
364 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
365 */
366
367 .macro dbit_11 code
368
/* TLB dirty-bit trap: a store hit a page whose D bit is clear. */
369 mfctl %isr,spc
370 b dbit_trap_11
371 mfctl %ior,va
372
373 .align 32
374 .endm
375 #endif
376
377 /*
378 * dirty bit trap interruption handler (parisc 2.0)
379 */
380
381 .macro dbit_20 code
382
/* TLB dirty-bit trap (PA 2.0): store hit a page with D bit clear. */
383 mfctl %isr,spc
384 #ifdef CONFIG_64BIT
385 b dbit_trap_20w
386 #else
387 b dbit_trap_20
388 #endif
/* delay slot */
389 mfctl %ior,va
390
391 .align 32
392 .endm
393
394 /* The following are simple 32 vs 64 bit instruction
395 * abstractions for the macros */
396 .macro EXTR reg1,start,length,reg2
/* Unsigned bit-field extract; \start uses 32-bit bit numbering.
 * The 64-bit form adds 32 so the same \start hits the low word. */
397 #ifdef CONFIG_64BIT
398 extrd,u \reg1,32+\start,\length,\reg2
399 #else
400 extrw,u \reg1,\start,\length,\reg2
401 #endif
402 .endm
403
404 .macro DEP reg1,start,length,reg2
/* Bit-field deposit; \start uses 32-bit bit numbering (the 64-bit
 * form adds 32, mirroring EXTR above). */
405 #ifdef CONFIG_64BIT
406 depd \reg1,32+\start,\length,\reg2
407 #else
408 depw \reg1,\start,\length,\reg2
409 #endif
410 .endm
411
412 .macro DEPI val,start,length,reg
/* Immediate bit-field deposit; same 32-bit bit numbering as DEP. */
413 #ifdef CONFIG_64BIT
414 depdi \val,32+\start,\length,\reg
415 #else
416 depwi \val,\start,\length,\reg
417 #endif
418 .endm
419
420 /* In LP64, the space contains part of the upper 32 bits of the
421 * fault. We have to extract this and place it in the va,
422 * zeroing the corresponding bits in the space register */
423 .macro space_adjust spc,va,tmp
424 #ifdef CONFIG_64BIT
/* Move the low SPACEID_SHIFT bits of \spc into the top of the
 * offset \va, and clear them from \spc (see comment above). */
425 extrd,u \spc,63,SPACEID_SHIFT,\tmp
426 depd %r0,63,SPACEID_SHIFT,\spc
427 depd \tmp,31,SPACEID_SHIFT,\va
428 #endif
429 .endm
430
431 .import swapper_pg_dir,code
432
433 /* Get the pgd. For faults on space zero (kernel space), this
434 * is simply swapper_pg_dir. For user space faults, the
435 * pgd is stored in %cr25 */
436 .macro get_pgd spc,reg
437 ldil L%PA(swapper_pg_dir),\reg
438 ldo R%PA(swapper_pg_dir)(\reg),\reg
/* if \spc == 0 (kernel) the or,= nullifies the mfctl, keeping
 * swapper_pg_dir; otherwise \reg gets the user pgd from %cr25 */
439 or,COND(=) %r0,\spc,%r0
440 mfctl %cr25,\reg
441 .endm
442
443 /*
444 space_check(spc,tmp,fault)
445
446 spc - The space we saw the fault with.
447 tmp - The place to store the current space.
448 fault - Function to call on failure.
449
450 Only allow faults on different spaces from the
451 currently active one if we're the kernel
452
453 */
454 .macro space_check spc,tmp,fault
/* Branch to \fault when the faulting space differs from the
 * current one -- unless we are the kernel (sr7 == 0), in which
 * case \tmp is overwritten with \spc so the compare passes. */
455 mfsp %sr7,\tmp
456 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
457 * as kernel, so defeat the space
458 * check if it is */
459 copy \spc,\tmp
460 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
461 cmpb,COND(<>),n \tmp,\spc,\fault
462 .endm
463
464 /* Look up a PTE in a 2-Level scheme (faulting at each
465 * level if the entry isn't present
466 *
467 * NOTE: we use ldw even for LP64, since the short pointers
468 * can address up to 1TB
469 */
470 .macro L2_ptep pmd,pte,index,va,fault
/* Walk the lower two levels: index \pmd by \va, branch to \fault
 * if the pmd or pte entry lacks its present bit, otherwise leave
 * the pte value in \pte (and \pmd pointing at its slot).
 * Clobbers %r9. */
471 #if PT_NLEVELS == 3
472 EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
473 #else
474 EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
475 #endif
476 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
477 copy %r0,\pte
478 ldw,s \index(\pmd),\pmd
479 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
480 DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
481 copy \pmd,%r9
/* shift the short pmd pointer up into a real address (see the
 * "short pointers" note above) */
482 #ifdef CONFIG_64BIT
483 shld %r9,PxD_VALUE_SHIFT,\pmd
484 #else
485 shlw %r9,PxD_VALUE_SHIFT,\pmd
486 #endif
487 EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
488 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
489 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
490 LDREG %r0(\pmd),\pte /* pmd is now pte */
491 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
492 .endm
493
494 /* Look up PTE in a 3-Level scheme.
495 *
496 * Here we implement a Hybrid L2/L3 scheme: we allocate the
497 * first pmd adjacent to the pgd. This means that we can
498 * subtract a constant offset to get to it. The pmd and pgd
499 * sizes are arranged so that a single pmd covers 4GB (giving
500 * a full LP64 process access to 8TB) so our lookups are
501 * effectively L2 for the first 4GB of the kernel (i.e. for
502 * all ILP32 processes and all the kernel for machines with
503 * under 4GB of memory) */
504 .macro L3_ptep pgd,pte,index,va,fault
/* Each extrd,u,*= nullifies the following insn when the upper 32
 * bits of \va are zero -- so the top-level pgd lookup only runs
 * for addresses above 4GB.  Below 4GB the final extrd,u,*<>
 * instead lets the ldo run, selecting the pmd that sits at
 * ASM_PGD_PMD_OFFSET from the pgd (the hybrid scheme above). */
505 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
506 copy %r0,\pte
507 extrd,u,*= \va,31,32,%r0
508 ldw,s \index(\pgd),\pgd
509 extrd,u,*= \va,31,32,%r0
510 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
511 extrd,u,*= \va,31,32,%r0
512 shld \pgd,PxD_VALUE_SHIFT,\index
513 extrd,u,*= \va,31,32,%r0
514 copy \index,\pgd
515 extrd,u,*<> \va,31,32,%r0
516 ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
517 L2_ptep \pgd,\pte,\index,\va,\fault
518 .endm
519
520 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
521 * don't needlessly dirty the cache line if it was already set */
522 .macro update_ptep ptep,pte,tmp,tmp1
523 ldi _PAGE_ACCESSED,\tmp1
524 or \tmp1,\pte,\tmp
/* the and,<> nullifies the store when _PAGE_ACCESSED was already
 * set, avoiding a needless cache-line dirty */
525 and,COND(<>) \tmp1,\pte,%r0
526 STREG \tmp,0(\ptep)
527 .endm
528
529 /* Set the dirty bit (and accessed bit). No need to be
530 * clever, this is only used from the dirty fault */
531 .macro update_dirty ptep,pte,tmp
/* unconditional: the dirty fault guarantees the bits need setting */
532 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
533 or \tmp,\pte,\pte
534 STREG \pte,0(\ptep)
535 .endm
536
537 /* Convert the pte and prot to tlb insertion values. How
538 * this happens is quite subtle, read below */
539 .macro make_insert_tlb spc,pte,prot
540 space_to_prot \spc \prot /* create prot id from space */
541 /* The following is the real subtlety. This is depositing
542 * T <-> _PAGE_REFTRAP
543 * D <-> _PAGE_DIRTY
544 * B <-> _PAGE_DMB (memory break)
545 *
546 * Then incredible subtlety: The access rights are
547 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
548 * See 3-14 of the parisc 2.0 manual
549 *
550 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
551 * trigger an access rights trap in user space if the user
552 * tries to read an unreadable page */
553 depd \pte,8,7,\prot
554
555 /* PAGE_USER indicates the page can be read with user privileges,
556 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
557 * contains _PAGE_READ */
/* the extrd,u,*= nullifies the depdi unless _PAGE_USER is set */
558 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
559 depdi 7,11,3,\prot
560 /* If we're a gateway page, drop PL2 back to zero for promotion
561 * to kernel privilege (so we can execute the page as kernel).
562 * Any privilege promotion page always denies read and write */
563 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
564 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
565
566 /* Get rid of prot bits and convert to page addr for iitlbt */
567
568 depd %r0,63,PAGE_SHIFT,\pte
569 extrd,u \pte,56,32,\pte
570 .endm
571
572 /* Identical macro to make_insert_tlb above, except it
573 * makes the tlb entry for the differently formatted pa11
574 * insertion instructions */
575 .macro make_insert_tlb_11 spc,pte,prot
/* PA 1.1 variant: each extru,= nullifies the following depi
 * unless the tested PTE bit is set. */
576 zdep \spc,30,15,\prot
577 dep \pte,8,7,\prot
578 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
579 depi 1,12,1,\prot
580 extru,= \pte,_PAGE_USER_BIT,1,%r0
581 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
582 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
583 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
584
585 /* Get rid of prot bits and convert to page addr for iitlba */
586
587 depi 0,31,12,\pte
588 extru \pte,24,25,\pte
589
590 .endm
591
592 /* This is for ILP32 PA2.0 only. The TLB insertion needs
593 * to extend into I/O space if the address is 0xfXXXXXXX
594 * so we extend the f's into the top word of the pte in
595 * this case */
596 .macro f_extend pte,tmp
/* The addi,<> nullifies the sign-extension unless the extracted
 * nibble is all-ones (\tmp == -1, i.e. a 0xfXXXXXXX address). */
597 extrd,s \pte,42,4,\tmp
598 addi,<> 1,\tmp,%r0
599 extrd,s \pte,63,25,\pte
600 .endm
601
602 /* The alias region is an 8MB aligned 16MB to do clear and
603 * copy user pages at addresses congruent with the user
604 * virtual address.
605 *
606 * To use the alias page, you set %r26 up with the to TLB
607 * entry (identifying the physical page) and %r23 up with
608 * the from tlb entry (or nothing if only a to entry---for
609 * clear_user_page_asm) */
610 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
/* Bail to \fault unless this is a kernel-space (spc == 0) fault
 * inside the tmpalias region; otherwise build a fixed R/W prot
 * and pick the "from" (%r23) or "to" (%r26) tlb entry (see the
 * alias-region comment above). */
611 cmpib,COND(<>),n 0,\spc,\fault
612 ldil L%(TMPALIAS_MAP_START),\tmp
613 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
614 /* on LP64, ldi will sign extend into the upper 32 bits,
615 * which is behaviour we don't want */
616 depdi 0,31,32,\tmp
617 #endif
618 copy \va,\tmp1
/* mask off the low 23 bits of \va before comparing against the
 * region base */
619 DEPI 0,31,23,\tmp1
620 cmpb,COND(<>),n \tmp,\tmp1,\fault
621 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
622 depd,z \prot,8,7,\prot
623 /*
624 * OK, it is in the temp alias region, check whether "from" or "to".
625 * Check "subtle" note in pacache.S re: r23/r26.
626 */
627 #ifdef CONFIG_64BIT
628 extrd,u,*= \va,41,1,%r0
629 #else
630 extrw,u,= \va,9,1,%r0
631 #endif
/* bit clear -> nullify the or,tr and take %r26; bit set -> take
 * %r23 (the ,tr condition then nullifies the second or) */
632 or,COND(tr) %r23,%r0,\pte
633 or %r26,%r0,\pte
634 .endm
635
636
637 /*
638 * Align fault_vector_20 on 4K boundary so that both
639 * fault_vector_11 and fault_vector_20 are on the
640 * same page. This is only necessary as long as we
641 * write protect the kernel text, which we may stop
642 * doing once we use large page translations to cover
643 * the static part of the kernel address space.
644 */
645
646 .export fault_vector_20
647
648 .text
649
650 .align 4096
651
/* PA 2.0 interruption vector table: 32 entries of 32 bytes each
 * (the macros above pad with .align 32); the entry index is the
 * interruption code passed to the handler in %r8. */
652 fault_vector_20:
653 /* First vector is invalid (0) */
654 .ascii "cows can fly"
655 .byte 0
656 .align 32
657
658 hpmc 1
659 def 2
660 def 3
661 extint 4
662 def 5
663 itlb_20 6
664 def 7
665 def 8
666 def 9
667 def 10
668 def 11
669 def 12
670 def 13
671 def 14
672 dtlb_20 15
673 #if 0
674 naitlb_20 16
675 #else
676 def 16
677 #endif
678 nadtlb_20 17
679 def 18
680 def 19
681 dbit_20 20
682 def 21
683 def 22
684 def 23
685 def 24
686 def 25
687 def 26
688 def 27
689 def 28
690 def 29
691 def 30
692 def 31
693
694 #ifndef CONFIG_64BIT
695
696 .export fault_vector_11
697
698 .align 2048
699
/* PA 1.1 interruption vector table; same layout as the PA 2.0
 * table above, but dispatching to the *_11 handlers. */
700 fault_vector_11:
701 /* First vector is invalid (0) */
702 .ascii "cows can fly"
703 .byte 0
704 .align 32
705
706 hpmc 1
707 def 2
708 def 3
709 extint 4
710 def 5
711 itlb_11 6
712 def 7
713 def 8
714 def 9
715 def 10
716 def 11
717 def 12
718 def 13
719 def 14
720 dtlb_11 15
721 #if 0
722 naitlb_11 16
723 #else
724 def 16
725 #endif
726 nadtlb_11 17
727 def 18
728 def 19
729 dbit_11 20
730 def 21
731 def 22
732 def 23
733 def 24
734 def 25
735 def 26
736 def 27
737 def 28
738 def 29
739 def 30
740 def 31
741
742 #endif
743
744 .import handle_interruption,code
745 .import do_cpu_irq_mask,code
746
747 /*
748 * r26 = function to be called
749 * r25 = argument to pass in
750 * r24 = flags for do_fork()
751 *
752 * Kernel threads don't ever return, so they don't need
753 * a true register context. We just save away the arguments
754 * for copy_thread/ret_from_kernel_thread to properly set up the child.
755 */
756
757 #define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
758 #define CLONE_UNTRACED 0x00800000
759
760 .export __kernel_thread, code
761 .import do_fork
/* __kernel_thread(fn=%r26, arg=%r25, flags=%r24): spawn a kernel
 * thread via do_fork (CLONE_VM|CLONE_UNTRACED forced in).  The
 * fn/arg pair is stashed in the temporary pt_regs frame for
 * copy_thread to move into the child's task save area. */
762 __kernel_thread:
763 STREG %r2, -RP_OFFSET(%r30)
764
765 copy %r30, %r1
766 ldo PT_SZ_ALGN(%r30),%r30
767 #ifdef CONFIG_64BIT
768 /* Yo, function pointers in wide mode are little structs... -PB */
769 ldd 24(%r26), %r2
770 STREG %r2, PT_GR27(%r1) /* Store childs %dp */
771 ldd 16(%r26), %r26
772
773 STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */
774 copy %r0, %r22 /* user_tid */
775 #endif
776 STREG %r26, PT_GR26(%r1) /* Store function & argument for child */
777 STREG %r25, PT_GR25(%r1)
778 ldil L%CLONE_UNTRACED, %r26
779 ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */
780 or %r26, %r24, %r26 /* will have kernel mappings. */
781 ldi 1, %r25 /* stack_start, signals kernel thread */
782 stw %r0, -52(%r30) /* user_tid */
783 #ifdef CONFIG_64BIT
784 ldo -16(%r30),%r29 /* Reference param save area */
785 #endif
786 BL do_fork, %r2
787 copy %r1, %r24 /* pt_regs */
788
789 /* Parent Returns here */
790
/* pop the temporary frame and return do_fork's result (%r28) */
791 LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2
792 ldo -PT_SZ_ALGN(%r30), %r30
793 bv %r0(%r2)
794 nop
795
796 /*
797 * Child Returns here
798 *
799 * copy_thread moved args from temp save area set up above
800 * into task save area.
801 */
802
803 .export ret_from_kernel_thread
/* Child side of __kernel_thread: reload the thread function (into
 * %r1) and its argument (%r26) that copy_thread stored in the task
 * save area, call the function, and exit with status 0 if it ever
 * returns. */
804 ret_from_kernel_thread:
805
806 /* Call schedule_tail first though */
807 BL schedule_tail, %r2
808 nop
809
810 LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
811 LDREG TASK_PT_GR25(%r1), %r26
812 #ifdef CONFIG_64BIT
813 LDREG TASK_PT_GR27(%r1), %r27
814 LDREG TASK_PT_GR22(%r1), %r22
815 #endif
816 LDREG TASK_PT_GR26(%r1), %r1
/* call the thread function; return address captured from %r31 */
817 ble 0(%sr7, %r1)
818 copy %r31, %r2
819
820 #ifdef CONFIG_64BIT
821 ldo -16(%r30),%r29 /* Reference param save area */
822 loadgp /* Thread could have been in a module */
823 #endif
824 #ifndef CONFIG_64BIT
825 b sys_exit
826 #else
827 load32 sys_exit, %r1
828 bv %r0(%r1)
829 #endif
830 ldi 0, %r26 /* delay slot: exit code 0 */
831
832 .import sys_execve, code
833 .export __execve, code
/* Kernel-internal execve: build a pt_regs frame, call sys_execve,
 * and on success (%r28 == 0) leave for userspace via intr_return.
 * On failure, unwind the frame and return to the caller. */
834 __execve:
835 copy %r2, %r15
836 copy %r30, %r16
837 ldo PT_SZ_ALGN(%r30), %r30
838 STREG %r26, PT_GR26(%r16)
839 STREG %r25, PT_GR25(%r16)
840 STREG %r24, PT_GR24(%r16)
841 #ifdef CONFIG_64BIT
842 ldo -16(%r30),%r29 /* Reference param save area */
843 #endif
844 BL sys_execve, %r2
845 copy %r16, %r26 /* delay slot: arg0 = pt_regs */
846
847 cmpib,=,n 0,%r28,intr_return /* forward */
848
849 /* yes, this will trap and die. */
850 copy %r15, %r2
851 copy %r16, %r30
852 bv %r0(%r2)
853 nop
854
855 .align 4
856
857 /*
858 * struct task_struct *_switch_to(struct task_struct *prev,
859 * struct task_struct *next)
860 *
861 * switch kernel stacks and return prev */
862 .export _switch_to, code
/* Save prev's (%r26) callee-saved state, kernel PC and SP; load
 * next's (%r25) PC/SP and make its thread_info current via %cr30
 * (written in the branch delay slot).  Execution resumes at the
 * PC next saved at its own last switch, normally _switch_to_ret. */
863 _switch_to:
864 STREG %r2, -RP_OFFSET(%r30)
865
866 callee_save_float
867 callee_save
868
869 load32 _switch_to_ret, %r2
870
871 STREG %r2, TASK_PT_KPC(%r26)
872 LDREG TASK_PT_KPC(%r25), %r2
873
874 STREG %r30, TASK_PT_KSP(%r26)
875 LDREG TASK_PT_KSP(%r25), %r30
876 LDREG TASK_THREAD_INFO(%r25), %r25
877 bv %r0(%r2)
878 mtctl %r25,%cr30
879
/* Resumed task continues here: restore callee-saved state and
 * return prev (%r26) in %r28. */
880 _switch_to_ret:
881 mtctl %r0, %cr0 /* Needed for single stepping */
882 callee_rest
883 callee_rest_float
884
885 LDREG -RP_OFFSET(%r30), %r2
886 bv %r0(%r2)
887 copy %r26, %r28 /* delay slot: return value = prev */
888
889 /*
890 * Common rfi return path for interruptions, kernel execve, and
891 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
892 * return via this path if the signal was received when the process
893 * was running; if the process was blocked on a syscall then the
894 * normal syscall_exit path is used. All syscalls for traced
895 * processes exit via intr_restore.
896 *
897 * XXX If any syscalls that change a processes space id ever exit
898 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
899 * adjust IASQ[0..1].
900 *
901 */
902
903 .align 4096
904
905 .export syscall_exit_rfi
906 syscall_exit_rfi:
907 mfctl %cr30,%r16
908 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
909 ldo TASK_REGS(%r16),%r16
910 /* Force iaoq to userspace, as the user has had access to our current
911 * context via sigcontext. Also Filter the PSW for the same reason.
912 */
913 LDREG PT_IAOQ0(%r16),%r19
914 depi 3,31,2,%r19 /* force privilege level 3 (user) into IAOQ */
915 STREG %r19,PT_IAOQ0(%r16)
916 LDREG PT_IAOQ1(%r16),%r19
917 depi 3,31,2,%r19
918 STREG %r19,PT_IAOQ1(%r16)
919 LDREG PT_PSW(%r16),%r19
920 load32 USER_PSW_MASK,%r1
921 #ifdef CONFIG_64BIT
922 load32 USER_PSW_HI_MASK,%r20
923 depd %r20,31,32,%r1
924 #endif
925 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
926 load32 USER_PSW,%r1
927 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
928 STREG %r19,PT_PSW(%r16)
929
930 /*
931 * If we aren't being traced, we never saved space registers
932 * (we don't store them in the sigcontext), so set them
933 * to "proper" values now (otherwise we'll wind up restoring
934 * whatever was last stored in the task structure, which might
935 * be inconsistent if an interrupt occurred while on the gateway
936 * page) Note that we may be "trashing" values the user put in
937 * them, but we don't support the user changing them.
938 */
939
940 STREG %r0,PT_SR2(%r16)
941 mfsp %sr3,%r19
942 STREG %r19,PT_SR0(%r16)
943 STREG %r19,PT_SR1(%r16)
944 STREG %r19,PT_SR3(%r16)
945 STREG %r19,PT_SR4(%r16)
946 STREG %r19,PT_SR5(%r16)
947 STREG %r19,PT_SR6(%r16)
948 STREG %r19,PT_SR7(%r16)
949
/* Common exit point: %r16 must hold the pt_regs pointer here. */
950 intr_return:
951 /* NOTE: Need to enable interrupts incase we schedule. */
952 ssm PSW_SM_I, %r0
953
954 /* Check for software interrupts */
955
956 .import irq_stat,data
957
958 load32 irq_stat,%r19
959 #ifdef CONFIG_SMP
960 mfctl %cr30,%r1
961 ldw TI_CPU(%r1),%r1 /* get cpu # - int */
962 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
963 ** irq_stat[] is defined using ____cacheline_aligned.
964 */
965 #ifdef CONFIG_64BIT
966 shld %r1, 6, %r20
967 #else
968 shlw %r1, 5, %r20
969 #endif
970 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
971 #endif /* CONFIG_SMP */
972
973 LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */
974 cmpib,<>,n 0,%r20,intr_do_softirq /* forward */
975
976 intr_check_resched:
977
978 /* check for reschedule */
979 mfctl %cr30,%r1
980 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
981 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
982
983 intr_check_sig:
984 /* As above */
985 mfctl %cr30,%r1
986 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_SIGPENDING */
987 bb,<,n %r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */
988
/* No more work: restore the full register context and rfi. */
989 intr_restore:
990 copy %r16,%r29
991 ldo PT_FR31(%r29),%r1
992 rest_fp %r1
993 rest_general %r29
994
995 /* inverse of virt_map */
996 pcxt_ssm_bug
997 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
998 tophys_r1 %r29
999
1000 /* Restore space id's and special cr's from PT_REGS
1001 * structure pointed to by r29
1002 */
1003 rest_specials %r29
1004
1005 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
1006 * It also restores r1 and r30.
1007 */
1008 rest_stack
1009
1010 rfi
1011 nop
1012 nop
1013 nop
1014 nop
1015 nop
1016 nop
1017 nop
1018 nop
1019
1020 .import do_softirq,code
/* Pending softirqs: run do_softirq() then resume the exit checks. */
1021 intr_do_softirq:
1022 BL do_softirq,%r2
1023 #ifdef CONFIG_64BIT
1024 ldo -16(%r30),%r29 /* Reference param save area */
1025 #else
1026 nop
1027 #endif
1028 b intr_check_resched
1029 nop
1030
1031 .import schedule,code
1032 intr_do_resched:
1033 /* Only do reschedule if we are returning to user space */
/* IASQ0/IASQ1 == 0 means a kernel-space return: skip straight to
 * intr_restore instead of scheduling. */
1034 LDREG PT_IASQ0(%r16), %r20
1035 CMPIB= 0,%r20,intr_restore /* backward */
1036 nop
1037 LDREG PT_IASQ1(%r16), %r20
1038 CMPIB= 0,%r20,intr_restore /* backward */
1039 nop
1040
1041 #ifdef CONFIG_64BIT
1042 ldo -16(%r30),%r29 /* Reference param save area */
1043 #endif
1044
/* tail-call schedule() with %r2 aimed at intr_check_sig so it
 * "returns" into the remaining exit checks */
1045 ldil L%intr_check_sig, %r2
1046 #ifndef CONFIG_64BIT
1047 b schedule
1048 #else
1049 load32 schedule, %r20
1050 bv %r0(%r20)
1051 #endif
1052 ldo R%intr_check_sig(%r2), %r2
1053
1054
1055 .import do_signal,code
1056 intr_do_signal:
1057 /*
1058 This check is critical to having LWS
1059 working. The IASQ is zero on the gateway
1060 page and we cannot deliver any signals until
1061 we get off the gateway page.
1062
1063 Only do signals if we are returning to user space
1064 */
1065 LDREG PT_IASQ0(%r16), %r20
1066 CMPIB= 0,%r20,intr_restore /* backward */
1067 nop
1068 LDREG PT_IASQ1(%r16), %r20
1069 CMPIB= 0,%r20,intr_restore /* backward */
1070 nop
1071
/* do_signal(oldset=NULL, regs, in_syscall=0) */
1072 copy %r0, %r24 /* unsigned long in_syscall */
1073 copy %r16, %r25 /* struct pt_regs *regs */
1074 #ifdef CONFIG_64BIT
1075 ldo -16(%r30),%r29 /* Reference param save area */
1076 #endif
1077
1078 BL do_signal,%r2
1079 copy %r0, %r26 /* sigset_t *oldset = NULL */
1080
/* re-run the signal check in case more work became pending */
1081 b intr_check_sig
1082 nop
1083
1084 /*
1085 * External interrupts.
1086 */
1087
/* External interrupt entry (from the extint macro): %r16 holds
 * sr7; zero means we were already in the kernel.  Sets up a stack
 * and pt_regs, switches to virtual mode, then tail-calls
 * do_cpu_irq_mask(regs) with %r2 aimed at intr_return. */
1088 intr_extint:
1089 CMPIB=,n 0,%r16,1f
1090 get_stack_use_cr30 /* from user: use the task's kernel stack */
1091 b,n 3f
1092
1093 1:
1094 #if 0 /* Interrupt Stack support not working yet! */
1095 mfctl %cr31,%r1
1096 copy %r30,%r17
1097 /* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
1098 #ifdef CONFIG_64BIT
1099 depdi 0,63,15,%r17
1100 #else
1101 depi 0,31,15,%r17
1102 #endif
1103 CMPB=,n %r1,%r17,2f
1104 get_stack_use_cr31
1105 b,n 3f
1106 #endif
1107 2:
1108 get_stack_use_r30 /* already in kernel: push onto current stack */
1109
1110 3:
1111 save_specials %r29
1112 virt_map
1113 save_general %r29
1114
1115 ldo PT_FR0(%r29), %r24
1116 save_fp %r24
1117
1118 loadgp
1119
1120 copy %r29, %r26 /* arg0 is pt_regs */
1121 copy %r29, %r16 /* save pt_regs */
1122
1123 ldil L%intr_return, %r2
1124
1125 #ifdef CONFIG_64BIT
1126 ldo -16(%r30),%r29 /* Reference param save area */
1127 #endif
1128
1129 b do_cpu_irq_mask
1130 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
1131
1132
1133 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1134
1135 .export intr_save, code /* for os_hpmc */
1136
/* Generic interruption entry (from the def macro): %r8 (shadowed)
 * holds the trap code.  Builds a pt_regs frame, saves isr/ior for
 * everything except itlb misses (code 6), switches to virtual
 * mode, and tail-calls handle_interruption(code, regs) with %r2
 * aimed at intr_check_sig. */
1137 intr_save:
1138 mfsp %sr7,%r16
1139 CMPIB=,n 0,%r16,1f
1140 get_stack_use_cr30 /* from user space */
1141 b 2f
1142 copy %r8,%r26 /* delay slot: arg0 = trap code */
1143
1144 1:
1145 get_stack_use_r30 /* already in kernel */
1146 copy %r8,%r26
1147
1148 2:
1149 save_specials %r29
1150
1151 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1152
1153 /*
1154 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1155 * traps.c.
1156 * 2) Once we start executing code above 4 Gb, we need
1157 * to adjust iasq/iaoq here in the same way we
1158 * adjust isr/ior below.
1159 */
1160
1161 CMPIB=,n 6,%r26,skip_save_ior
1162
1163
1164 mfctl %cr20, %r16 /* isr */
1165 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1166 mfctl %cr21, %r17 /* ior */
1167
1168
1169 #ifdef CONFIG_64BIT
1170 /*
1171 * If the interrupted code was running with W bit off (32 bit),
1172 * clear the b bits (bits 0 & 1) in the ior.
1173 * save_specials left ipsw value in r8 for us to test.
1174 */
1175 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1176 depdi 0,1,2,%r17
1177
1178 /*
1179 * FIXME: This code has hardwired assumptions about the split
1180 * between space bits and offset bits. This will change
1181 * when we allow alternate page sizes.
1182 */
1183
1184 /* adjust isr/ior. */
1185
1186 extrd,u %r16,63,7,%r1 /* get high bits from isr for ior */
1187 depd %r1,31,7,%r17 /* deposit them into ior */
1188 depdi 0,63,7,%r16 /* clear them from isr */
1189 #endif
1190 STREG %r16, PT_ISR(%r29)
1191 STREG %r17, PT_IOR(%r29)
1192
1193
1194 skip_save_ior:
1195 virt_map
1196 save_general %r29
1197
1198 ldo PT_FR0(%r29), %r25
1199 save_fp %r25
1200
1201 loadgp
1202
1203 copy %r29, %r25 /* arg1 is pt_regs */
1204 #ifdef CONFIG_64BIT
1205 ldo -16(%r30),%r29 /* Reference param save area */
1206 #endif
1207
1208 ldil L%intr_check_sig, %r2
1209 copy %r25, %r16 /* save pt_regs */
1210
1211 b handle_interruption
1212 ldo R%intr_check_sig(%r2), %r2
1213
1214
1215 /*
1216 * Note for all tlb miss handlers:
1217 *
1218 * cr24 contains a pointer to the kernel address space
1219 * page directory.
1220 *
1221 * cr25 contains a pointer to the current user address
1222 * space page directory.
1223 *
1224 * sr3 will contain the space id of the user address space
1225 * of the current running thread while that thread is
1226 * running in the kernel.
1227 */
1228
1229 /*
1230 * register number allocations. Note that these are all
1231 * in the shadowed registers
1232 */
1233
/* Symbolic names for the shadowed registers used by the TLB miss
 * handlers below (see the "register number allocations" note above). */
1234 t0 = r1 /* temporary register 0 */
1235 va = r8 /* virtual address for which the trap occurred */
1236 t1 = r9 /* temporary register 1 */
1237 pte = r16 /* pte/phys page # */
1238 prot = r17 /* prot bits */
1239 spc = r24 /* space for which the trap occurred */
1240 ptp = r25 /* page directory/page table pointer */
1241
1242 #ifdef CONFIG_64BIT
1243
/*
 * Wide-mode (64-bit) data TLB miss handler: look the pte up via
 * L3_ptep (branches to dtlb_check_alias_20w when there is no pte),
 * update it, build the insertion values and insert with idtlbt.
 */
1244 dtlb_miss_20w:
1245 space_adjust spc,va,t0
1246 get_pgd spc,ptp
1247 space_check spc,t0,dtlb_fault
1248
1249 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1250
1251 update_ptep ptp,pte,t0,t1
1252
1253 make_insert_tlb spc,pte,prot
1254
1255 idtlbt pte,prot
1256
1257 rfir
1258 nop
1259
/* No pte: check the temporary-alias region via do_alias (which
 * branches to dtlb_fault if the address is not an alias). */
1260 dtlb_check_alias_20w:
1261 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1262
1263 idtlbt pte,prot
1264
1265 rfir
1266 nop
1267
/*
 * Wide-mode non-access data TLB miss (fdc/fic/pdc/probe etc.):
 * same walk as dtlb_miss_20w, but a missing pte goes to the
 * flush-only / emulation path instead of faulting.
 */
1268 nadtlb_miss_20w:
1269 space_adjust spc,va,t0
1270 get_pgd spc,ptp
1271 space_check spc,t0,nadtlb_fault
1272
1273 L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w
1274
1275 update_ptep ptp,pte,t0,t1
1276
1277 make_insert_tlb spc,pte,prot
1278
1279 idtlbt pte,prot
1280
1281 rfir
1282 nop
1283
/* If _PAGE_FLUSH_BIT is clear, emulate the instruction; otherwise
 * insert a translation that only allows the flush to proceed. */
1284 nadtlb_check_flush_20w:
1285 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1286
1287 /* Insert a "flush only" translation */
1288
1289 depdi,z 7,7,3,prot
1290 depdi 1,10,1,prot
1291
1292 /* Get rid of prot bits and convert to page addr for idtlbt */
1293
1294 depdi 0,63,12,pte
1295 extrd,u pte,56,52,pte
1296 idtlbt pte,prot
1297
1298 rfir
1299 nop
1300
1301 #else
1302
/*
 * PA 1.1 data TLB miss handler: two-level page table walk, then a
 * split idtlba/idtlbp insert through %sr1 (saved/restored around
 * the inserts so the interrupted context's sr1 is preserved).
 */
1303 dtlb_miss_11:
1304 get_pgd spc,ptp
1305
1306 space_check spc,t0,dtlb_fault
1307
1308 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1309
1310 update_ptep ptp,pte,t0,t1
1311
1312 make_insert_tlb_11 spc,pte,prot
1313
1314 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1315 mtsp spc,%sr1
1316
1317 idtlba pte,(%sr1,va)
1318 idtlbp prot,(%sr1,va)
1319
1320 mtsp t0, %sr1 /* Restore sr1 */
1321
1322 rfir
1323 nop
1324
/* PA 1.1 alias check: only kernel space (spc == 0) addresses inside
 * TMPALIAS_MAP_START qualify; anything else takes the slow fault. */
1325 dtlb_check_alias_11:
1326
1327 /* Check to see if fault is in the temporary alias region */
1328
1329 cmpib,<>,n 0,spc,dtlb_fault /* forward */
1330 ldil L%(TMPALIAS_MAP_START),t0
1331 copy va,t1
1332 depwi 0,31,23,t1
1333 cmpb,<>,n t0,t1,dtlb_fault /* forward */
1334 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
1335 depw,z prot,8,7,prot
1336
1337 /*
1338 * OK, it is in the temp alias region, check whether "from" or "to".
1339 * Check "subtle" note in pacache.S re: r23/r26.
1340 */
1341
1342 extrw,u,= va,9,1,r0
1343 or,tr %r23,%r0,pte /* If "from" use "from" page */
1344 or %r26,%r0,pte /* else "to", use "to" page */
1345
1346 idtlba pte,(va)
1347 idtlbp prot,(va)
1348
1349 rfir
1350 nop
1351
/*
 * PA 1.1 non-access data TLB miss: like dtlb_miss_11 but a missing
 * pte goes to the flush-only / emulation path instead of faulting.
 */
1352 nadtlb_miss_11:
1353 get_pgd spc,ptp
1354
1355 space_check spc,t0,nadtlb_fault
1356
1357 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11
1358
1359 update_ptep ptp,pte,t0,t1
1360
1361 make_insert_tlb_11 spc,pte,prot
1362
1363
1364 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1365 mtsp spc,%sr1
1366
1367 idtlba pte,(%sr1,va)
1368 idtlbp prot,(%sr1,va)
1369
1370 mtsp t0, %sr1 /* Restore sr1 */
1371
1372 rfir
1373 nop
1374
/* Emulate unless _PAGE_FLUSH_BIT is set; otherwise build and insert
 * a flush-only translation (PA 1.1 split-insert form). */
1375 nadtlb_check_flush_11:
1376 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1377
1378 /* Insert a "flush only" translation */
1379
1380 zdepi 7,7,3,prot
1381 depi 1,10,1,prot
1382
1383 /* Get rid of prot bits and convert to page addr for idtlba */
1384
1385 depi 0,31,12,pte
1386 extru pte,24,25,pte
1387
1388 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1389 mtsp spc,%sr1
1390
1391 idtlba pte,(%sr1,va)
1392 idtlbp prot,(%sr1,va)
1393
1394 mtsp t0, %sr1 /* Restore sr1 */
1395
1396 rfir
1397 nop
1398
/*
 * Narrow-mode PA 2.0 data TLB miss: two-level walk, f_extend to
 * widen the pte, then a combined idtlbt insert.
 */
1399 dtlb_miss_20:
1400 space_adjust spc,va,t0
1401 get_pgd spc,ptp
1402 space_check spc,t0,dtlb_fault
1403
1404 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1405
1406 update_ptep ptp,pte,t0,t1
1407
1408 make_insert_tlb spc,pte,prot
1409
1410 f_extend pte,t0
1411
1412 idtlbt pte,prot
1413
1414 rfir
1415 nop
1416
/* No pte: temporary-alias check via do_alias, else dtlb_fault. */
1417 dtlb_check_alias_20:
1418 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1419
1420 idtlbt pte,prot
1421
1422 rfir
1423 nop
1424
/* Narrow-mode PA 2.0 non-access data TLB miss. */
1425 nadtlb_miss_20:
1426 get_pgd spc,ptp
1427
1428 space_check spc,t0,nadtlb_fault
1429
1430 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20
1431
1432 update_ptep ptp,pte,t0,t1
1433
1434 make_insert_tlb spc,pte,prot
1435
1436 f_extend pte,t0
1437
1438 idtlbt pte,prot
1439
1440 rfir
1441 nop
1442
/* Emulate unless _PAGE_FLUSH_BIT is set; otherwise insert a
 * flush-only translation (narrow PA 2.0 idtlbt form). */
1443 nadtlb_check_flush_20:
1444 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1445
1446 /* Insert a "flush only" translation */
1447
1448 depdi,z 7,7,3,prot
1449 depdi 1,10,1,prot
1450
1451 /* Get rid of prot bits and convert to page addr for idtlbt */
1452
1453 depdi 0,63,12,pte
1454 extrd,u pte,56,32,pte
1455 idtlbt pte,prot
1456
1457 rfir
1458 nop
1459 #endif
1460
1461 nadtlb_emulate:
1462
1463 /*
1464 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1465 * probei instructions. We don't want to fault for these
1466 * instructions (not only does it not make sense, it can cause
1467 * deadlocks, since some flushes are done with the mmap
1468 * semaphore held). If the translation doesn't exist, we can't
1469 * insert a translation, so have to emulate the side effects
1470 * of the instruction. Since we don't insert a translation
1471 * we can get a lot of faults during a flush loop, so it makes
1472 * sense to try to do it here with minimum overhead. We only
1473 * emulate fdc,fic,pdc,probew,prober instructions whose base
1474 * and index registers are not shadowed. We defer everything
1475 * else to the "slow" path.
1476 */
1477
1478 mfctl %cr19,%r9 /* Get iir */
1479
1480 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1481 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1482
1483 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1484 ldi 0x280,%r16
1485 and %r9,%r16,%r17
1486 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1487 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
/* m bit set: emulate the base-register update (base += index).
 * get_register/set_register return -1 in %r1 for shadowed
 * registers, which forces the slow path via nadtlb_fault. */
1488 BL get_register,%r25
1489 extrw,u %r9,15,5,%r8 /* Get index register # */
1490 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1491 copy %r1,%r24
1492 BL get_register,%r25
1493 extrw,u %r9,10,5,%r8 /* Get base register # */
1494 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1495 BL set_register,%r25
1496 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
1497
/* Set the PSW N bit so the faulting instruction is nullified on
 * return instead of being re-executed. */
1498 nadtlb_nullify:
1499 mfctl %ipsw,%r8
1500 ldil L%PSW_N,%r9
1501 or %r8,%r9,%r8 /* Set PSW_N */
1502 mtctl %r8,%ipsw
1503
1504 rfir
1505 nop
1506
1507 /*
1508 When there is no translation for the probe address then we
1509 must nullify the insn and return zero in the target register.
1510 This will indicate to the calling code that it does not have
1511 write/read privileges to this address.
1512
1513 This should technically work for prober and probew in PA 1.1,
1514 and also probe,r and probe,w in PA 2.0
1515
1516 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1517 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1518
1519 */
1520 nadtlb_probe_check:
1521 ldi 0x80,%r16
1522 and %r9,%r16,%r17
1523 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1524 BL get_register,%r25 /* Find the target register */
1525 extrw,u %r9,31,5,%r8 /* Get target register */
1526 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1527 BL set_register,%r25
1528 copy %r0,%r1 /* Write zero to target register */
1529 b nadtlb_nullify /* Nullify return insn */
1530 nop
1531
1532
1533 #ifdef CONFIG_64BIT
/*
 * Wide-mode (64-bit) instruction TLB miss handler: three-level
 * walk, then insert with iitlbt. Any failed lookup goes straight
 * to itlb_fault (no alias region for the I-side).
 */
1534 itlb_miss_20w:
1535
1536 /*
1537 * I miss is a little different, since we allow users to fault
1538 * on the gateway page which is in the kernel address space.
1539 */
1540
1541 space_adjust spc,va,t0
1542 get_pgd spc,ptp
1543 space_check spc,t0,itlb_fault
1544
1545 L3_ptep ptp,pte,t0,va,itlb_fault
1546
1547 update_ptep ptp,pte,t0,t1
1548
1549 make_insert_tlb spc,pte,prot
1550
1551 iitlbt pte,prot
1552
1553 rfir
1554 nop
1555
1556 #else
1557
/*
 * PA 1.1 instruction TLB miss: two-level walk, split iitlba/iitlbp
 * insert through %sr1 (saved and restored around the inserts).
 */
1558 itlb_miss_11:
1559 get_pgd spc,ptp
1560
1561 space_check spc,t0,itlb_fault
1562
1563 L2_ptep ptp,pte,t0,va,itlb_fault
1564
1565 update_ptep ptp,pte,t0,t1
1566
1567 make_insert_tlb_11 spc,pte,prot
1568
1569 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1570 mtsp spc,%sr1
1571
1572 iitlba pte,(%sr1,va)
1573 iitlbp prot,(%sr1,va)
1574
1575 mtsp t0, %sr1 /* Restore sr1 */
1576
1577 rfir
1578 nop
1579
/*
 * Narrow-mode PA 2.0 instruction TLB miss: two-level walk,
 * f_extend to widen the pte, combined iitlbt insert.
 */
1580 itlb_miss_20:
1581 get_pgd spc,ptp
1582
1583 space_check spc,t0,itlb_fault
1584
1585 L2_ptep ptp,pte,t0,va,itlb_fault
1586
1587 update_ptep ptp,pte,t0,t1
1588
1589 make_insert_tlb spc,pte,prot
1590
1591 f_extend pte,t0
1592
1593 iitlbt pte,prot
1594
1595 rfir
1596 nop
1597
1598 #endif
1599
1600 #ifdef CONFIG_64BIT
1601
/*
 * Wide-mode (64-bit) TLB dirty-bit trap: mark the pte dirty and
 * re-insert the translation. On SMP, pa_dbit_lock serializes the
 * update; the lock is skipped for kernel space (spc == 0).
 */
1602 dbit_trap_20w:
1603 space_adjust spc,va,t0
1604 get_pgd spc,ptp
1605 space_check spc,t0,dbit_fault
1606
1607 L3_ptep ptp,pte,t0,va,dbit_fault
1608
1609 #ifdef CONFIG_SMP
1610 CMPIB=,n 0,spc,dbit_nolock_20w
1611 load32 PA(pa_dbit_lock),t0
1612
/* ldcw spin: a read of 0 means the lock is held, so retry. */
1613 dbit_spin_20w:
1614 ldcw 0(t0),t1
1615 cmpib,= 0,t1,dbit_spin_20w
1616 nop
1617
1618 dbit_nolock_20w:
1619 #endif
1620 update_dirty ptp,pte,t1
1621
1622 make_insert_tlb spc,pte,prot
1623
1624 idtlbt pte,prot
1625 #ifdef CONFIG_SMP
1626 CMPIB=,n 0,spc,dbit_nounlock_20w
1627 ldi 1,t1
1628 stw t1,0(t0) /* release pa_dbit_lock */
1629
1630 dbit_nounlock_20w:
1631 #endif
1632
1633 rfir
1634 nop
1635 #else
1636
/*
 * PA 1.1 TLB dirty-bit trap: same structure as dbit_trap_20w but
 * with a two-level walk and the split idtlba/idtlbp insert. Note
 * t1 is used for the sr1 save here, so the unlock reloads t1.
 */
1637 dbit_trap_11:
1638
1639 get_pgd spc,ptp
1640
1641 space_check spc,t0,dbit_fault
1642
1643 L2_ptep ptp,pte,t0,va,dbit_fault
1644
1645 #ifdef CONFIG_SMP
1646 CMPIB=,n 0,spc,dbit_nolock_11
1647 load32 PA(pa_dbit_lock),t0
1648
/* ldcw spin: a read of 0 means the lock is held, so retry. */
1649 dbit_spin_11:
1650 ldcw 0(t0),t1
1651 cmpib,= 0,t1,dbit_spin_11
1652 nop
1653
1654 dbit_nolock_11:
1655 #endif
1656 update_dirty ptp,pte,t1
1657
1658 make_insert_tlb_11 spc,pte,prot
1659
1660 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1661 mtsp spc,%sr1
1662
1663 idtlba pte,(%sr1,va)
1664 idtlbp prot,(%sr1,va)
1665
1666 mtsp t1, %sr1 /* Restore sr1 */
1667 #ifdef CONFIG_SMP
1668 CMPIB=,n 0,spc,dbit_nounlock_11
1669 ldi 1,t1
1670 stw t1,0(t0) /* release pa_dbit_lock */
1671
1672 dbit_nounlock_11:
1673 #endif
1674
1675 rfir
1676 nop
1677
/*
 * Narrow-mode PA 2.0 TLB dirty-bit trap: two-level walk, f_extend,
 * combined idtlbt insert; same SMP locking as the variants above.
 */
1678 dbit_trap_20:
1679 get_pgd spc,ptp
1680
1681 space_check spc,t0,dbit_fault
1682
1683 L2_ptep ptp,pte,t0,va,dbit_fault
1684
1685 #ifdef CONFIG_SMP
1686 CMPIB=,n 0,spc,dbit_nolock_20
1687 load32 PA(pa_dbit_lock),t0
1688
/* ldcw spin: a read of 0 means the lock is held, so retry. */
1689 dbit_spin_20:
1690 ldcw 0(t0),t1
1691 cmpib,= 0,t1,dbit_spin_20
1692 nop
1693
1694 dbit_nolock_20:
1695 #endif
1696 update_dirty ptp,pte,t1
1697
1698 make_insert_tlb spc,pte,prot
1699
1700 f_extend pte,t1
1701
1702 idtlbt pte,prot
1703
1704 #ifdef CONFIG_SMP
1705 CMPIB=,n 0,spc,dbit_nounlock_20
1706 ldi 1,t1
1707 stw t1,0(t0) /* release pa_dbit_lock */
1708
1709 dbit_nounlock_20:
1710 #endif
1711
1712 rfir
1713 nop
1714 #endif
1715
1716 .import handle_interruption,code

/* Slow-path fault stubs: each loads an interruption code into %r8
 * and branches to intr_save, which builds pt_regs and eventually
 * calls handle_interruption. The branch's delay slot holds the ldi. */
1718 kernel_bad_space:
1719 b intr_save
1720 ldi 31,%r8 /* Use an unused code */
1721
1722 dbit_fault:
1723 b intr_save
1724 ldi 20,%r8
1725
1726 itlb_fault:
1727 b intr_save
1728 ldi 6,%r8
1729
1730 nadtlb_fault:
1731 b intr_save
1732 ldi 17,%r8
1733
1734 dtlb_fault:
1735 b intr_save
1736 ldi 15,%r8
1737
1738 /* Register saving semantics for system calls:
1739
1740 %r1 clobbered by system call macro in userspace
1741 %r2 saved in PT_REGS by gateway page
1742 %r3 - %r18 preserved by C code (saved by signal code)
1743 %r19 - %r20 saved in PT_REGS by gateway page
1744 %r21 - %r22 non-standard syscall args
1745 stored in kernel stack by gateway page
1746 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1747 %r27 - %r30 saved in PT_REGS by gateway page
1748 %r31 syscall return pointer
1749 */
1750
1751 /* Floating point registers (FIXME: what do we do with these?)
1752
1753 %fr0 - %fr3 status/exception, not preserved
1754 %fr4 - %fr7 arguments
1755 %fr8 - %fr11 not preserved by C code
1756 %fr12 - %fr21 preserved by C code
1757 %fr22 - %fr31 not preserved by C code
1758 */
1759
/* Save the callee-saved registers r3-r18 into the pt_regs pointed
 * to by \regs (counterpart of reg_restore below). */
1760 .macro reg_save regs
1761 STREG %r3, PT_GR3(\regs)
1762 STREG %r4, PT_GR4(\regs)
1763 STREG %r5, PT_GR5(\regs)
1764 STREG %r6, PT_GR6(\regs)
1765 STREG %r7, PT_GR7(\regs)
1766 STREG %r8, PT_GR8(\regs)
1767 STREG %r9, PT_GR9(\regs)
1768 STREG %r10,PT_GR10(\regs)
1769 STREG %r11,PT_GR11(\regs)
1770 STREG %r12,PT_GR12(\regs)
1771 STREG %r13,PT_GR13(\regs)
1772 STREG %r14,PT_GR14(\regs)
1773 STREG %r15,PT_GR15(\regs)
1774 STREG %r16,PT_GR16(\regs)
1775 STREG %r17,PT_GR17(\regs)
1776 STREG %r18,PT_GR18(\regs)
1777 .endm
1778
/* Reload the callee-saved registers r3-r18 from the pt_regs pointed
 * to by \regs (counterpart of reg_save above). */
1779 .macro reg_restore regs
1780 LDREG PT_GR3(\regs), %r3
1781 LDREG PT_GR4(\regs), %r4
1782 LDREG PT_GR5(\regs), %r5
1783 LDREG PT_GR6(\regs), %r6
1784 LDREG PT_GR7(\regs), %r7
1785 LDREG PT_GR8(\regs), %r8
1786 LDREG PT_GR9(\regs), %r9
1787 LDREG PT_GR10(\regs),%r10
1788 LDREG PT_GR11(\regs),%r11
1789 LDREG PT_GR12(\regs),%r12
1790 LDREG PT_GR13(\regs),%r13
1791 LDREG PT_GR14(\regs),%r14
1792 LDREG PT_GR15(\regs),%r15
1793 LDREG PT_GR16(\regs),%r16
1794 LDREG PT_GR17(\regs),%r17
1795 LDREG PT_GR18(\regs),%r18
1796 .endm
1797
1798 .export sys_fork_wrapper
1799 .export child_return
/*
 * fork(): save r3-r18 and cr27 into the task's pt_regs, then call
 * sys_clone(SIGCHLD, child_sp, pt_regs) and return through
 * wrapper_exit. fork is implemented in terms of sys_clone here.
 */
1800 sys_fork_wrapper:
1801 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1802 ldo TASK_REGS(%r1),%r1
1803 reg_save %r1
1804 mfctl %cr27, %r3
1805 STREG %r3, PT_CR27(%r1)
1806
1807 STREG %r2,-RP_OFFSET(%r30)
1808 ldo FRAME_SIZE(%r30),%r30
1809 #ifdef CONFIG_64BIT
1810 ldo -16(%r30),%r29 /* Reference param save area */
1811 #endif
1812
1813 /* These are call-clobbered registers and therefore
1814 also syscall-clobbered (we hope). */
1815 STREG %r2,PT_GR19(%r1) /* save for child */
1816 STREG %r30,PT_GR21(%r1)
1817
1818 LDREG PT_GR30(%r1),%r25
1819 copy %r1,%r24
1820 BL sys_clone,%r2
1821 ldi SIGCHLD,%r26
1822
1823 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
/* Common exit for the fork/clone/vfork wrappers and child_return:
 * pop the frame, restore cr27 and r3-r18 from pt_regs, return. */
1824 wrapper_exit:
1825 ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
1826 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1827 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1828
1829 LDREG PT_CR27(%r1), %r3
1830 mtctl %r3, %cr27
1831 reg_restore %r1
1832
1833 /* strace expects syscall # to be preserved in r20 */
1834 ldi __NR_fork,%r20
1835 bv %r0(%r2)
1836 STREG %r20,PT_GR20(%r1)
1837
1838 /* Set the return value for the child */
/* New child's first kernel code: schedule_tail, then return 0
 * (r28 = 0) through wrapper_exit using the rp saved in PT_GR19. */
1839 child_return:
1840 BL schedule_tail, %r2
1841 nop
1842
1843 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
1844 LDREG TASK_PT_GR19(%r1),%r2
1845 b wrapper_exit
1846 copy %r0,%r28
1847
1848
1849 .export sys_clone_wrapper
/*
 * clone(): save r3-r18 and cr27 into pt_regs (needed so the child
 * can be restarted from them), pass the pt_regs pointer as arg4
 * (%r24) to sys_clone, and return through wrapper_exit.
 */
1850 sys_clone_wrapper:
1851 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1852 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1853 reg_save %r1
1854 mfctl %cr27, %r3
1855 STREG %r3, PT_CR27(%r1)
1856
1857 STREG %r2,-RP_OFFSET(%r30)
1858 ldo FRAME_SIZE(%r30),%r30
1859 #ifdef CONFIG_64BIT
1860 ldo -16(%r30),%r29 /* Reference param save area */
1861 #endif
1862
1863 STREG %r2,PT_GR19(%r1) /* save for child */
1864 STREG %r30,PT_GR21(%r1)
1865 BL sys_clone,%r2
1866 copy %r1,%r24
1867
1868 b wrapper_exit
1869 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1870
1871 .export sys_vfork_wrapper
/*
 * vfork(): save r3-r18 and cr27 into pt_regs, pass the pt_regs
 * pointer as arg0 (%r26) to sys_vfork, and return through
 * wrapper_exit.
 */
1872 sys_vfork_wrapper:
1873 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1874 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1875 reg_save %r1
1876 mfctl %cr27, %r3
1877 STREG %r3, PT_CR27(%r1)
1878
1879 STREG %r2,-RP_OFFSET(%r30)
1880 ldo FRAME_SIZE(%r30),%r30
1881 #ifdef CONFIG_64BIT
1882 ldo -16(%r30),%r29 /* Reference param save area */
1883 #endif
1884
1885 STREG %r2,PT_GR19(%r1) /* save for child */
1886 STREG %r30,PT_GR21(%r1)
1887
1888 BL sys_vfork,%r2
1889 copy %r1,%r26
1890
1891 b wrapper_exit
1892 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1893
1894
/* Shared body for the execve wrappers below: call \execve with the
 * task's pt_regs as arg0, then return to either the saved rp (on
 * error) or the rp restored after the call (on success). */
1895 .macro execve_wrapper execve
1896 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1897 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1898
1899 /*
1900 * Do we need to save/restore r3-r18 here?
1901 * I don't think so. why would new thread need old
1902 * threads registers?
1903 */
1904
1905 /* %arg0 - %arg3 are already saved for us. */
1906
1907 STREG %r2,-RP_OFFSET(%r30)
1908 ldo FRAME_SIZE(%r30),%r30
1909 #ifdef CONFIG_64BIT
1910 ldo -16(%r30),%r29 /* Reference param save area */
1911 #endif
1912 BL \execve,%r2
1913 copy %r1,%arg0
1914
1915 ldo -FRAME_SIZE(%r30),%r30
1916 LDREG -RP_OFFSET(%r30),%r2
1917
1918 /* If exec succeeded we need to load the args */
1919
/* Return values in [-1023, -1] are errnos: take the error return
 * path. The cmpb's delay slot loads %r19 with the success rp. */
1920 ldo -1024(%r0),%r1
1921 cmpb,>>= %r28,%r1,error_\execve
1922 copy %r2,%r19
1923
1924 error_\execve:
1925 bv %r0(%r19)
1926 nop
1927 .endm
1928
1929 .export sys_execve_wrapper
1930 .import sys_execve
1931
/* Native execve entry point; expands the shared macro above. */
1932 sys_execve_wrapper:
1933 execve_wrapper sys_execve
1934
1935 #ifdef CONFIG_64BIT
1936 .export sys32_execve_wrapper
1937 .import sys32_execve
1938
/* 32-bit-compat execve entry point for 64-bit kernels. */
1939 sys32_execve_wrapper:
1940 execve_wrapper sys32_execve
1941 #endif
1942
1943 .export sys_rt_sigreturn_wrapper
/*
 * rt_sigreturn(): call sys_rt_sigreturn(pt_regs), then restore
 * r3-r18 from the (now sigcontext-overwritten) pt_regs and return
 * through the rp saved before the call.
 */
1944 sys_rt_sigreturn_wrapper:
1945 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1946 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1947 /* Don't save regs, we are going to restore them from sigcontext. */
1948 STREG %r2, -RP_OFFSET(%r30)
1949 #ifdef CONFIG_64BIT
1950 ldo FRAME_SIZE(%r30), %r30
1951 BL sys_rt_sigreturn,%r2
1952 ldo -16(%r30),%r29 /* Reference param save area */
1953 #else
1954 BL sys_rt_sigreturn,%r2
1955 ldo FRAME_SIZE(%r30), %r30
1956 #endif
1957
1958 ldo -FRAME_SIZE(%r30), %r30
1959 LDREG -RP_OFFSET(%r30), %r2
1960
1961 /* FIXME: I think we need to restore a few more things here. */
1962 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1963 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1964 reg_restore %r1
1965
1966 /* If the signal was received while the process was blocked on a
1967 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1968 * take us to syscall_exit_rfi and on to intr_return.
1969 */
1970 bv %r0(%r2)
1971 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1972
1973 .export sys_sigaltstack_wrapper
/*
 * sigaltstack(): pass the saved user stack pointer (TASK_PT_GR30)
 * as the third argument (%r24) to do_sigaltstack and return.
 */
1974 sys_sigaltstack_wrapper:
1975 /* Get the user stack pointer */
1976 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1977 ldo TASK_REGS(%r1),%r24 /* get pt regs */
1978 LDREG TASK_PT_GR30(%r24),%r24
1979 STREG %r2, -RP_OFFSET(%r30)
1980 #ifdef CONFIG_64BIT
1981 ldo FRAME_SIZE(%r30), %r30
1982 b,l do_sigaltstack,%r2
1983 ldo -16(%r30),%r29 /* Reference param save area */
1984 #else
1985 bl do_sigaltstack,%r2
1986 ldo FRAME_SIZE(%r30), %r30
1987 #endif
1988
1989 ldo -FRAME_SIZE(%r30), %r30
1990 LDREG -RP_OFFSET(%r30), %r2
1991 bv %r0(%r2)
1992 nop
1993
1994 #ifdef CONFIG_64BIT
1995 .export sys32_sigaltstack_wrapper
/* 32-bit-compat sigaltstack on 64-bit kernels: same shape as the
 * native wrapper above but calls do_sigaltstack32. */
1996 sys32_sigaltstack_wrapper:
1997 /* Get the user stack pointer */
1998 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
1999 LDREG TASK_PT_GR30(%r24),%r24
2000 STREG %r2, -RP_OFFSET(%r30)
2001 ldo FRAME_SIZE(%r30), %r30
2002 b,l do_sigaltstack32,%r2
2003 ldo -16(%r30),%r29 /* Reference param save area */
2004
2005 ldo -FRAME_SIZE(%r30), %r30
2006 LDREG -RP_OFFSET(%r30), %r2
2007 bv %r0(%r2)
2008 nop
2009 #endif
2010
2011 .export sys_rt_sigsuspend_wrapper
/*
 * rt_sigsuspend(): save r3-r18 into pt_regs (for the sigcontext),
 * call sys_rt_sigsuspend, restore r3-r18 afterwards, and return.
 */
2012 sys_rt_sigsuspend_wrapper:
2013 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
2014 ldo TASK_REGS(%r1),%r24
2015 reg_save %r24
2016
2017 STREG %r2, -RP_OFFSET(%r30)
2018 #ifdef CONFIG_64BIT
2019 ldo FRAME_SIZE(%r30), %r30
2020 b,l sys_rt_sigsuspend,%r2
2021 ldo -16(%r30),%r29 /* Reference param save area */
2022 #else
2023 bl sys_rt_sigsuspend,%r2
2024 ldo FRAME_SIZE(%r30), %r30
2025 #endif
2026
2027 ldo -FRAME_SIZE(%r30), %r30
2028 LDREG -RP_OFFSET(%r30), %r2
2029
2030 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
2031 ldo TASK_REGS(%r1),%r1
2032 reg_restore %r1
2033
2034 bv %r0(%r2)
2035 nop
2036
2037 .export syscall_exit
2038 syscall_exit:

2040 /* NOTE: HP-UX syscalls also come through here
2041 * after hpux_syscall_exit fixes up return
2042 * values. */

2044 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
2045 * via syscall_exit_rfi if the signal was received while the process
2046 * was running.
2047 */

2049 /* save return value now */

2051 mfctl %cr30, %r1
2052 LDREG TI_TASK(%r1),%r1
2053 STREG %r28,TASK_PT_GR28(%r1)

2055 #ifdef CONFIG_HPUX

2057 /* <linux/personality.h> cannot be easily included */
2058 #define PER_HPUX 0x10
2059 LDREG TASK_PERSONALITY(%r1),%r19

2061 /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
2062 ldo -PER_HPUX(%r19), %r19
2063 CMPIB<>,n 0,%r19,1f

2065 /* Save other hpux returns if personality is PER_HPUX */
2066 STREG %r22,TASK_PT_GR22(%r1)
2067 STREG %r29,TASK_PT_GR29(%r1)
2068 1:

2070 #endif /* CONFIG_HPUX */

2072 /* Seems to me that dp could be wrong here, if the syscall involved
2073 * calling a module, and nothing got round to restoring dp on return.
2074 */
2075 loadgp

/* Falls through: pending-softirq check, then reschedule check,
 * then signal check, then syscall_restore. */
2077 syscall_check_bh:

2079 /* Check for software interrupts */

2081 .import irq_stat,data

2083 load32 irq_stat,%r19

2085 #ifdef CONFIG_SMP
2086 /* sched.h: int processor */
2087 /* %r26 is used as scratch register to index into irq_stat[] */
2088 ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */

2090 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
2091 #ifdef CONFIG_64BIT
2092 shld %r26, 6, %r20
2093 #else
2094 shlw %r26, 5, %r20
2095 #endif
2096 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
2097 #endif /* CONFIG_SMP */

2099 LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */
2100 cmpib,<>,n 0,%r20,syscall_do_softirq /* forward */

2102 syscall_check_resched:

2104 /* check for reschedule */

2106 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
2107 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

/* Falls through to syscall_restore when no signal is pending. */
2109 syscall_check_sig:
2110 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* get ti flags */
2111 bb,<,n %r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */
2112
/*
 * Normal (untraced) syscall return: restore user state from the
 * task's pt_regs and branch back to user space with be. Traced
 * tasks take the slower RFI path below.
 */
2113 syscall_restore:
2114 /* Are we being ptraced? */
2115 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

2117 LDREG TASK_PTRACE(%r1), %r19
2118 bb,< %r19,31,syscall_restore_rfi
2119 nop

2121 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
2122 rest_fp %r19

2124 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
2125 mtsar %r19

2127 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
2128 LDREG TASK_PT_GR19(%r1),%r19
2129 LDREG TASK_PT_GR20(%r1),%r20
2130 LDREG TASK_PT_GR21(%r1),%r21
2131 LDREG TASK_PT_GR22(%r1),%r22
2132 LDREG TASK_PT_GR23(%r1),%r23
2133 LDREG TASK_PT_GR24(%r1),%r24
2134 LDREG TASK_PT_GR25(%r1),%r25
2135 LDREG TASK_PT_GR26(%r1),%r26
2136 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
2137 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
2138 LDREG TASK_PT_GR29(%r1),%r29
2139 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */

2141 /* NOTE: We use rsm/ssm pair to make this operation atomic */
2142 rsm PSW_SM_I, %r0
2143 LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */
2144 mfsp %sr3,%r1 /* Get users space id */
2145 mtsp %r1,%sr7 /* Restore sr7 */
2146 ssm PSW_SM_I, %r0

2148 /* Set sr2 to zero for userspace syscalls to work. */
2149 mtsp %r0,%sr2
2150 mtsp %r1,%sr4 /* Restore sr4 */
2151 mtsp %r1,%sr5 /* Restore sr5 */
2152 mtsp %r1,%sr6 /* Restore sr6 */

2154 depi 3,31,2,%r31 /* ensure return to user mode. */

2156 #ifdef CONFIG_64BIT
2157 /* decide whether to reset the wide mode bit
2158 *
2159 * For a syscall, the W bit is stored in the lowest bit
2160 * of sp. Extract it and reset W if it is zero */
2161 extrd,u,*<> %r30,63,1,%r1
2162 rsm PSW_SM_W, %r0
2163 /* now reset the lowest bit of sp if it was set */
2164 xor %r30,%r1,%r30
2165 #endif
2166 be,n 0(%sr3,%r31) /* return to user space */
2167
2168 /* We have to return via an RFI, so that PSW T and R bits can be set
2169 * appropriately.
2170 * This sets up pt_regs so we can return via intr_restore, which is not
2171 * the most efficient way of doing things, but it works.
2172 */
2173 syscall_restore_rfi:
2174 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
2175 mtctl %r2,%cr0 /* for immediate trap */
2176 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
2177 ldi 0x0b,%r20 /* Create new PSW */
2178 depi -1,13,1,%r20 /* C, Q, D, and I bits */

2180 /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
2181 * set in include/linux/ptrace.h and converted to PA bitmap
2182 * numbers in asm-offsets.c */

2184 /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
2185 extru,= %r19,PA_SINGLESTEP_BIT,1,%r0
2186 depi -1,27,1,%r20 /* R bit */

2188 /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
2189 extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
2190 depi -1,7,1,%r20 /* T bit */

2192 STREG %r20,TASK_PT_PSW(%r1)

2194 /* Always store space registers, since sr3 can be changed (e.g. fork) */

2196 mfsp %sr3,%r25
2197 STREG %r25,TASK_PT_SR3(%r1)
2198 STREG %r25,TASK_PT_SR4(%r1)
2199 STREG %r25,TASK_PT_SR5(%r1)
2200 STREG %r25,TASK_PT_SR6(%r1)
2201 STREG %r25,TASK_PT_SR7(%r1)
2202 STREG %r25,TASK_PT_IASQ0(%r1)
2203 STREG %r25,TASK_PT_IASQ1(%r1)

2205 /* XXX W bit??? */
2206 /* Now if old D bit is clear, it means we didn't save all registers
2207 * on syscall entry, so do that now. This only happens on TRACEME
2208 * calls, or if someone attached to us while we were on a syscall.
2209 * We could make this more efficient by not saving r3-r18, but
2210 * then we wouldn't be able to use the common intr_restore path.
2211 * It is only for traced processes anyway, so performance is not
2212 * an issue.
2213 */
2214 bb,< %r2,30,pt_regs_ok /* Branch if D set */
2215 ldo TASK_REGS(%r1),%r25
2216 reg_save %r25 /* Save r3 to r18 */

2218 /* Save the current sr */
2219 mfsp %sr0,%r2
2220 STREG %r2,TASK_PT_SR0(%r1)

2222 /* Save the scratch sr */
2223 mfsp %sr1,%r2
2224 STREG %r2,TASK_PT_SR1(%r1)

2226 /* sr2 should be set to zero for userspace syscalls */
2227 STREG %r0,TASK_PT_SR2(%r1)

/* Set the return-address queue in pt_regs to the syscall return
 * point (user privilege forced) and exit via intr_restore. */
2229 pt_regs_ok:
2230 LDREG TASK_PT_GR31(%r1),%r2
2231 depi 3,31,2,%r2 /* ensure return to user mode. */
2232 STREG %r2,TASK_PT_IAOQ0(%r1)
2233 ldo 4(%r2),%r2
2234 STREG %r2,TASK_PT_IAOQ1(%r1)
2235 copy %r25,%r16
2236 b intr_restore
2237 nop
2238
2239 .import do_softirq,code
/* Run pending softirqs, then re-check the reschedule condition. */
2240 syscall_do_softirq:
2241 BL do_softirq,%r2
2242 nop
2243 /* NOTE: We enable I-bit in case we schedule later,
2244 * and we might be going back to userspace if we were
2245 * traced. */
2246 b syscall_check_resched
2247 ssm PSW_SM_I, %r0 /* do_softirq returns with I bit off */

2249 .import schedule,code
/* Call schedule(), then restart the whole exit-check sequence. */
2250 syscall_do_resched:
2251 BL schedule,%r2
2252 #ifdef CONFIG_64BIT
2253 ldo -16(%r30),%r29 /* Reference param save area */
2254 #else
2255 nop
2256 #endif
2257 b syscall_check_bh /* if resched, we start over again */
2258 nop

2260 .import do_signal,code
/* Deliver pending signals: save r3-r18 for the sigcontext, call
 * do_signal(oldset=NULL, regs, in_syscall=1), restore, re-check. */
2261 syscall_do_signal:
2262 /* Save callee-save registers (for sigcontext).
2263 FIXME: After this point the process structure should be
2264 consistent with all the relevant state of the process
2265 before the syscall. We need to verify this. */
2266 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2267 ldo TASK_REGS(%r1), %r25 /* struct pt_regs *regs */
2268 reg_save %r25

2270 ldi 1, %r24 /* unsigned long in_syscall */

2272 #ifdef CONFIG_64BIT
2273 ldo -16(%r30),%r29 /* Reference param save area */
2274 #endif
2275 BL do_signal,%r2
2276 copy %r0, %r26 /* sigset_t *oldset = NULL */

2278 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2279 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
2280 reg_restore %r20

2282 b,n syscall_check_sig
2283
2284 /*
2285 * get_register is used by the non access tlb miss handlers to
2286 * copy the value of the general register specified in r8 into
2287 * r1. This routine can't be used for shadowed registers, since
2288 * the rfir will restore the original value. So, for the shadowed
2289 * registers we put a -1 into r1 to indicate that the register
2290 * should not be used (the register being copied could also have
2291 * a -1 in it, but that is OK, it just means that we will have
2292 * to use the slow path instead).
2293 */
2294
/* Jump-table dispatch on %r8 (register number): blr indexes into
 * the bv/copy pairs below, each of which returns through %r25 with
 * the selected register's value in %r1, or -1 for shadowed regs
 * (see the explanatory comment above). */
2295 get_register:
2296 blr %r8,%r0
2297 nop
2298 bv %r0(%r25) /* r0 */
2299 copy %r0,%r1
2300 bv %r0(%r25) /* r1 - shadowed */
2301 ldi -1,%r1
2302 bv %r0(%r25) /* r2 */
2303 copy %r2,%r1
2304 bv %r0(%r25) /* r3 */
2305 copy %r3,%r1
2306 bv %r0(%r25) /* r4 */
2307 copy %r4,%r1
2308 bv %r0(%r25) /* r5 */
2309 copy %r5,%r1
2310 bv %r0(%r25) /* r6 */
2311 copy %r6,%r1
2312 bv %r0(%r25) /* r7 */
2313 copy %r7,%r1
2314 bv %r0(%r25) /* r8 - shadowed */
2315 ldi -1,%r1
2316 bv %r0(%r25) /* r9 - shadowed */
2317 ldi -1,%r1
2318 bv %r0(%r25) /* r10 */
2319 copy %r10,%r1
2320 bv %r0(%r25) /* r11 */
2321 copy %r11,%r1
2322 bv %r0(%r25) /* r12 */
2323 copy %r12,%r1
2324 bv %r0(%r25) /* r13 */
2325 copy %r13,%r1
2326 bv %r0(%r25) /* r14 */
2327 copy %r14,%r1
2328 bv %r0(%r25) /* r15 */
2329 copy %r15,%r1
2330 bv %r0(%r25) /* r16 - shadowed */
2331 ldi -1,%r1
2332 bv %r0(%r25) /* r17 - shadowed */
2333 ldi -1,%r1
2334 bv %r0(%r25) /* r18 */
2335 copy %r18,%r1
2336 bv %r0(%r25) /* r19 */
2337 copy %r19,%r1
2338 bv %r0(%r25) /* r20 */
2339 copy %r20,%r1
2340 bv %r0(%r25) /* r21 */
2341 copy %r21,%r1
2342 bv %r0(%r25) /* r22 */
2343 copy %r22,%r1
2344 bv %r0(%r25) /* r23 */
2345 copy %r23,%r1
2346 bv %r0(%r25) /* r24 - shadowed */
2347 ldi -1,%r1
2348 bv %r0(%r25) /* r25 - shadowed */
2349 ldi -1,%r1
2350 bv %r0(%r25) /* r26 */
2351 copy %r26,%r1
2352 bv %r0(%r25) /* r27 */
2353 copy %r27,%r1
2354 bv %r0(%r25) /* r28 */
2355 copy %r28,%r1
2356 bv %r0(%r25) /* r29 */
2357 copy %r29,%r1
2358 bv %r0(%r25) /* r30 */
2359 copy %r30,%r1
2360 bv %r0(%r25) /* r31 */
2361 copy %r31,%r1
2362
2363 /*
2364 * set_register is used by the non access tlb miss handlers to
2365 * copy the value of r1 into the general register specified in
2366 * r8.
2367 */
2368
/* Jump-table dispatch on %r8 (register number): copies %r1 into
 * the selected general register and returns through %r25.
 * Counterpart of get_register above. */
2369 set_register:
2370 blr %r8,%r0
2371 nop
2372 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2373 copy %r1,%r0
2374 bv %r0(%r25) /* r1 */
2375 copy %r1,%r1
2376 bv %r0(%r25) /* r2 */
2377 copy %r1,%r2
2378 bv %r0(%r25) /* r3 */
2379 copy %r1,%r3
2380 bv %r0(%r25) /* r4 */
2381 copy %r1,%r4
2382 bv %r0(%r25) /* r5 */
2383 copy %r1,%r5
2384 bv %r0(%r25) /* r6 */
2385 copy %r1,%r6
2386 bv %r0(%r25) /* r7 */
2387 copy %r1,%r7
2388 bv %r0(%r25) /* r8 */
2389 copy %r1,%r8
2390 bv %r0(%r25) /* r9 */
2391 copy %r1,%r9
2392 bv %r0(%r25) /* r10 */
2393 copy %r1,%r10
2394 bv %r0(%r25) /* r11 */
2395 copy %r1,%r11
2396 bv %r0(%r25) /* r12 */
2397 copy %r1,%r12
2398 bv %r0(%r25) /* r13 */
2399 copy %r1,%r13
2400 bv %r0(%r25) /* r14 */
2401 copy %r1,%r14
2402 bv %r0(%r25) /* r15 */
2403 copy %r1,%r15
2404 bv %r0(%r25) /* r16 */
2405 copy %r1,%r16
2406 bv %r0(%r25) /* r17 */
2407 copy %r1,%r17
2408 bv %r0(%r25) /* r18 */
2409 copy %r1,%r18
2410 bv %r0(%r25) /* r19 */
2411 copy %r1,%r19
2412 bv %r0(%r25) /* r20 */
2413 copy %r1,%r20
2414 bv %r0(%r25) /* r21 */
2415 copy %r1,%r21
2416 bv %r0(%r25) /* r22 */
2417 copy %r1,%r22
2418 bv %r0(%r25) /* r23 */
2419 copy %r1,%r23
2420 bv %r0(%r25) /* r24 */
2421 copy %r1,%r24
2422 bv %r0(%r25) /* r25 */
2423 copy %r1,%r25
2424 bv %r0(%r25) /* r26 */
2425 copy %r1,%r26
2426 bv %r0(%r25) /* r27 */
2427 copy %r1,%r27
2428 bv %r0(%r25) /* r28 */
2429 copy %r1,%r28
2430 bv %r0(%r25) /* r29 */
2431 copy %r1,%r29
2432 bv %r0(%r25) /* r30 */
2433 copy %r1,%r30
2434 bv %r0(%r25) /* r31 */
2435 copy %r1,%r31