Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/ia64/kernel/ivt.S | |
3 | * | |
060561ff | 4 | * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co |
1da177e4 LT |
5 | * Stephane Eranian <eranian@hpl.hp.com> |
6 | * David Mosberger <davidm@hpl.hp.com> | |
7 | * Copyright (C) 2000, 2002-2003 Intel Co | |
8 | * Asit Mallick <asit.k.mallick@intel.com> | |
9 | * Suresh Siddha <suresh.b.siddha@intel.com> | |
10 | * Kenneth Chen <kenneth.w.chen@intel.com> | |
11 | * Fenghua Yu <fenghua.yu@intel.com> | |
12 | * | |
13 | * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP | |
14 | * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT. | |
498c5170 IY |
15 | * |
16 | * Copyright (C) 2005 Hewlett-Packard Co | |
17 | * Dan Magenheimer <dan.magenheimer@hp.com> | |
18 | * Xen paravirtualization | |
19 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | |
20 | * VA Linux Systems Japan K.K. | |
21 | * pv_ops. | |
22 | * Yaozu (Eddie) Dong <eddie.dong@intel.com> | |
1da177e4 LT |
23 | */ |
24 | /* | |
25 | * This file defines the interruption vector table used by the CPU. | |
26 | * It does not include one entry per possible cause of interruption. | |
27 | * | |
28 | * The first 20 entries of the table contain 64 bundles each while the | |
29 | * remaining 48 entries contain only 16 bundles each. | |
30 | * | |
31 | * The 64 bundles are used to allow inlining the whole handler for critical | |
32 | * interruptions like TLB misses. | |
33 | * | |
34 | * For each entry, the comment is as follows: | |
35 | * | |
36 | * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) | |
37 | * entry offset ----/ / / / / | |
38 | * entry number ---------/ / / / | |
39 | * size of the entry -------------/ / / | |
40 | * vector name -------------------------------------/ / | |
41 | * interruptions triggering this vector ----------------------/ | |
42 | * | |
43 | * The table is 32KB in size and must be aligned on 32KB boundary. | |
44 | * (The CPU ignores the 15 lower bits of the address) | |
45 | * | |
46 | * Table is based upon EAS2.6 (Oct 1999) | |
47 | */ | |
48 | ||
1da177e4 LT |
49 | |
50 | #include <asm/asmmacro.h> | |
51 | #include <asm/break.h> | |
52 | #include <asm/ia32.h> | |
53 | #include <asm/kregs.h> | |
39e01cb8 | 54 | #include <asm/asm-offsets.h> |
1da177e4 LT |
55 | #include <asm/pgtable.h> |
56 | #include <asm/processor.h> | |
57 | #include <asm/ptrace.h> | |
58 | #include <asm/system.h> | |
59 | #include <asm/thread_info.h> | |
60 | #include <asm/unistd.h> | |
61 | #include <asm/errno.h> | |
62 | ||
63 | #if 1 | |
64 | # define PSR_DEFAULT_BITS psr.ac | |
65 | #else | |
66 | # define PSR_DEFAULT_BITS 0 | |
67 | #endif | |
68 | ||
69 | #if 0 | |
70 | /* | |
71 | * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't | |
72 | * needed for something else before enabling this... | |
73 | */ | |
74 | # define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16 | |
75 | #else | |
76 | # define DBG_FAULT(i) | |
77 | #endif | |
78 | ||
1da177e4 LT |
79 | #include "minstate.h" |
80 | ||
/*
 * Generic fault entry: save the predicates in r31, pass the fault/vector
 * number to the common handler in r19, and branch to it.
 */
#define FAULT(n)									\
	mov r31=pr;			/* save predicates */				\
	mov r19=n;;			/* pass fault number to handler */		\
	br.sptk.many dispatch_to_fault_handler
85 | ||
86 | .section .text.ivt,"ax" | |
87 | ||
88 | .align 32768 // align on 32KB boundary | |
89 | .global ia64_ivt | |
90 | ia64_ivt: | |
91 | ///////////////////////////////////////////////////////////////////////////////////////// | |
92 | // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) | |
ENTRY(vhpt_miss)
	DBG_FAULT(0)
	/*
	 * The VHPT vector is invoked when the TLB entry for the virtual page table
	 * is missing.  This happens only as a result of a previous
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
	 * What we do here is normal TLB miss handling for the _original_ miss,
	 * followed by inserting the TLB entry for the virtual page table page
	 * that the VHPT walker was attempting to access.  The latter gets
	 * inserted as long as page table entries above the pte level have valid
	 * mappings for the faulting address.  The TLB entry for the original
	 * miss gets inserted only if the pte entry indicates that the page is
	 * present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
	 *	- the faulting virtual address has no valid page table mapping
	 */
	MOV_FROM_IFA(r16)			// get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
	movl r18=PAGE_SHIFT
	MOV_FROM_ITIR(r25)
#endif
	;;
	RSM_PSR_DT				// use physical addressing for data
	mov r31=pr				// save the predicate registers
	mov r19=IA64_KR(PT_BASE)		// get page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	shr.u r17=r16,61			// get the region number into r17
	;;
	shr.u r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
	extr.u r26=r25,2,6			// page size of the faulting mapping (itir.ps)
	;;
	cmp.ne p8,p0=r18,r26			// non-default (huge) page size?
	sub r27=r26,r18
	;;
(p8)	dep r25=r18,r25,2,6			// force itir.ps back to PAGE_SHIFT
(p8)	shr r22=r22,r27				// rescale address for the huge page size
#endif
	;;
	cmp.eq p6,p7=5,r17			// is IFA pointing into region 5?
	shr.u r18=r22,PGDIR_SHIFT		// get bottom portion of pgd index bit
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#ifdef CONFIG_PGTABLE_4
	shr.u r28=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
#ifdef CONFIG_PGTABLE_4
	dep r28=r28,r17,3,(PAGE_SHIFT-3)	// r28=pud_offset(pgd,addr)
	;;
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
(p7)	ld8 r29=[r28]				// get *pud (may be 0)
	;;
(p7)	cmp.eq.or.andcm p6,p7=r29,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r29,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
#else
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pgd,addr)
#endif
	;;
(p7)	ld8 r20=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was pmd_present(*pmd) == NULL?
	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// r21=pte_offset(pmd,addr)
	;;
(p7)	ld8 r18=[r21]				// read *pte
	MOV_FROM_ISR(r19)			// cr.isr bit 32 tells us if this is an insn miss
	;;
(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
	MOV_FROM_IHA(r22)			// get the VHPT address that caused the TLB miss
	;;					// avoid RAW on p7
(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
	;;
	ITC_I_AND_D(p10, p11, r18, r24)		// insert the instruction TLB entry and
						// insert the data TLB entry
(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
	MOV_TO_IFA(r22, r24)

#ifdef CONFIG_HUGETLB_PAGE
	MOV_TO_ITIR(p8, r25, r24)		// change to default page-size for VHPT
#endif

	/*
	 * Now compute and insert the TLB entry for the virtual page table.  We never
	 * execute in a page table page so there is no need to set the exception deferral
	 * bit.
	 */
	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
	;;
	ITC_D(p7, r24, r25)
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	/*
	 * Re-check pagetable entry.  If they changed, we may have received a ptc.g
	 * between reading the pagetable and the "itc".  If so, flush the entry we
	 * inserted and retry.  At this point, we have:
	 *
	 * r28 = equivalent of pud_offset(pgd, ifa)
	 * r17 = equivalent of pmd_offset(pud, ifa)
	 * r21 = equivalent of pte_offset(pmd, ifa)
	 *
	 * r29 = *pud
	 * r20 = *pmd
	 * r18 = *pte
	 */
	ld8 r25=[r21]				// read *pte again
	ld8 r26=[r17]				// read *pmd again
#ifdef CONFIG_PGTABLE_4
	ld8 r19=[r28]				// read *pud again
#endif
	cmp.ne p6,p7=r0,r0			// start with p6 false, p7 true
	;;
	cmp.ne.or.andcm p6,p7=r26,r20		// did *pmd change
#ifdef CONFIG_PGTABLE_4
	cmp.ne.or.andcm p6,p7=r19,r29		// did *pud change
#endif
	mov r27=PAGE_SHIFT<<2
	;;
(p6)	ptc.l r22,r27				// purge PTE page translation
(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did *pte change
	;;
(p6)	ptc.l r16,r27				// purge translation
#endif

	mov pr=r31,-1				// restore predicate registers
	RFI
END(vhpt_miss)
247 | ||
248 | .org ia64_ivt+0x400 | |
249 | ///////////////////////////////////////////////////////////////////////////////////////// | |
250 | // 0x0400 Entry 1 (size 64 bundles) ITLB (21) | |
ENTRY(itlb_miss)
	DBG_FAULT(1)
	/*
	 * The ITLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
	MOV_FROM_IFA(r16)			// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
.itlb_fault:
	MOV_FROM_IHA(r17)			// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	ITC_I(p0, r18, r19)			// insert the instruction TLB entry
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19			// *pte changed underneath us (e.g. ptc.g)?
	;;
(p7)	ptc.l r16,r20				// yes: purge the entry we just inserted
#endif
	mov pr=r31,-1				// restore predicates
	RFI
END(itlb_miss)
291 | ||
292 | .org ia64_ivt+0x0800 | |
293 | ///////////////////////////////////////////////////////////////////////////////////////// | |
294 | // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) | |
ENTRY(dtlb_miss)
	DBG_FAULT(2)
	/*
	 * The DTLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
	MOV_FROM_IFA(r16)			// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
dtlb_fault:
	MOV_FROM_IHA(r17)			// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	ITC_D(p0, r18, r19)			// insert the data TLB entry
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19			// *pte changed underneath us (e.g. ptc.g)?
	;;
(p7)	ptc.l r16,r20				// yes: purge the entry we just inserted
#endif
	mov pr=r31,-1				// restore predicates
	RFI
END(dtlb_miss)
335 | ||
336 | .org ia64_ivt+0x0c00 | |
337 | ///////////////////////////////////////////////////////////////////////////////////////// | |
338 | // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) | |
ENTRY(alt_itlb_miss)
	DBG_FAULT(3)
	MOV_FROM_IFA(r16)			// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	MOV_FROM_IPSR(p0, r21)
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r31=pr				// save predicates
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// user mode
	;;
	THASH(p8, r17, r16, r23)
	;;
	MOV_TO_IHA(p8, r17, r23)
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk .itlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	shr.u r18=r16,57			// move address bit 61 to bit 4
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23			// psr.cpl != 0?
	or r19=r17,r19				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p8)	br.cond.spnt page_fault		// user-level alt-ITLB misses go to page_fault
	;;
	ITC_I(p0, r19, r18)			// insert the TLB entry
	mov pr=r31,-1				// restore predicates
	RFI
END(alt_itlb_miss)
373 | ||
374 | .org ia64_ivt+0x1000 | |
375 | ///////////////////////////////////////////////////////////////////////////////////////// | |
376 | // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) | |
ENTRY(alt_dtlb_miss)
	DBG_FAULT(4)
	MOV_FROM_IFA(r16)			// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	MOV_FROM_ISR(r20)
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	MOV_FROM_IPSR(p0, r21)
	mov r31=pr				// save predicates
	mov r24=PERCPU_ADDR
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// access to region 0-5
	;;
	THASH(p8, r17, r16, r25)
	;;
	MOV_TO_IHA(p8, r17, r25)
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk dtlb_fault
#endif
	cmp.ge p10,p11=r16,r24			// access to per_cpu_data?
	tbit.z p12,p0=r16,61			// access to region 6?
	mov r25=PERCPU_PAGE_SHIFT << 2
	mov r26=PERCPU_PAGE_SIZE
	nop.m 0
	nop.b 0
	;;
(p10)	mov r19=IA64_KR(PER_CPU_DATA)		// per-CPU area: use its physical base
(p11)	and r19=r19,r16				// clear non-ppn fields
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
(p10)	sub r19=r19,r26
	MOV_TO_ITIR(p10, r25, r24)		// per-CPU mapping uses PERCPU_PAGE_SHIFT
	cmp.ne p8,p0=r0,r23			// psr.cpl != 0?
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
(p12)	dep r17=-1,r17,4,1			// set ma=UC for region 6 addr
(p8)	br.cond.spnt page_fault		// user-level alt-DTLB misses go to page_fault

	dep r21=-1,r21,IA64_PSR_ED_BIT,1	// speculative load: defer the exception
	;;
	or r19=r19,r17				// insert PTE control bits into r19
	MOV_TO_IPSR(p6, r21, r24)
	;;
	ITC_D(p7, r19, r18)			// insert the TLB entry
	mov pr=r31,-1				// restore predicates
	RFI
END(alt_dtlb_miss)
428 | ||
429 | .org ia64_ivt+0x1400 | |
430 | ///////////////////////////////////////////////////////////////////////////////////////// | |
431 | // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) | |
ENTRY(nested_dtlb_miss)
	/*
	 * In the absence of kernel bugs, we get here when the virtually mapped linear
	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
	 * table is missing, a nested TLB miss fault is triggered and control is
	 * transferred to this point.  When this happens, we lookup the pte for the
	 * faulting address by walking the page table in physical mode and return to the
	 * continuation point passed in register r30 (or call page_fault if the address is
	 * not mapped).
	 *
	 *	Input:	r16:	faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 *	Output:	r17:	physical address of PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 *	Clobbered:	b0, r18, r19, r21, r22, psr.dt (cleared)
	 */
	RSM_PSR_DT				// switch to using physical data addressing
	mov r19=IA64_KR(PT_BASE)		// get the page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	MOV_FROM_ITIR(r18)
	;;
	shr.u r17=r16,61			// get the region number into r17
	extr.u r18=r18,2,6			// get the faulting page size
	;;
	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
	add r22=-PAGE_SHIFT,r18			// adjustment for hugetlb address
	add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
	;;
	shr.u r22=r16,r22
	shr.u r18=r16,r18
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#ifdef CONFIG_PGTABLE_4
	shr.u r18=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=p[u|m]d_offset(pgd,addr)
	;;
#ifdef CONFIG_PGTABLE_4
(p7)	ld8 r17=[r17]				// get *pud (may be 0)
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
	;;
#endif
(p7)	ld8 r17=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pmd_present(*pmd) == NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// r17=pte_offset(pmd,addr);
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)
509 | ||
510 | .org ia64_ivt+0x1800 | |
511 | ///////////////////////////////////////////////////////////////////////////////////////// | |
512 | // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) | |
ENTRY(ikey_miss)
	DBG_FAULT(6)
	FAULT(6)			// no inline handling: branch to dispatch_to_fault_handler
END(ikey_miss)
517 | ||
1da177e4 LT |
518 | .org ia64_ivt+0x1c00 |
519 | ///////////////////////////////////////////////////////////////////////////////////////// | |
520 | // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) | |
ENTRY(dkey_miss)
	DBG_FAULT(7)
	FAULT(7)			// no inline handling: branch to dispatch_to_fault_handler
END(dkey_miss)
525 | ||
526 | .org ia64_ivt+0x2000 | |
527 | ///////////////////////////////////////////////////////////////////////////////////////// | |
528 | // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) | |
ENTRY(dirty_bit)
	DBG_FAULT(8)
	/*
	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
	 * update both the page-table and the TLB entry.  To efficiently access the PTE,
	 * we address it through the virtual page table.  Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses.  In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
	 * up the physical address of the L3 PTE and then continue at label 1 below.
	 */
	MOV_FROM_IFA(r16)			// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
	mov r31=pr				// save pr
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	tbit.z p7,p6 = r18,_PAGE_P_BIT		// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only update if page is present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// Only compare if page is present
	;;
	ITC_D(p6, r25, r18)			// install updated PTE
	;;
	/*
	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24				// no: purge the entry we just inserted
	mov b0=r29				// restore b0
	mov ar.ccv=r28				// restore ar.ccv
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	ITC_D(p0, r18, r16)			// install updated PTE
#endif
	mov pr=r31,-1				// restore pr
	RFI
END(dirty_bit)
588 | ||
589 | .org ia64_ivt+0x2400 | |
590 | ///////////////////////////////////////////////////////////////////////////////////////// | |
591 | // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) | |
ENTRY(iaccess_bit)
	DBG_FAULT(9)
	// Like Entry 8, except for instruction access
	MOV_FROM_IFA(r16)			// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	mov r31=pr				// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	MOV_FROM_IPSR(p0, r17)
	;;
	MOV_FROM_IIP(r18)
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	tbit.z p7,p6 = r18,_PAGE_P_BIT		// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only if page present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// Only if page present
	;;
	ITC_I(p6, r25, r26)			// install updated PTE
	;;
	/*
	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24				// no: purge the entry we just inserted
	mov b0=r29				// restore b0
	mov ar.ccv=r28				// restore ar.ccv
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18			// set the accessed bit
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	ITC_I(p0, r18, r16)			// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1				// restore predicates
	RFI
END(iaccess_bit)
654 | ||
655 | .org ia64_ivt+0x2800 | |
656 | ///////////////////////////////////////////////////////////////////////////////////////// | |
657 | // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) | |
ENTRY(daccess_bit)
	DBG_FAULT(10)
	// Like Entry 8, except for data access
	MOV_FROM_IFA(r16)			// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
	mov r31=pr				// save predicates
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	tbit.z p7,p6 = r18,_PAGE_P_BIT		// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only if page is present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// Only if page is present
	;;
	ITC_D(p6, r25, r26)			// install updated PTE
	/*
	 * Tell the assemblers dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24				// no: purge the entry we just inserted
	mov ar.ccv=r28				// restore ar.ccv
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_A,r18			// set the accessed bit
	;;
	st8 [r17]=r18				// store back updated PTE
	ITC_D(p0, r18, r16)			// install updated PTE
#endif
	mov b0=r29				// restore b0
	mov pr=r31,-1				// restore predicates
	RFI
END(daccess_bit)
707 | ||
708 | .org ia64_ivt+0x2c00 | |
709 | ///////////////////////////////////////////////////////////////////////////////////////// | |
710 | // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) | |
711 | ENTRY(break_fault) | |
712 | /* | |
713 | * The streamlined system call entry/exit paths only save/restore the initial part | |
714 | * of pt_regs. This implies that the callers of system-calls must adhere to the | |
715 | * normal procedure calling conventions. | |
716 | * | |
717 | * Registers to be saved & restored: | |
718 | * CR registers: cr.ipsr, cr.iip, cr.ifs | |
719 | * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr | |
720 | * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15 | |
721 | * Registers to be restored only: | |
722 | * r8-r11: output value from the system call. | |
723 | * | |
724 | * During system call exit, scratch registers (including r15) are modified/cleared | |
725 | * to prevent leaking bits from kernel to user level. | |
726 | */ | |
727 | DBG_FAULT(11) | |
f8fa5448 | 728 | mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc) |
498c5170 | 729 | MOV_FROM_IPSR(p0, r29) // M2 (12 cyc) |
f8fa5448 | 730 | mov r31=pr // I0 (2 cyc) |
1da177e4 | 731 | |
498c5170 | 732 | MOV_FROM_IIM(r17) // M2 (2 cyc) |
f8fa5448 DMT |
733 | mov.m r27=ar.rsc // M2 (12 cyc) |
734 | mov r18=__IA64_BREAK_SYSCALL // A | |
1da177e4 | 735 | |
f8fa5448 DMT |
736 | mov.m ar.rsc=0 // M2 |
737 | mov.m r21=ar.fpsr // M2 (12 cyc) | |
738 | mov r19=b6 // I0 (2 cyc) | |
1da177e4 | 739 | ;; |
f8fa5448 DMT |
740 | mov.m r23=ar.bspstore // M2 (12 cyc) |
741 | mov.m r24=ar.rnat // M2 (5 cyc) | |
742 | mov.i r26=ar.pfs // I0 (2 cyc) | |
1da177e4 | 743 | |
f8fa5448 DMT |
744 | invala // M0|1 |
745 | nop.m 0 // M | |
746 | mov r20=r1 // A save r1 | |
747 | ||
748 | nop.m 0 | |
749 | movl r30=sys_call_table // X | |
750 | ||
498c5170 | 751 | MOV_FROM_IIP(r28) // M2 (2 cyc) |
f8fa5448 DMT |
752 | cmp.eq p0,p7=r18,r17 // I0 is this a system call? |
753 | (p7) br.cond.spnt non_syscall // B no -> | |
754 | // | |
755 | // From this point on, we are definitely on the syscall-path | |
756 | // and we can use (non-banked) scratch registers. | |
757 | // | |
758 | /////////////////////////////////////////////////////////////////////// | |
759 | mov r1=r16 // A move task-pointer to "addl"-addressable reg | |
760 | mov r2=r16 // A setup r2 for ia64_syscall_setup | |
761 | add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = ¤t_thread_info()->flags | |
762 | ||
763 | adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 | |
764 | adds r15=-1024,r15 // A subtract 1024 from syscall number | |
1da177e4 LT |
765 | mov r3=NR_syscalls - 1 |
766 | ;; | |
f8fa5448 DMT |
767 | ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag |
768 | ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags | |
769 | extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr | |
1da177e4 | 770 | |
f8fa5448 DMT |
771 | shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024) |
772 | addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS | |
773 | cmp.leu p6,p7=r15,r3 // A syscall number in range? | |
1da177e4 | 774 | ;; |
1da177e4 | 775 | |
f8fa5448 DMT |
776 | lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS |
777 | (p6) ld8 r30=[r30] // M0|1 load address of syscall entry point | |
778 | tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT? | |
779 | ||
780 | mov.m ar.bspstore=r22 // M2 switch to kernel RBS | |
781 | cmp.eq p8,p9=2,r8 // A ipsr.ei==2? | |
1da177e4 | 782 | ;; |
f8fa5448 DMT |
783 | |
784 | (p8) mov r8=0 // A clear ei to 0 | |
785 | (p7) movl r30=sys_ni_syscall // X | |
786 | ||
787 | (p8) adds r28=16,r28 // A switch cr.iip to next bundle | |
788 | (p9) adds r8=1,r8 // A increment ei to next slot | |
b64f34cd HS |
789 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
790 | ;; | |
791 | mov b6=r30 // I0 setup syscall handler branch reg early | |
792 | #else | |
f8fa5448 | 793 | nop.i 0 |
1da177e4 | 794 | ;; |
b64f34cd | 795 | #endif |
f8fa5448 DMT |
796 | |
797 | mov.m r25=ar.unat // M2 (5 cyc) | |
798 | dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr | |
799 | adds r15=1024,r15 // A restore original syscall number | |
800 | // | |
801 | // If any of the above loads miss in L1D, we'll stall here until | |
802 | // the data arrives. | |
803 | // | |
804 | /////////////////////////////////////////////////////////////////////// | |
805 | st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag | |
b64f34cd | 806 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
94752a79 | 807 | MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting |
b64f34cd | 808 | #else |
f8fa5448 | 809 | mov b6=r30 // I0 setup syscall handler branch reg early |
b64f34cd | 810 | #endif |
f8fa5448 DMT |
811 | cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already? |
812 | ||
813 | and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit | |
814 | mov r18=ar.bsp // M2 (12 cyc) | |
815 | (pKStk) br.cond.spnt .break_fixup // B we're already in kernel-mode -- fix up RBS | |
816 | ;; | |
817 | .back_from_break_fixup: | |
818 | (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack | |
819 | cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited? | |
820 | br.call.sptk.many b7=ia64_syscall_setup // B | |
821 | 1: | |
b64f34cd HS |
822 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
823 | // mov.m r30=ar.itc is called in advance, and r13 is current | |
824 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A | |
825 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A | |
826 | (pKStk) br.cond.spnt .skip_accounting // B unlikely skip | |
827 | ;; | |
828 | ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // M get last stamp | |
829 | ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // M time at leave | |
830 | ;; | |
831 | ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME // M cumulated stime | |
832 | ld8 r21=[r17] // M cumulated utime | |
833 | sub r22=r19,r18 // A stime before leave | |
834 | ;; | |
835 | st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP // M update stamp | |
836 | sub r18=r30,r19 // A elapsed time in user | |
837 | ;; | |
838 | add r20=r20,r22 // A sum stime | |
839 | add r21=r21,r18 // A sum utime | |
840 | ;; | |
841 | st8 [r16]=r20 // M update stime | |
842 | st8 [r17]=r21 // M update utime | |
843 | ;; | |
844 | .skip_accounting: | |
845 | #endif | |
f8fa5448 DMT |
846 | mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 |
847 | nop 0 | |
498c5170 | 848 | BSW_1(r2, r14) // B (6 cyc) regs are saved, switch to bank 1 |
1da177e4 | 849 | ;; |
f8fa5448 | 850 | |
498c5170 IY |
851 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16) // M2 now it's safe to re-enable intr.-collection |
852 | // M0 ensure interruption collection is on | |
f8fa5448 | 853 | movl r3=ia64_ret_from_syscall // X |
1da177e4 | 854 | ;; |
f8fa5448 DMT |
855 | mov rp=r3 // I0 set the real return addr |
856 | (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT | |
857 | ||
498c5170 | 858 | SSM_PSR_I(p15, p15, r16) // M2 restore psr.i |
f8fa5448 DMT |
859 | (p14) br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr) |
860 | br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic | |
1da177e4 | 861 | // NOT REACHED |
f8fa5448 DMT |
862 | /////////////////////////////////////////////////////////////////////// |
863 | // On entry, we optimistically assumed that we're coming from user-space. | |
864 | // For the rare cases where a system-call is done from within the kernel, | |
865 | // we fix things up at this point: | |
866 | .break_fixup: | |
867 | add r1=-IA64_PT_REGS_SIZE,sp // A allocate space for pt_regs structure | |
868 | mov ar.rnat=r24 // M2 restore kernel's AR.RNAT | |
869 | ;; | |
870 | mov ar.bspstore=r23 // M2 restore kernel's AR.BSPSTORE | |
871 | br.cond.sptk .back_from_break_fixup | |
1da177e4 LT |
872 | END(break_fault) | |
873 | ||
874 | .org ia64_ivt+0x3000 | |
875 | ///////////////////////////////////////////////////////////////////////////////////////// | |
876 | // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) | |
877 | ENTRY(interrupt) | |
4d58bbcc IY |
878 | /* interrupt handler has become too big to fit this area. */ |
879 | br.sptk.many __interrupt // tail-branch to the out-of-line handler | |
1da177e4 LT |
880 | END(interrupt) | |
881 | ||
// Reserved IVT slots: no inline logic, just the generic DBG_FAULT/FAULT macros (defined elsewhere).
882 | .org ia64_ivt+0x3400 | |
883 | ///////////////////////////////////////////////////////////////////////////////////////// | |
884 | // 0x3400 Entry 13 (size 64 bundles) Reserved | |
885 | DBG_FAULT(13) | |
886 | FAULT(13) | |
887 | ||
888 | .org ia64_ivt+0x3800 | |
889 | ///////////////////////////////////////////////////////////////////////////////////////// | |
890 | // 0x3800 Entry 14 (size 64 bundles) Reserved | |
891 | DBG_FAULT(14) | |
892 | FAULT(14) | |
893 | ||
894 | /* | |
895 | * There is no particular reason for this code to be here, other than that | |
896 | * there happens to be space here that would go unused otherwise. If this | |
897 | * fault ever gets "unreserved", simply move the following code to a more | |
898 | * suitable spot... | |
899 | * | |
900 | * ia64_syscall_setup() is a separate subroutine so that it can | |
901 | * allocate stacked registers so it can safely demine any | |
902 | * potential NaT values from the input registers. | |
903 | * | |
904 | * On entry: | |
905 | * - executing on bank 0 or bank 1 register set (doesn't matter) | |
906 | * - r1: stack pointer | |
907 | * - r2: current task pointer | |
908 | * - r3: preserved | |
909 | * - r11: original contents (saved ar.pfs to be saved) | |
910 | * - r12: original contents (sp to be saved) | |
911 | * - r13: original contents (tp to be saved) | |
912 | * - r15: original contents (syscall # to be saved) | |
913 | * - r18: saved bsp (after switching to kernel stack) | |
914 | * - r19: saved b6 | |
915 | * - r20: saved r1 (gp) | |
916 | * - r21: saved ar.fpsr | |
917 | * - r22: kernel's register backing store base (krbs_base) | |
918 | * - r23: saved ar.bspstore | |
919 | * - r24: saved ar.rnat | |
920 | * - r25: saved ar.unat | |
921 | * - r26: saved ar.pfs | |
922 | * - r27: saved ar.rsc | |
923 | * - r28: saved cr.iip | |
924 | * - r29: saved cr.ipsr | |
b64f34cd | 925 | * - r30: ar.itc for accounting (don't touch) |
1da177e4 LT |
926 | * - r31: saved pr |
927 | * - b0: original contents (to be saved) | |
928 | * On exit: | |
1da177e4 LT |
929 | * - p10: TRUE if syscall is invoked with more than 8 out |
930 | * registers or r15's Nat is true | |
931 | * - r1: kernel's gp | |
932 | * - r3: preserved (same as on entry) | |
933 | * - r8: -EINVAL if p10 is true | |
934 | * - r12: points to kernel stack | |
935 | * - r13: points to current task | |
f8fa5448 DMT |
936 | * - r14: preserved (same as on entry) |
937 | * - p13: preserved | |
1da177e4 LT |
938 | * - p15: TRUE if interrupts need to be re-enabled |
939 | * - ar.fpsr: set to kernel settings | |
f8fa5448 | 940 | * - b6: preserved (same as on entry) |
1da177e4 | 941 | */ |
498c5170 | 942 | #ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE |
1da177e4 LT |
943 | GLOBAL_ENTRY(ia64_syscall_setup) |
944 | #if PT(B6) != 0 | |
945 | # error This code assumes that b6 is the first field in pt_regs. | |
946 | #endif | |
947 | st8 [r1]=r19 // save b6 | |
948 | add r16=PT(CR_IPSR),r1 // initialize first base pointer | |
949 | add r17=PT(R11),r1 // initialize second base pointer | |
950 | ;; | |
951 | alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable | |
952 | st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr | |
953 | tnat.nz p8,p0=in0 // p8 <- in0 is NaT? (demined below) | |
954 | ||
955 | st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11 | |
956 | tnat.nz p9,p0=in1 // p9 <- in1 is NaT? | |
957 | (pKStk) mov r18=r0 // make sure r18 isn't NaT | |
958 | ;; | |
959 | ||
960 | st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs | |
961 | st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip | |
962 | mov r28=b0 // save b0 (2 cyc) | |
963 | ;; | |
964 | ||
965 | st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat | |
966 | dep r19=0,r19,38,26 // clear all bits but 0..37 [I0] | |
967 | (p8) mov in0=-1 | |
968 | ;; | |
969 | ||
970 | st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs | |
971 | extr.u r11=r19,7,7 // I0 // get sol of ar.pfs | |
972 | and r8=0x7f,r19 // A // get sof of ar.pfs | |
973 | ||
974 | st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc | |
975 | tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0 | |
976 | (p9) mov in1=-1 | |
977 | ;; | |
978 | ||
979 | (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8 | |
980 | tnat.nz p10,p0=in2 | |
981 | add r11=8,r11 | |
982 | ;; | |
983 | (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field | |
984 | (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field | |
985 | tnat.nz p11,p0=in3 | |
986 | ;; | |
987 | (p10) mov in2=-1 | |
988 | tnat.nz p12,p0=in4 // [I0] | |
989 | (p11) mov in3=-1 | |
990 | ;; | |
991 | (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat | |
992 | (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore | |
993 | shl r18=r18,16 // compute ar.rsc to be used for "loadrs" | |
994 | ;; | |
995 | st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates | |
996 | st8 [r17]=r28,PT(R1)-PT(B0) // save b0 | |
997 | tnat.nz p13,p0=in5 // [I0] | |
998 | ;; | |
999 | st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs" | |
1000 | st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1 | |
1001 | (p12) mov in4=-1 | |
1002 | ;; | |
1003 | ||
1004 | .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12 | |
1005 | .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13 | |
1006 | (p13) mov in5=-1 | |
1007 | ;; | |
1008 | st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr | |
f8fa5448 | 1009 | tnat.nz p13,p0=in6 |
1da177e4 LT |
1010 | cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8 |
1011 | ;; | |
060561ff | 1012 | mov r8=1 |
1da177e4 LT |
1013 | (p9) tnat.nz p10,p0=r15 |
1014 | adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch) | |
1015 | ||
1016 | st8.spill [r17]=r15 // save r15 | |
1017 | tnat.nz p8,p0=in7 | |
1018 | nop.i 0 | |
1019 | ||
1020 | mov r13=r2 // establish `current' | |
1021 | movl r1=__gp // establish kernel global pointer | |
1022 | ;; | |
060561ff | 1023 | st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error) |
f8fa5448 | 1024 | (p13) mov in6=-1 |
1da177e4 | 1025 | (p8) mov in7=-1 |
1da177e4 LT |
1026 | |
1027 | cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 | |
1028 | movl r17=FPSR_DEFAULT | |
1029 | ;; | |
1030 | mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value | |
1031 | (p10) mov r8=-EINVAL | |
1032 | br.ret.sptk.many b7 | |
1033 | END(ia64_syscall_setup) | |
498c5170 | 1034 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ |
1da177e4 LT |
1035 | |
// Reserved IVT slots: no inline logic, just the generic DBG_FAULT/FAULT macros (defined elsewhere).
1036 | .org ia64_ivt+0x3c00 | |
1037 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1038 | // 0x3c00 Entry 15 (size 64 bundles) Reserved | |
1039 | DBG_FAULT(15) | |
1040 | FAULT(15) | |
1041 | ||
1da177e4 LT |
1042 | .org ia64_ivt+0x4000 |
1043 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1044 | // 0x4000 Entry 16 (size 64 bundles) Reserved | |
1045 | DBG_FAULT(16) | |
1046 | FAULT(16) | |
1047 | ||
498c5170 | 1048 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE) |
b64f34cd HS |
1049 | /* |
1050 | * There is no particular reason for this code to be here, other than | |
1051 | * that there happens to be space here that would go unused otherwise. | |
1052 | * If this fault ever gets "unreserved", simply move the following | |
1053 | * code to a more suitable spot... | |
1054 | * | |
1055 | * account_sys_enter is called from SAVE_MIN* macros if accounting is | |
1056 | * enabled and if the macro is entered from user mode. | |
1057 | */ | |
498c5170 | 1058 | GLOBAL_ENTRY(account_sys_enter) |
b64f34cd HS |
1059 | // mov.m r20=ar.itc is called in advance, and r13 is current |
1060 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 | |
1061 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 | |
1062 | ;; | |
1063 | ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // time at last check in kernel | |
1064 | ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // time when we last left the kernel | |
1065 | ;; | |
1066 | ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME // cumulated stime | |
1067 | ld8 r21=[r17] // cumulated utime | |
1068 | sub r22=r19,r18 // stime before leave kernel | |
1069 | ;; | |
1070 | st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP // update stamp | |
1071 | sub r18=r20,r19 // elapsed time in user mode | |
1072 | ;; | |
1073 | add r23=r23,r22 // sum stime | |
1074 | add r21=r21,r18 // sum utime | |
1075 | ;; | |
1076 | st8 [r16]=r23 // update stime | |
1077 | st8 [r17]=r21 // update utime | |
1078 | ;; | |
1079 | br.ret.sptk.many rp | |
1080 | END(account_sys_enter) | |
1081 | #endif | |
1082 | ||
1da177e4 LT |
// Reserved IVT slots: no inline logic, just the generic DBG_FAULT/FAULT macros (defined elsewhere).
1083 | .org ia64_ivt+0x4400 |
1084 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1085 | // 0x4400 Entry 17 (size 64 bundles) Reserved | |
1086 | DBG_FAULT(17) | |
1087 | FAULT(17) | |
1088 | ||
1da177e4 LT |
1089 | .org ia64_ivt+0x4800 |
1090 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1091 | // 0x4800 Entry 18 (size 64 bundles) Reserved | |
1092 | DBG_FAULT(18) | |
1093 | FAULT(18) | |
1094 | ||
1da177e4 LT |
1095 | .org ia64_ivt+0x4c00 |
1096 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1097 | // 0x4c00 Entry 19 (size 64 bundles) Reserved | |
1098 | DBG_FAULT(19) | |
1099 | FAULT(19) | |
1100 | ||
1da177e4 LT |
1101 | // |
1102 | // --- End of long entries, Beginning of short entries | |
1103 | // | |
1104 | ||
1105 | .org ia64_ivt+0x5000 | |
1106 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1107 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49) | |
1108 | ENTRY(page_not_present) | |
1109 | DBG_FAULT(20) | |
498c5170 IY |
1110 | MOV_FROM_IFA(r16) |
1111 | RSM_PSR_DT | |
1da177e4 LT |
1112 | /* |
1113 | * The Linux page fault handler doesn't expect non-present pages to be in | |
1114 | * the TLB. Flush the existing entry now, so we meet that expectation. | |
1115 | */ | |
1116 | mov r17=PAGE_SHIFT<<2 | |
1117 | ;; | |
1118 | ptc.l r16,r17 | |
1119 | ;; | |
1120 | mov r31=pr // prepare to save predicates | |
1121 | srlz.d | |
1122 | br.sptk.many page_fault | |
1123 | END(page_not_present) | |
1124 | ||
1125 | .org ia64_ivt+0x5100 | |
1126 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1127 | // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) | |
1128 | ENTRY(key_permission) | |
1129 | DBG_FAULT(21) | |
498c5170 IY |
1130 | MOV_FROM_IFA(r16) |
1131 | RSM_PSR_DT | |
1da177e4 LT |
1132 | mov r31=pr // prepare to save predicates |
1133 | ;; | |
1134 | srlz.d | |
1135 | br.sptk.many page_fault | |
1136 | END(key_permission) | |
1137 | ||
1138 | .org ia64_ivt+0x5200 | |
1139 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1140 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) | |
1141 | ENTRY(iaccess_rights) | |
1142 | DBG_FAULT(22) | |
498c5170 IY |
1143 | MOV_FROM_IFA(r16) |
1144 | RSM_PSR_DT | |
1da177e4 LT |
1145 | mov r31=pr // prepare to save predicates |
1146 | ;; | |
1147 | srlz.d | |
1148 | br.sptk.many page_fault | |
1149 | END(iaccess_rights) | |
1150 | ||
1151 | .org ia64_ivt+0x5300 | |
1152 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1153 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) | |
1154 | ENTRY(daccess_rights) | |
1155 | DBG_FAULT(23) | |
498c5170 IY |
1156 | MOV_FROM_IFA(r16) |
1157 | RSM_PSR_DT | |
1da177e4 LT |
1158 | mov r31=pr // prepare to save predicates |
1159 | ;; | |
1160 | srlz.d | |
1161 | br.sptk.many page_fault | |
1162 | END(daccess_rights) | |
1163 | ||
1164 | .org ia64_ivt+0x5400 | |
1165 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1166 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) | |
1167 | ENTRY(general_exception) | |
1168 | DBG_FAULT(24) | |
498c5170 | 1169 | MOV_FROM_ISR(r16) |
1da177e4 LT |
1170 | mov r31=pr |
1171 | ;; | |
1172 | cmp4.eq p6,p0=0,r16 // cr.isr == 0 => illegal operation | |
1173 | (p6) br.sptk.many dispatch_illegal_op_fault | |
1174 | ;; | |
1175 | mov r19=24 // fault number | |
1176 | br.sptk.many dispatch_to_fault_handler | |
1177 | END(general_exception) | |
1178 | ||
1179 | .org ia64_ivt+0x5500 | |
1180 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1181 | // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) | |
1182 | ENTRY(disabled_fp_reg) | |
1183 | DBG_FAULT(25) | |
1184 | rsm psr.dfh // ensure we can access fph | |
1185 | ;; | |
1186 | srlz.d | |
1187 | mov r31=pr // prepare to save predicates | |
1188 | mov r19=25 // fault number | |
1189 | br.sptk.many dispatch_to_fault_handler | |
1190 | END(disabled_fp_reg) | |
1191 | ||
1192 | .org ia64_ivt+0x5600 | |
1193 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1194 | // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) | |
1195 | ENTRY(nat_consumption) | |
1196 | DBG_FAULT(26) | |
458f9355 | 1197 | |
498c5170 IY |
1198 | MOV_FROM_IPSR(p0, r16) |
1199 | MOV_FROM_ISR(r17) | |
458f9355 DMT |
1200 | mov r31=pr // save PR |
1201 | ;; | |
1202 | and r18=0xf,r17 // r18 = cr.isr.code{3:0} | |
1203 | tbit.z p6,p0=r17,IA64_ISR_NA_BIT | |
1204 | ;; | |
1205 | cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18 | |
1206 | dep r16=-1,r16,IA64_PSR_ED_BIT,1 | |
1207 | (p6) br.cond.spnt 1f // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH) | |
1208 | ;; | |
498c5170 | 1209 | MOV_TO_IPSR(p0, r16, r18) |
458f9355 DMT |
1210 | mov pr=r31,-1 |
1211 | ;; | |
498c5170 | 1212 | RFI |
458f9355 DMT |
1213 | |
1214 | 1: mov pr=r31,-1 | |
1215 | ;; | |
1da177e4 LT |
1216 | FAULT(26) |
1217 | END(nat_consumption) | |
1218 | ||
1219 | .org ia64_ivt+0x5700 | |
1220 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1221 | // 0x5700 Entry 27 (size 16 bundles) Speculation (40) | |
1222 | ENTRY(speculation_vector) | |
1223 | DBG_FAULT(27) | |
1224 | /* | |
1225 | * A [f]chk.[as] instruction needs to take the branch to the recovery code but | |
1226 | * this part of the architecture is not implemented in hardware on some CPUs, such | |
1227 | * as Itanium. Thus, in general we need to emulate the behavior. IIM contains | |
1228 | * the relative target (not yet sign extended). So after sign extending it we | |
1229 | * simply add it to IIP. We also need to reset the EI field of the IPSR to zero, | |
1230 | * i.e., the slot to restart into. | |
1231 | * | |
1232 | * cr.iim contains zero_ext(imm21) | |
1233 | */ | |
498c5170 | 1234 | MOV_FROM_IIM(r18) |
1da177e4 | 1235 | ;; |
498c5170 | 1236 | MOV_FROM_IIP(r17) |
1da177e4 LT |
1237 | shl r18=r18,43 // put sign bit in position (43=64-21) |
1238 | ;; | |
1239 | ||
498c5170 | 1240 | MOV_FROM_IPSR(p0, r16) |
1da177e4 LT |
1241 | shr r18=r18,39 // sign extend (39=43-4) |
1242 | ;; | |
1243 | ||
1244 | add r17=r17,r18 // now add the offset | |
1245 | ;; | |
9b3cbf72 | 1246 | MOV_TO_IIP(r17, r19) |
1da177e4 LT |
1247 | dep r16=0,r16,41,2 // clear EI |
1248 | ;; | |
1249 | ||
9b3cbf72 | 1250 | MOV_TO_IPSR(p0, r16, r19) |
1da177e4 LT |
1251 | ;; |
1252 | ||
498c5170 | 1253 | RFI |
1da177e4 LT |
1254 | END(speculation_vector) |
1255 | ||
// Entry 28 is reserved; entry 29 (Debug) has no inline handling — both fall through to FAULT().
1256 | .org ia64_ivt+0x5800 | |
1257 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1258 | // 0x5800 Entry 28 (size 16 bundles) Reserved | |
1259 | DBG_FAULT(28) | |
1260 | FAULT(28) | |
1261 | ||
1262 | .org ia64_ivt+0x5900 | |
1263 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1264 | // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) | |
1265 | ENTRY(debug_vector) | |
1266 | DBG_FAULT(29) | |
1267 | FAULT(29) | |
1268 | END(debug_vector) | |
1269 | ||
// Unaligned references are handled out of line by dispatch_unaligned_handler.
1270 | .org ia64_ivt+0x5a00 | |
1271 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1272 | // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) | |
1273 | ENTRY(unaligned_access) | |
1274 | DBG_FAULT(30) | |
1da177e4 LT |
1275 | mov r31=pr // prepare to save predicates |
1276 | ;; | |
1277 | br.sptk.many dispatch_unaligned_handler | |
1278 | END(unaligned_access) | |
1279 | ||
// No inline handling; routed through the generic FAULT() path.
1280 | .org ia64_ivt+0x5b00 | |
1281 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1282 | // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) | |
1283 | ENTRY(unsupported_data_reference) | |
1284 | DBG_FAULT(31) | |
1285 | FAULT(31) | |
1286 | END(unsupported_data_reference) | |
1287 | ||
// FP fault/trap entries: no inline handling; routed through the generic FAULT() path.
1288 | .org ia64_ivt+0x5c00 | |
1289 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1290 | // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64) | |
1291 | ENTRY(floating_point_fault) | |
1292 | DBG_FAULT(32) | |
1293 | FAULT(32) | |
1294 | END(floating_point_fault) | |
1295 | ||
1296 | .org ia64_ivt+0x5d00 | |
1297 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1298 | // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) | |
1299 | ENTRY(floating_point_trap) | |
1300 | DBG_FAULT(33) | |
1301 | FAULT(33) | |
1302 | END(floating_point_trap) | |
1303 | ||
// Trap entries with no inline handling; routed through the generic FAULT() path.
1304 | .org ia64_ivt+0x5e00 | |
1305 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1306 | // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) | |
1307 | ENTRY(lower_privilege_trap) | |
1308 | DBG_FAULT(34) | |
1309 | FAULT(34) | |
1310 | END(lower_privilege_trap) | |
1311 | ||
1312 | .org ia64_ivt+0x5f00 | |
1313 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1314 | // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) | |
1315 | ENTRY(taken_branch_trap) | |
1316 | DBG_FAULT(35) | |
1317 | FAULT(35) | |
1318 | END(taken_branch_trap) | |
1319 | ||
// No inline handling; routed through the generic FAULT() path.
1320 | .org ia64_ivt+0x6000 | |
1321 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1322 | // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) | |
1323 | ENTRY(single_step_trap) | |
1324 | DBG_FAULT(36) | |
1325 | FAULT(36) | |
1326 | END(single_step_trap) | |
1327 | ||
// Reserved IVT slots 37-44: no inline logic, just the generic DBG_FAULT/FAULT macros.
1328 | .org ia64_ivt+0x6100 | |
1329 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1330 | // 0x6100 Entry 37 (size 16 bundles) Reserved | |
1331 | DBG_FAULT(37) | |
1332 | FAULT(37) | |
1333 | ||
1334 | .org ia64_ivt+0x6200 | |
1335 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1336 | // 0x6200 Entry 38 (size 16 bundles) Reserved | |
1337 | DBG_FAULT(38) | |
1338 | FAULT(38) | |
1339 | ||
1340 | .org ia64_ivt+0x6300 | |
1341 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1342 | // 0x6300 Entry 39 (size 16 bundles) Reserved | |
1343 | DBG_FAULT(39) | |
1344 | FAULT(39) | |
1345 | ||
1346 | .org ia64_ivt+0x6400 | |
1347 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1348 | // 0x6400 Entry 40 (size 16 bundles) Reserved | |
1349 | DBG_FAULT(40) | |
1350 | FAULT(40) | |
1351 | ||
1352 | .org ia64_ivt+0x6500 | |
1353 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1354 | // 0x6500 Entry 41 (size 16 bundles) Reserved | |
1355 | DBG_FAULT(41) | |
1356 | FAULT(41) | |
1357 | ||
1358 | .org ia64_ivt+0x6600 | |
1359 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1360 | // 0x6600 Entry 42 (size 16 bundles) Reserved | |
1361 | DBG_FAULT(42) | |
1362 | FAULT(42) | |
1363 | ||
1364 | .org ia64_ivt+0x6700 | |
1365 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1366 | // 0x6700 Entry 43 (size 16 bundles) Reserved | |
1367 | DBG_FAULT(43) | |
1368 | FAULT(43) | |
1369 | ||
1370 | .org ia64_ivt+0x6800 | |
1371 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1372 | // 0x6800 Entry 44 (size 16 bundles) Reserved | |
1373 | DBG_FAULT(44) | |
1374 | FAULT(44) | |
1375 | ||
1376 | .org ia64_ivt+0x6900 | |
1377 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1378 | // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) | |
1379 | ENTRY(ia32_exception) | |
1380 | DBG_FAULT(45) | |
1381 | FAULT(45) | |
1382 | END(ia32_exception) | |
1383 | ||
1384 | .org ia64_ivt+0x6a00 | |
1385 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1386 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) | |
1387 | ENTRY(ia32_intercept) | |
1388 | DBG_FAULT(46) | |
1389 | #ifdef CONFIG_IA32_SUPPORT | |
1390 | mov r31=pr | |
498c5170 | 1391 | MOV_FROM_ISR(r16) |
1da177e4 LT |
1392 | ;; |
1393 | extr.u r17=r16,16,8 // get ISR.code | |
1394 | mov r18=ar.eflag | |
498c5170 | 1395 | MOV_FROM_IIM(r19) // old eflag value |
1da177e4 LT |
1396 | ;; |
1397 | cmp.ne p6,p0=2,r17 | |
1398 | (p6) br.cond.spnt 1f // not a system flag fault | |
1399 | xor r16=r18,r19 // which eflags bits changed? | |
1400 | ;; | |
1401 | extr.u r17=r16,18,1 // get the eflags.ac bit | |
1402 | ;; | |
1403 | cmp.eq p6,p0=0,r17 | |
1404 | (p6) br.cond.spnt 1f // eflags.ac bit didn't change | |
1405 | ;; | |
1406 | mov pr=r31,-1 // restore predicate registers | |
498c5170 | 1407 | RFI |
1da177e4 LT |
1408 | |
1409 | 1: | |
1410 | #endif // CONFIG_IA32_SUPPORT | |
1411 | FAULT(46) | |
1412 | END(ia32_intercept) | |
1413 | ||
1414 | .org ia64_ivt+0x6b00 | |
1415 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1416 | // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74) | |
1417 | ENTRY(ia32_interrupt) | |
1418 | DBG_FAULT(47) | |
1419 | #ifdef CONFIG_IA32_SUPPORT | |
1420 | mov r31=pr // prepare to save predicates | |
1421 | br.sptk.many dispatch_to_ia32_handler | |
1422 | #else | |
1423 | FAULT(47) | |
1424 | #endif | |
1425 | END(ia32_interrupt) | |
1426 | ||
// Reserved IVT slots 48-67: no inline logic, just the generic DBG_FAULT/FAULT macros.
1427 | .org ia64_ivt+0x6c00 | |
1428 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1429 | // 0x6c00 Entry 48 (size 16 bundles) Reserved | |
1430 | DBG_FAULT(48) | |
1431 | FAULT(48) | |
1432 | ||
1433 | .org ia64_ivt+0x6d00 | |
1434 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1435 | // 0x6d00 Entry 49 (size 16 bundles) Reserved | |
1436 | DBG_FAULT(49) | |
1437 | FAULT(49) | |
1438 | ||
1439 | .org ia64_ivt+0x6e00 | |
1440 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1441 | // 0x6e00 Entry 50 (size 16 bundles) Reserved | |
1442 | DBG_FAULT(50) | |
1443 | FAULT(50) | |
1444 | ||
1445 | .org ia64_ivt+0x6f00 | |
1446 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1447 | // 0x6f00 Entry 51 (size 16 bundles) Reserved | |
1448 | DBG_FAULT(51) | |
1449 | FAULT(51) | |
1450 | ||
1451 | .org ia64_ivt+0x7000 | |
1452 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1453 | // 0x7000 Entry 52 (size 16 bundles) Reserved | |
1454 | DBG_FAULT(52) | |
1455 | FAULT(52) | |
1456 | ||
1457 | .org ia64_ivt+0x7100 | |
1458 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1459 | // 0x7100 Entry 53 (size 16 bundles) Reserved | |
1460 | DBG_FAULT(53) | |
1461 | FAULT(53) | |
1462 | ||
1463 | .org ia64_ivt+0x7200 | |
1464 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1465 | // 0x7200 Entry 54 (size 16 bundles) Reserved | |
1466 | DBG_FAULT(54) | |
1467 | FAULT(54) | |
1468 | ||
1469 | .org ia64_ivt+0x7300 | |
1470 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1471 | // 0x7300 Entry 55 (size 16 bundles) Reserved | |
1472 | DBG_FAULT(55) | |
1473 | FAULT(55) | |
1474 | ||
1475 | .org ia64_ivt+0x7400 | |
1476 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1477 | // 0x7400 Entry 56 (size 16 bundles) Reserved | |
1478 | DBG_FAULT(56) | |
1479 | FAULT(56) | |
1480 | ||
1481 | .org ia64_ivt+0x7500 | |
1482 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1483 | // 0x7500 Entry 57 (size 16 bundles) Reserved | |
1484 | DBG_FAULT(57) | |
1485 | FAULT(57) | |
1486 | ||
1487 | .org ia64_ivt+0x7600 | |
1488 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1489 | // 0x7600 Entry 58 (size 16 bundles) Reserved | |
1490 | DBG_FAULT(58) | |
1491 | FAULT(58) | |
1492 | ||
1493 | .org ia64_ivt+0x7700 | |
1494 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1495 | // 0x7700 Entry 59 (size 16 bundles) Reserved | |
1496 | DBG_FAULT(59) | |
1497 | FAULT(59) | |
1498 | ||
1499 | .org ia64_ivt+0x7800 | |
1500 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1501 | // 0x7800 Entry 60 (size 16 bundles) Reserved | |
1502 | DBG_FAULT(60) | |
1503 | FAULT(60) | |
1504 | ||
1505 | .org ia64_ivt+0x7900 | |
1506 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1507 | // 0x7900 Entry 61 (size 16 bundles) Reserved | |
1508 | DBG_FAULT(61) | |
1509 | FAULT(61) | |
1510 | ||
1511 | .org ia64_ivt+0x7a00 | |
1512 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1513 | // 0x7a00 Entry 62 (size 16 bundles) Reserved | |
1514 | DBG_FAULT(62) | |
1515 | FAULT(62) | |
1516 | ||
1517 | .org ia64_ivt+0x7b00 | |
1518 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1519 | // 0x7b00 Entry 63 (size 16 bundles) Reserved | |
1520 | DBG_FAULT(63) | |
1521 | FAULT(63) | |
1522 | ||
1523 | .org ia64_ivt+0x7c00 | |
1524 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1525 | // 0x7c00 Entry 64 (size 16 bundles) Reserved | |
1526 | DBG_FAULT(64) | |
1527 | FAULT(64) | |
1528 | ||
1529 | .org ia64_ivt+0x7d00 | |
1530 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1531 | // 0x7d00 Entry 65 (size 16 bundles) Reserved | |
1532 | DBG_FAULT(65) | |
1533 | FAULT(65) | |
1534 | ||
1535 | .org ia64_ivt+0x7e00 | |
1536 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1537 | // 0x7e00 Entry 66 (size 16 bundles) Reserved | |
1538 | DBG_FAULT(66) | |
1539 | FAULT(66) | |
1540 | ||
1541 | .org ia64_ivt+0x7f00 | |
1542 | ///////////////////////////////////////////////////////////////////////////////////////// | |
1543 | // 0x7f00 Entry 67 (size 16 bundles) Reserved | |
1544 | DBG_FAULT(67) | |
1545 | FAULT(67) | |
1546 | ||
4d58bbcc IY |
1547 | //-----------------------------------------------------------------------------------
1548 | // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
1549 | ENTRY(page_fault) | |
1550 | SSM_PSR_DT_AND_SRLZ_I // turn psr.dt back on (may be off per header) and serialize | |
1551 | ;; | |
1552 | SAVE_MIN_WITH_COVER | |
1553 | alloc r15=ar.pfs,0,0,3,0 | |
1554 | MOV_FROM_IFA(out0) // out0 = cr.ifa (faulting address) | |
1555 | MOV_FROM_ISR(out1) // out1 = cr.isr (interruption status) | |
1556 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3) | |
1557 | adds r3=8,r2 // set up second base pointer | |
1558 | SSM_PSR_I(p15, p15, r14) // restore psr.i | |
1559 | movl r14=ia64_leave_kernel | |
1560 | ;; | |
1561 | SAVE_REST | |
1562 | mov rp=r14 // return path is ia64_leave_kernel | |
1563 | ;; | |
1564 | adds out2=16,r12 // out2 = pointer to pt_regs | |
1565 | br.call.sptk.many b6=ia64_do_page_fault // ignore return address | |
1566 | END(page_fault) | |
1567 | ||
1568 | ENTRY(non_syscall) | |
1569 | mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER | |
1570 | ;; | |
1571 | SAVE_MIN_WITH_COVER | |
1572 | ||
1573 | // There is no particular reason for this code to be here, other than that | |
1574 | // there happens to be space here that would go unused otherwise. If this | |
1575 | // fault ever gets "unreserved", simply move the following code to a more | |
1576 | // suitable spot... | |
1577 | ||
1578 | alloc r14=ar.pfs,0,0,2,0 | |
1579 | MOV_FROM_IIM(out0) // out0 = cr.iim (break immediate) | |
1580 | add out1=16,sp // out1 = pointer to pt_regs | |
1581 | adds r3=8,r2 // set up second base pointer for SAVE_REST | |
1582 | ||
1583 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24) | |
1584 | // guarantee that interruption collection is on | |
1585 | SSM_PSR_I(p15, p15, r15) // restore psr.i | |
1586 | movl r15=ia64_leave_kernel | |
1587 | ;; | |
1588 | SAVE_REST | |
1589 | mov rp=r15 // return path is ia64_leave_kernel | |
1590 | ;; | |
1591 | br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr | |
1592 | END(non_syscall) | |
1593 | ||
// External-interrupt dispatcher (vector 12): saves state, reads cr.ivr and
// hands the IRQ number plus pt_regs to ia64_handle_irq.
1594 | ENTRY(__interrupt) | |
1595 | DBG_FAULT(12) | |
1596 | mov r31=pr // prepare to save predicates | |
1597 | ;; | |
1598 | SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 | |
1599 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14) | |
1600 | // ensure everybody knows psr.ic is back on | |
1601 | adds r3=8,r2 // set up second base pointer for SAVE_REST | |
1602 | ;; | |
1603 | SAVE_REST | |
1604 | ;; | |
1605 | MCA_RECOVER_RANGE(interrupt) | |
1606 | alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group | |
1607 | MOV_FROM_IVR(out0, r8) // pass cr.ivr as first arg | |
1608 | add out1=16,sp // pass pointer to pt_regs as second arg | |
1609 | ;; | |
1610 | srlz.d // make sure we see the effect of cr.ivr | |
1611 | movl r14=ia64_leave_kernel | |
1612 | ;; | |
1613 | mov rp=r14 // return path is ia64_leave_kernel | |
1614 | br.call.sptk.many b6=ia64_handle_irq | |
1615 | END(__interrupt) | |
1616 | ||
1617 | /* | |
1618 | * There is no particular reason for this code to be here, other than that | |
1619 | * there happens to be space here that would go unused otherwise. If this | |
1620 | * fault ever gets "unreserved", simply move the following code to a more | |
1621 | * suitable spot... | |
1622 | */ | |
1623 | ||
1624 | ENTRY(dispatch_unaligned_handler) | |
1625 | SAVE_MIN_WITH_COVER | |
1626 | ;; | |
1627 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | |
1628 | MOV_FROM_IFA(out0) // out0 = cr.ifa (unaligned access address) | |
1629 | adds out1=16,sp // out1 = pointer to pt_regs | |
1630 | ||
1631 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) | |
1632 | // guarantee that interruption collection is on | |
1633 | SSM_PSR_I(p15, p15, r3) // restore psr.i | |
1634 | adds r3=8,r2 // set up second base pointer | |
1635 | ;; | |
1636 | SAVE_REST | |
1637 | movl r14=ia64_leave_kernel | |
1638 | ;; | |
1639 | mov rp=r14 // return path is ia64_leave_kernel | |
1640 | br.sptk.many ia64_prepare_handle_unaligned | |
1641 | END(dispatch_unaligned_handler) | |
1642 | ||
1643 | /* | |
1644 | * There is no particular reason for this code to be here, other than that | |
1645 | * there happens to be space here that would go unused otherwise. If this | |
1646 | * fault ever gets "unreserved", simply move the following code to a more | |
1647 | * suitable spot... | |
1648 | */ | |
1649 | ||
1650 | ENTRY(dispatch_to_fault_handler) | |
1651 | /* | |
1652 | * Generic fault dispatcher: packages vector number plus the
1653 | * interruption control registers and calls ia64_fault().
1654 | *
1655 | * Input: | |
1656 | * psr.ic: off | |
1657 | * r19: fault vector number (e.g., 24 for General Exception) | |
1658 | * r31: contains saved predicates (pr) | |
1659 | */ | |
1657 | SAVE_MIN_WITH_COVER_R19 | |
1658 | alloc r14=ar.pfs,0,0,5,0 | |
1659 | MOV_FROM_ISR(out1) // out1 = cr.isr | |
1660 | MOV_FROM_IFA(out2) // out2 = cr.ifa | |
1661 | MOV_FROM_IIM(out3) // out3 = cr.iim | |
1662 | MOV_FROM_ITIR(out4) // out4 = cr.itir | |
1663 | ;; | |
1664 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0) | |
1665 | // guarantee that interruption collection is on | |
1666 | mov out0=r15 // out0 = vector number (in r15; presumably set from r19 by SAVE_MIN_WITH_COVER_R19 -- confirm) | |
1667 | ;; | |
1668 | SSM_PSR_I(p15, p15, r3) // restore psr.i | |
1669 | adds r3=8,r2 // set up second base pointer for SAVE_REST | |
1670 | ;; | |
1671 | SAVE_REST | |
1672 | movl r14=ia64_leave_kernel | |
1673 | ;; | |
1674 | mov rp=r14 // return path is ia64_leave_kernel | |
1675 | br.call.sptk.many b6=ia64_fault | |
1676 | END(dispatch_to_fault_handler) | |
1677 | ||
4dcc29e1 TL |
1678 | /* | |
1679 | * Squatting in this space ... | |
1680 | * | |
1681 | * This special case dispatcher for illegal operation faults allows preserved | |
1682 | * registers to be modified through a callback function (asm only) that is handed | |
1683 | * back from the fault handler in r8. Up to three arguments can be passed to the | |
1684 | * callback function by returning an aggregate with the callback as its first | |
1685 | * element, followed by the arguments. | |
1686 | */ | |
1687 | ENTRY(dispatch_illegal_op_fault) | |
1688 | .prologue | |
1689 | .body | |
1690 | SAVE_MIN_WITH_COVER | |
498c5170 IY |
1691 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) | |
1692 | // guarantee that interruption collection is on | |
4dcc29e1 | 1693 | ;; | |
498c5170 | 1694 | SSM_PSR_I(p15, p15, r3) // restore psr.i | |
4dcc29e1 TL |
1695 | adds r3=8,r2 // set up second base pointer for SAVE_REST | |
1696 | ;; | |
1697 | alloc r14=ar.pfs,0,0,1,0 // must be first in insn group | |
1698 | mov out0=ar.ec | |
1699 | ;; | |
1700 | SAVE_REST | |
1701 | PT_REGS_UNWIND_INFO(0) | |
1702 | ;; | |
1703 | br.call.sptk.many rp=ia64_illegal_op_fault | |
1704 | .ret0: ;; | |
1705 | alloc r14=ar.pfs,0,0,3,0 // must be first in insn group | |
1706 | mov out0=r9 // callback args from the aggregate return (r9-r11) | |
1707 | mov out1=r10 | |
1708 | mov out2=r11 | |
1709 | movl r15=ia64_leave_kernel | |
1710 | ;; | |
1711 | mov rp=r15 | |
1712 | mov b6=r8 // b6 = callback function handed back in r8 (may be 0) | |
1713 | ;; | |
1714 | cmp.ne p6,p0=0,r8 // invoke callback only if handler returned one | |
1715 | (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel | |
1716 | br.sptk.many ia64_leave_kernel | |
1717 | END(dispatch_illegal_op_fault) | |
1718 | ||
1da177e4 LT |
1719 | #ifdef CONFIG_IA32_SUPPORT | |
1720 | ||
1721 | /* | |
1722 | * There is no particular reason for this code to be here, other than that | |
1723 | * there happens to be space here that would go unused otherwise. If this | |
1724 | * fault ever gets "unreserved", simply move the following code to a more | |
1725 | * suitable spot... | |
1726 | */ | |
1727 | ||
1728 | // IA32 interrupt entry point | |
1729 | ||
1730 | ENTRY(dispatch_to_ia32_handler) | |
1731 | SAVE_MIN | |
1732 | ;; | |
498c5170 IY |
1733 | MOV_FROM_ISR(r14) // r14 = cr.isr (holds the interrupt number in bits 16+) | |
1734 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) | |
1735 | // guarantee that interruption collection is on | |
1da177e4 | 1736 | ;; | |
498c5170 | 1737 | SSM_PSR_I(p15, p15, r3) // restore psr.i | |
1da177e4 LT |
1738 | adds r3=8,r2 // Base pointer for SAVE_REST | |
1739 | ;; | |
1740 | SAVE_REST | |
1741 | ;; | |
1742 | mov r15=0x80 // 0x80 = IA32 int 0x80 (syscall gate) | |
1743 | shr r14=r14,16 // Get interrupt number | |
1744 | ;; | |
1745 | cmp.ne p6,p0=r14,r15 | |
1746 | (p6) br.call.dpnt.many b6=non_ia32_syscall | |
1747 | ||
1748 | adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions | |
1749 | adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp | |
1750 | ;; | |
1751 | cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 | |
1752 | ld8 r8=[r14] // get r8 | |
1753 | ;; | |
1754 | st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP) | |
1755 | ;; | |
1756 | alloc r15=ar.pfs,0,0,6,0 // must be first in an insn group | |
1757 | ;; | |
1758 | ld4 r8=[r14],8 // r8 == eax (syscall number) | |
1759 | mov r15=IA32_NR_syscalls | |
1760 | ;; | |
1761 | cmp.ltu.unc p6,p7=r8,r15 // p6 = syscall number in range | |
1762 | ld4 out1=[r14],8 // r9 == ecx | |
1763 | ;; | |
1764 | ld4 out2=[r14],8 // r10 == edx | |
1765 | ;; | |
1766 | ld4 out0=[r14] // r11 == ebx | |
1767 | adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp | |
1768 | ;; | |
1769 | ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp | |
1770 | ;; | |
1771 | ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi | |
1772 | adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 | |
1773 | ;; | |
1774 | ld4 out4=[r14] // r15 == edi | |
1775 | movl r16=ia32_syscall_table | |
1776 | ;; | |
1777 | (p6) shladd r16=r8,3,r16 // valid number: index table; else r16 stays at entry 0 (ni_syscall) | |
1778 | ld4 r2=[r2] // r2 = current_thread_info()->flags | |
1779 | ;; | |
1780 | ld8 r16=[r16] | |
1781 | and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit | |
1782 | ;; | |
1783 | mov b6=r16 | |
1784 | movl r15=ia32_ret_from_syscall | |
1785 | cmp.eq p8,p0=r2,r0 // p8 = no trace/audit flags set | |
1786 | ;; | |
1787 | mov rp=r15 | |
1788 | (p8) br.call.sptk.many b6=b6 // untraced: call handler directly | |
1789 | br.cond.sptk ia32_trace_syscall | |
1790 | ||
1791 | non_ia32_syscall: | |
1792 | alloc r15=ar.pfs,0,0,2,0 | |
1793 | mov out0=r14 // interrupt # | |
1794 | add out1=16,sp // pointer to pt_regs | |
1795 | ;; // avoid WAW on CFM | |
1796 | br.call.sptk.many rp=ia32_bad_interrupt | |
1797 | .ret1: movl r15=ia64_leave_kernel | |
1798 | ;; | |
1799 | mov rp=r15 | |
1800 | br.ret.sptk.many rp | |
1801 | END(dispatch_to_ia32_handler) | |
1802 | ||
1803 | #endif /* CONFIG_IA32_SUPPORT */