/*
 * linux/arch/i386/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>

extern void die(const char *, struct pt_regs *, long);

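/*
 * Give kprobes first shot at an in-kernel fault; 14 is the x86
 * page-fault trap number passed to the registered fault handler.
 */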
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
	return 0;
}
#endif

/*
 * Return EIP plus the CS segment base. The segment limit is also
 * adjusted, clamped to the kernel/user address space (whichever is
 * appropriate), and returned in *eip_limit.
 *
 * The segment is checked, because it might have been changed by another
 * task between the original faulting instruction and here.
 *
 * If CS is no longer a valid code segment, or if EIP is beyond the
 * limit, or if it is a kernel address when CS is not a kernel segment,
 * then the returned value will be greater than *eip_limit.
 *
 * This is slow, but is very rarely executed.
 */
static inline unsigned long get_segment_eip(struct pt_regs *regs,
					    unsigned long *eip_limit)
{
	unsigned long ip = regs->ip;
	unsigned seg = regs->cs & 0xffff;
	u32 seg_ar, seg_limit, base, *desc;

	/* Unlikely, but must come before segment checks. */
	if (unlikely(regs->flags & VM_MASK)) {
		base = seg << 4;
		*eip_limit = base + 0xffff;
		return base + (ip & 0xffff);
	}

	/* The standard kernel/user address space limit. */
	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;

	/* By far the most common cases. */
	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
		return ip;

	/* Check the segment exists, is within the current LDT/GDT size,
	   that kernel/user (ring 0..3) has the appropriate privilege,
	   that it's a code segment, and get the limit. */
	__asm__ ("larl %3,%0; lsll %3,%1"
		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
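	/*
	 * LAR returns the access-rights dword; in mask 0x9800, bit 15 is
	 * the present bit, bit 12 the non-system (code/data) bit and bit
	 * 11 the executable type bit, so any of them clear means CS is
	 * not a usable code segment.
	 */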
	if ((~seg_ar & 0x9800) || ip > seg_limit) {
		*eip_limit = 0;
		return 1;	 /* So that returned ip > *eip_limit. */
	}

	/* Get the GDT/LDT descriptor base.
	   When you look for races in this code remember that
	   LDT and other horrors are only used in user space. */
	if (seg & (1<<2)) {
		/* Must lock the LDT while reading it. */
		mutex_lock(&current->mm->context.lock);
		desc = current->mm->context.ldt;
		desc = (void *)desc + (seg & ~7);
	} else {
		/* Must disable preemption while reading the GDT. */
		desc = (u32 *)get_cpu_gdt_table(get_cpu());
		desc = (void *)desc + (seg & ~7);
	}

	/* Decode the code segment base from the descriptor */
	base = get_desc_base((struct desc_struct *)desc);

	if (seg & (1<<2)) {
		mutex_unlock(&current->mm->context.lock);
	} else
		put_cpu();

	/* Adjust EIP and segment limit, and clamp at the kernel limit.
	   It's legitimate for segments to wrap at 0xffffffff. */
	seg_limit += base;
	if (seg_limit < *eip_limit && seg_limit >= base)
		*eip_limit = seg_limit;
	return ip + base;
}

/*
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 */
static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
{
	unsigned long limit;
	unsigned char *instr = (unsigned char *)get_segment_eip(regs, &limit);
	int scan_more = 1;
	int prefetch = 0;
	int i;

	for (i = 0; scan_more && i < 15; i++) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (instr > (unsigned char *)limit)
			break;
		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
			scan_more = ((instr_lo & 7) == 0x6);
			break;

		case 0x60:
			/* 0x64 through 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;
			if (instr > (unsigned char *)limit)
				break;
			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}

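/*
 * error_code bit 4 is only set on instruction fetches (NX); a prefetch
 * is a data access, so an NX fault can never be a misreported prefetch
 * and is filtered out below.
 */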
static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
			      unsigned long error_code)
{
	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6)) {
		/* Catch an obscure case of prefetch inside an NX page. */
		if (nx_enabled && (error_code & 16))
			return 0;
		return __is_prefetch(regs, addr);
	}
	return 0;
}

static noinline void force_sig_info_fault(int si_signo, int si_code,
					  unsigned long address,
					  struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

void do_invalid_op(struct pt_regs *, unsigned long);

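/*
 * Copy the kernel mapping for @address from the reference page table
 * (init_mm.pgd) into @pgd. Returns the matching kernel pmd entry, or
 * NULL if the reference page table has no mapping for that slot.
 */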
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static inline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}

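/*
 * When non-zero, unhandled user-space faults get the rate-limited
 * "segfault at ..." log line below (typically exposed as the
 * debug.exception-trace sysctl).
 */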
int show_unhandled_signals = 1;

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
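/*
 * Example: error_code == 6 (bits 1 and 2 set) is a user-mode write to
 * a page that is not present, i.e. an ordinary demand fault.
 */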
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;
	int fault;

	/*
	 * We can fault from pretty much anywhere, with unknown IRQ state.
	 */
	trace_hardirqs_fixup();

	/* get the address */
	address = read_cr2();

	tsk = current;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(address >= TASK_SIZE)) {
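		/*
		 * 0x0d masks bits 0, 2 and 3: only a kernel-mode access
		 * to a not-present page, with no reserved-bit violation,
		 * may be fixed up as a vmalloc fault.
		 */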
		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	if (notify_page_fault(regs))
		return;

	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
	   fault has been handled. */
	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space; if we cannot, we then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & 4) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & 4) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & 3) {
	default:	/* 3: write, present */
		/* fall through */
	case 2:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case 1:		/* read, present */
		goto bad_area;
	case 0:		/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (regs->flags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & 4) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk("%s%s[%d]: segfault at %08lx ip %08lx "
			    "sp %08lx error %lx\n",
			    task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			    tsk->comm, task_pid_nr(tsk), address, regs->ip,
			    regs->sp, error_code);
		}
		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
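	/*
	 * The workaround maps the IDT read-only, so the buggy
	 * "f0 0f c7 c8" sequence faults on the CPU's own IDT access.
	 * Descriptors are 8 bytes (hence the >> 3), and entry 6 is the
	 * invalid-opcode vector, so hand the fault to do_invalid_op().
	 */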
	if (boot_cpu_data.f00f_bug) {
		unsigned long nr;

		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return;
		}
	}
#endif

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	if (oops_may_print()) {
		__typeof__(pte_val(__pte(0))) page;

#ifdef CONFIG_X86_PAE
		if (error_code & 16) {
			pte_t *pte = lookup_address(address);

			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
				printk(KERN_CRIT "kernel tried to execute "
					"NX-protected page - exploit attempt? "
					"(uid: %d)\n", current->uid);
		}
#endif
		if (address < PAGE_SIZE)
			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
					"pointer dereference");
		else
			printk(KERN_ALERT "BUG: unable to handle kernel paging"
					" request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "printing ip: %08lx ", regs->ip);

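		/*
		 * Walk the page tables by hand, from cr3 down, to show
		 * how far address translation got before faulting.
		 */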
		page = read_cr3();
		page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
		printk("*pdpt = %016Lx ", page);
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
			                                         & (PTRS_PER_PMD - 1)];
			printk(KERN_CONT "*pde = %016Lx ", page);
			page &= ~_PAGE_NX;
		}
#else
		printk("*pde = %08lx ", page);
#endif

		/*
		 * We must not directly access the pte in the highpte
		 * case if the page table is located in highmem.
		 * And let's rather not kmap-atomic the pte, just in case
		 * it's allocated already.
		 */
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && (page & _PAGE_PRESENT)
		    && !(page & _PAGE_PSE)) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
			                                         & (PTRS_PER_PTE - 1)];
			printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
		}

		printk("\n");
	}

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & 4)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & 4))
		goto no_context;

	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

void vmalloc_sync_all(void)
{
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

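			/*
			 * pgd_list threads every pagetable page through
			 * page->index; copy the kernel mapping for this
			 * slot into each of them under pgd_lock.
			 */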
			spin_lock_irqsave(&pgd_lock, flags);
			for (page = pgd_list; page; page =
					(struct page *)page->index)
				if (!vmalloc_sync_one(page_address(page),
						address)) {
					BUG_ON(page != pgd_list);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
}