x86: remove all definitions with fastcall
arch/x86/mm/fault_32.c
/*
 * linux/arch/i386/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>

extern void die(const char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
	return 0;
}
#endif

/*
 * Return EIP plus the CS segment base. The segment limit is also
 * adjusted, clamped to the kernel/user address space (whichever is
 * appropriate), and returned in *eip_limit.
 *
 * The segment is checked, because it might have been changed by another
 * task between the original faulting instruction and here.
 *
 * If CS is no longer a valid code segment, or if EIP is beyond the
 * limit, or if it is a kernel address when CS is not a kernel segment,
 * then the returned value will be greater than *eip_limit.
 *
 * This is slow, but is very rarely executed.
 */
static inline unsigned long get_segment_eip(struct pt_regs *regs,
					    unsigned long *eip_limit)
{
	unsigned long ip = regs->ip;
	unsigned seg = regs->cs & 0xffff;
	u32 seg_ar, seg_limit, base, *desc;

	/* Unlikely, but must come before segment checks. */
	if (unlikely(regs->flags & VM_MASK)) {
		base = seg << 4;
		*eip_limit = base + 0xffff;
		return base + (ip & 0xffff);
	}
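	/*
	 * Worked example for the vm86 branch above (values illustrative,
	 * not from the original source): with CS = 0x1000 and IP = 0x0100,
	 * base = 0x1000 << 4 = 0x10000, so the returned linear address is
	 * 0x10100 and *eip_limit is 0x10000 + 0xffff = 0x1ffff.
	 */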

	/* The standard kernel/user address space limit. */
	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;

	/* By far the most common cases. */
	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
		return ip;

	/* Check the segment exists, is within the current LDT/GDT size,
	   that kernel/user (ring 0..3) has the appropriate privilege,
	   that it's a code segment, and get the limit. */
	__asm__ ("larl %3,%0; lsll %3,%1"
		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
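	/*
	 * In the access rights loaded by LAR, mask 0x9800 covers bit 15
	 * (segment present), bit 12 (code/data descriptor) and bit 11
	 * (executable). seg_ar is preloaded with 0 and LAR leaves it
	 * unmodified for an invalid selector, so a failed load also
	 * trips the check below.
	 */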
	if ((~seg_ar & 0x9800) || ip > seg_limit) {
		*eip_limit = 0;
		return 1;	 /* So that returned ip > *eip_limit. */
	}

	/* Get the GDT/LDT descriptor base.
	   When you look for races in this code remember that
	   LDT and other horrors are only used in user space. */
	if (seg & (1<<2)) {
		/* Must lock the LDT while reading it. */
		mutex_lock(&current->mm->context.lock);
		desc = current->mm->context.ldt;
		desc = (void *)desc + (seg & ~7);
	} else {
		/* Must disable preemption while reading the GDT. */
		desc = (u32 *)get_cpu_gdt_table(get_cpu());
		desc = (void *)desc + (seg & ~7);
	}

	/* Decode the code segment base from the descriptor */
	base = get_desc_base((struct desc_struct *)desc);

	if (seg & (1<<2)) {
		mutex_unlock(&current->mm->context.lock);
	} else
		put_cpu();

	/* Adjust EIP and segment limit, and clamp at the kernel limit.
	   It's legitimate for segments to wrap at 0xffffffff. */
	seg_limit += base;
	if (seg_limit < *eip_limit && seg_limit >= base)
		*eip_limit = seg_limit;
	return ip + base;
}

/*
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 */
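/*
 * (Background, for reference: the opcodes scanned for below are
 * 0x0F 0x0D, the 3DNow! PREFETCH/PREFETCHW group, and 0x0F 0x18,
 * the SSE PREFETCHh group; the scan walks at most 15 bytes, the
 * maximum length of an x86 instruction.)
 */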
static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
{
	unsigned long limit;
	unsigned char *instr = (unsigned char *)get_segment_eip(regs, &limit);
	int scan_more = 1;
	int prefetch = 0;
	int i;

	for (i = 0; scan_more && i < 15; i++) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (instr > (unsigned char *)limit)
			break;
		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
			scan_more = ((instr_lo & 7) == 0x6);
			break;

		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;
			if (instr > (unsigned char *)limit)
				break;
			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}

static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
			      unsigned long error_code)
{
	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6)) {
		/* Catch an obscure case of prefetch inside an NX page. */
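		/*
		 * (error_code & 16 tests bit 4 of the hardware error code,
		 * the instruction-fetch flag, which the CPU reports only
		 * when NX is enabled; see the error_code bit layout
		 * documented before do_page_fault().)
		 */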
		if (nx_enabled && (error_code & 16))
			return 0;
		return __is_prefetch(regs, addr);
	}
	return 0;
}

static noinline void force_sig_info_fault(int si_signo, int si_code,
					  unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

void do_invalid_op(struct pt_regs *, unsigned long);

static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static inline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}

int show_unhandled_signals = 1;

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
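/*
 * Worked example (values illustrative, not from the original source):
 * error_code == 0x6 (binary 110) decodes as bit 0 = 0 (no page found),
 * bit 1 = 1 (write) and bit 2 = 1 (user mode): a user-space write to a
 * not-present page, the common demand-paging and copy-on-write case.
 */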
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;
	int fault;

	/*
	 * We can fault from pretty much anywhere, with unknown IRQ state.
	 */
	trace_hardirqs_fixup();

	/* get the address */
	address = read_cr2();

	tsk = current;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(address >= TASK_SIZE)) {
		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	if (notify_page_fault(regs))
		return;

	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
	   fault has been handled. */
	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & 4) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & 4) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & 3) {
		default:	/* 3: write, present */
			/* fall through */
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case 1:		/* read, present */
			goto bad_area;
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
				goto bad_area;
	}

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (regs->flags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & 4) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk("%s%s[%d]: segfault at %08lx ip %08lx "
			    "sp %08lx error %lx\n",
			    task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			    tsk->comm, task_pid_nr(tsk), address, regs->ip,
			    regs->sp, error_code);
		}
		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
	if (boot_cpu_data.f00f_bug) {
		unsigned long nr;

		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return;
		}
	}
#endif

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	if (oops_may_print()) {
		__typeof__(pte_val(__pte(0))) page;

#ifdef CONFIG_X86_PAE
		if (error_code & 16) {
			pte_t *pte = lookup_address(address);

			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
				printk(KERN_CRIT "kernel tried to execute "
					"NX-protected page - exploit attempt? "
					"(uid: %d)\n", current->uid);
		}
#endif
		if (address < PAGE_SIZE)
			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
					"pointer dereference");
		else
			printk(KERN_ALERT "BUG: unable to handle kernel paging"
					" request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "printing ip: %08lx ", regs->ip);

		page = read_cr3();
		page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
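		/*
		 * (The two lines above walk the top level by hand:
		 * read_cr3() yields the physical address of the page
		 * directory, __va() maps it back into the kernel's
		 * linear mapping, and the top bits of the faulting
		 * address index the directory entry.)
		 */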
#ifdef CONFIG_X86_PAE
		printk("*pdpt = %016Lx ", page);
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
			                                         & (PTRS_PER_PMD - 1)];
			printk(KERN_CONT "*pde = %016Lx ", page);
			page &= ~_PAGE_NX;
		}
#else
		printk("*pde = %08lx ", page);
#endif

		/*
		 * We must not directly access the pte in the highpte
		 * case if the page table is located in highmem.
		 * And let's rather not kmap-atomic the pte, just in case
		 * it's allocated already.
		 */
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && (page & _PAGE_PRESENT)
		    && !(page & _PAGE_PSE)) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
			                                         & (PTRS_PER_PTE - 1)];
			printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
		}

		printk("\n");
	}

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & 4)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & 4))
		goto no_context;

	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

void vmalloc_sync_all(void)
{
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
			for (page = pgd_list; page; page =
					(struct page *)page->index)
				if (!vmalloc_sync_one(page_address(page),
								address)) {
					BUG_ON(page != pgd_list);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
}