/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/dma-mapping.h>
#include <asm/swiotlb.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space, so we can cache the location of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}

/* References to section boundaries */

int after_bootmem;

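/*
 * spp_getpage: allocate one page for kernel page-table population.
 * Uses alloc_bootmem_pages() before mem_init() has run and
 * get_zeroed_page(GFP_ATOMIC) afterwards; panics if no properly
 * aligned page is available.
 */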
static void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

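/*
 * set_pte_phys: install a single kernel mapping of physical address 'phys'
 * at virtual address 'vaddr' with protection 'prot', allocating intermediate
 * pud/pmd/pte tables via spp_getpage() as needed. Used by __set_fixmap().
 */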
static void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void *address;
	int allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

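/*
 * alloc_low_page: take the next page frame from the range found by
 * find_early_table_space() (table_start..table_end) and map the 2MB region
 * containing it through a free temporary PMD slot in temp_mappings[], since
 * the direct mapping does not exist yet. Returns a virtual address for the
 * page and reports the slot index and physical address via *index and *phys.
 */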
static __init void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

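/* unmap_low_page: clear the temporary PMD used by slot 'i' and mark the slot free. */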
static __init void unmap_low_page(int i)
{
	struct temp_map *ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

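/*
 * phys_pud_init: fill one PUD-level table with 2MB (PSE) kernel mappings
 * covering the physical range [address, end). Regions that are entirely
 * absent from the e820 map are left unmapped.
 */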
static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end) {
			for (; i < PTRS_PER_PUD; i++, pud++)
				set_pud(pud, __pud(0));
			break;
		}

		if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end) {
				for (; j < PTRS_PER_PMD; j++, pmd++)
					set_pmd(pmd, __pmd(0));
				break;
			}
			pe = _PAGE_NX|_PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
		unmap_low_page(map);
	}
	__flush_tlb();
}

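/*
 * find_early_table_space: estimate how much memory the kernel direct-mapping
 * page tables will need for memory up to 'end', locate a free physical range
 * for them with find_e820_area(), and record it in table_start/table_end.
 */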
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pud_t *pud = alloc_low_page(&map, &pud_phys);
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
	early_printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
		     table_start<<PAGE_SHIFT,
		     table_end<<PAGE_SHIFT);
}

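/*
 * zap_low_mappings: remove the low identity mapping that was only needed to
 * bring the CPU up. The boot CPU clears the low pgd entry in the kernel page
 * table directly; APs load cr3 with init_level4_pgt, where that entry is
 * already clear, and flush the TLB.
 */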
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For AP's, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = start_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}

	/* Add the space needed for mem_map to the holes too. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

	/* The 16MB DMA zone has the kernel and other misc mappings.
	   Account them too */
	if (h[ZONE_DMA]) {
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printk(KERN_WARNING
				"Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA];
		}
	}
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
			       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

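/*
 * mem_init: finish memory initialization. Releases bootmem pages to the
 * buddy allocator, accounts reserved pages, registers /proc/kcore regions,
 * prints the memory banner and, on SMP, syncs boot_level4_pgt with
 * init_level4_pgt for AP bringup.
 */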
void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
	no_iommu_init();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP's bringup
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

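/*
 * free_initmem: return the __init text/data pages to the page allocator once
 * boot is complete, poisoning them with 0xcc first and filling __initdata
 * with 0xba so stale uses are caught.
 */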
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
	printk("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}

#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)&__start_rodata;

	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
		(&__end_rodata - &__start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < (unsigned long)&_end)
		return;
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif

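/*
 * reserve_bootmem_generic: reserve a physical range in the bootmem allocator,
 * picking the owning node on NUMA, and account it against the DMA zone via
 * dma_reserve when it falls below the 16MB boundary.
 */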
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
		dma_reserve += len / PAGE_SIZE;
}

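/*
 * kern_addr_valid: return non-zero if 'addr' is a canonical address that is
 * currently backed by a valid kernel page-table entry (including 2MB large
 * pages).
 */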
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}