/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in elilo bootloader
 * based on a EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is setup appropriately for EFI runtime code.
 */
18 #include <linux/kernel.h>
19 #include <linux/init.h>
21 #include <linux/types.h>
22 #include <linux/spinlock.h>
23 #include <linux/bootmem.h>
24 #include <linux/ioport.h>
25 #include <linux/module.h>
26 #include <linux/efi.h>
27 #include <linux/uaccess.h>
29 #include <linux/reboot.h>
31 #include <asm/setup.h>
34 #include <asm/pgtable.h>
35 #include <asm/tlbflush.h>
36 #include <asm/cacheflush.h>
37 #include <asm/proto.h>
40 static pgd_t save_pgd __initdata
;
41 static unsigned long efi_flags __initdata
;
43 static int __init
setup_noefi(char *arg
)
48 early_param("noefi", setup_noefi
);
50 static void __init
early_mapping_set_exec(unsigned long start
,
58 kpte
= lookup_address((unsigned long)__va(start
), &level
);
61 set_pte(kpte
, pte_mkexec(*kpte
));
63 set_pte(kpte
, __pte((pte_val(*kpte
) | _PAGE_NX
) & \
64 __supported_pte_mask
));
66 start
= (start
+ PMD_SIZE
) & PMD_MASK
;
68 start
= (start
+ PAGE_SIZE
) & PAGE_MASK
;
72 static void __init
early_runtime_code_mapping_set_exec(int executable
)
74 efi_memory_desc_t
*md
;
77 /* Make EFI runtime service code area executable */
78 for (p
= memmap
.map
; p
< memmap
.map_end
; p
+= memmap
.desc_size
) {
80 if (md
->type
== EFI_RUNTIME_SERVICES_CODE
) {
82 end
= md
->phys_addr
+ (md
->num_pages
<< PAGE_SHIFT
);
83 early_mapping_set_exec(md
->phys_addr
, end
, executable
);
88 void __init
efi_call_phys_prelog(void)
90 unsigned long vaddress
;
92 local_irq_save(efi_flags
);
93 early_runtime_code_mapping_set_exec(1);
94 vaddress
= (unsigned long)__va(0x0UL
);
95 pgd_val(save_pgd
) = pgd_val(*pgd_offset_k(0x0UL
));
96 set_pgd(pgd_offset_k(0x0UL
), *pgd_offset_k(vaddress
));
100 void __init
efi_call_phys_epilog(void)
103 * After the lock is released, the original page table is restored.
105 set_pgd(pgd_offset_k(0x0UL
), save_pgd
);
106 early_runtime_code_mapping_set_exec(0);
108 local_irq_restore(efi_flags
);
112 * We need to map the EFI memory map again after init_memory_mapping().
114 void __init
efi_map_memmap(void)
116 memmap
.map
= __va(memmap
.phys_map
);
117 memmap
.map_end
= memmap
.map
+ (memmap
.nr_map
* memmap
.desc_size
);
120 void __init
efi_reserve_bootmem(void)
122 reserve_bootmem_generic((unsigned long)memmap
.phys_map
,
123 memmap
.nr_map
* memmap
.desc_size
);
126 void __init
runtime_code_page_mkexec(void)
128 efi_memory_desc_t
*md
;
131 /* Make EFI runtime service code area executable */
132 for (p
= memmap
.map
; p
< memmap
.map_end
; p
+= memmap
.desc_size
) {
134 if (md
->type
== EFI_RUNTIME_SERVICES_CODE
)
135 change_page_attr_addr(md
->virt_addr
,
142 void __iomem
* __init
efi_ioremap(unsigned long offset
,
145 static unsigned pages_mapped
;
146 unsigned long last_addr
;
149 last_addr
= offset
+ size
- 1;
151 pages
= (PAGE_ALIGN(last_addr
) - offset
) >> PAGE_SHIFT
;
152 if (pages_mapped
+ pages
> MAX_EFI_IO_PAGES
)
155 for (i
= 0; i
< pages
; i
++) {
156 set_fixmap_nocache(FIX_EFI_IO_MAP_FIRST_PAGE
- pages_mapped
,
162 return (void __iomem
*)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE
- \
163 (pages_mapped
- pages
));