/*
 * Suspend support specific for x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;

static void fix_processor_context(void);

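/*
 * The boot CPU saves its context here; the nonboot CPUs are expected to
 * have been disabled before save_processor_state() is called.
 */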
struct saved_context saved_context;

/**
 * __save_processor_state - save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the register contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() must save all registers needed by
 * kernel A, so that it can operate correctly after the resume regardless
 * of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
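
	/*
	 * Save the fixed-range MTRR contents into the kernel's MTRR state,
	 * so that they can be reprogrammed on resume (see the
	 * mtrr_ap_init() call in __restore_processor_state()).
	 */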
	mtrr_save_fixed_ranges(NULL);

	/*
	 * control registers
	 */
	rdmsrl(MSR_EFER, ctxt->efer);
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
}

void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary
	 */
	kernel_fpu_end();
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *	by __save_processor_state()
 * @ctxt: structure to load the register contents from
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
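	/*
	 * EFER is restored first: if the saved page tables contain NX
	 * mappings, EFER.NXE must be in effect before they are used.
	 */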
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);

	/*
	 * segment registers
	 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	fix_processor_context();

	do_fpu_end();
	mtrr_ap_init();
}

void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}

static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	/*
	 * This just modifies memory; it should not be necessary.  But...
	 * it is necessary, because 386 hardware has the concept of a busy
	 * TSS or some similar stupidity.
	 */
	set_tss_desc(cpu, t);

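	/*
	 * ltr marked the TSS descriptor busy (type 11); reset it to
	 * "available 64-bit TSS" (type 9) so that load_TR_desc() below
	 * can reload it without faulting.
	 */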
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */

	/*
	 * Now maybe reload the debug registers
	 */
	if (current->thread.debugreg7) {
		loaddebug(&current->thread, 0);
		loaddebug(&current->thread, 1);
		loaddebug(&current->thread, 2);
		loaddebug(&current->thread, 3);
		/* no 4 and 5 */
		loaddebug(&current->thread, 6);
		loaddebug(&current->thread, 7);
	}
}

#ifdef CONFIG_HIBERNATION
/* Defined in arch/x86_64/kernel/suspend_asm.S */
extern int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3;

pgd_t *temp_level4_pgt;

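/*
 * Location of the safe page holding a copy of the low-level image-restore
 * code, so that the copy keeps running even if the page frames holding the
 * original code are themselves overwritten with image data.
 */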
void *relocated_restore_code;

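/*
 * Fill in the given pud table with 2 MB "large page" mappings covering the
 * physical address range [address, end), allocating pmd pages from the set
 * of safe pages as needed.
 */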
static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		unsigned long paddr;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end)
			break;

		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!pmd)
			return -ENOMEM;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end)
				break;
			pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
	}
	return 0;
}

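/*
 * Build a temporary page table hierarchy on safe pages: reuse the original
 * kernel text mapping and recreate the direct mapping of all physical
 * memory from scratch.
 */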
static int set_up_temporary_mappings(void)
{
	unsigned long start, end, next;
	int error;

	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!temp_level4_pgt)
		return -ENOMEM;

	/* It is safe to reuse the original kernel mapping */
	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
		init_level4_pgt[pgd_index(__START_KERNEL_map)]);

	/* Set up the direct mapping from scratch */
	start = (unsigned long)pfn_to_kaddr(0);
	end = (unsigned long)pfn_to_kaddr(end_pfn);

	for (; start < end; start = next) {
		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!pud)
			return -ENOMEM;
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
			return error;
		set_pgd(temp_level4_pgt + pgd_index(start),
			mk_kernel_pgd(__pa(pud)));
	}
	return 0;
}

int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory; from now on, we cannot recover */
	if ((error = set_up_temporary_mappings()))
		return error;

	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;
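
	/*
	 * The restore code spans from core_restore_code to
	 * restore_registers in suspend_asm.S; copy that whole range onto
	 * the safe page just allocated.
	 */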
	memcpy(relocated_restore_code, &core_restore_code,
	       &restore_registers - &core_restore_code);

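	/*
	 * restore_image() switches to temp_level4_pgt, copies the image
	 * pages back to their original locations (running from the
	 * relocated copy of the restore code) and finally jumps to the
	 * image kernel at restore_jump_address with restore_cr3 loaded.
	 */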
	restore_image();
	return 0;
}

/**
 * pfn_is_nosave - check if the given pfn is in the 'nosave' section
 * @pfn: the page frame number to check
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

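/*
 * Layout of the architecture-specific part of the hibernation image
 * header, written by arch_hibernation_header_save() and read back by
 * arch_hibernation_header_restore() below.
 */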
struct restore_data_record {
	unsigned long jump_address;
	unsigned long cr3;
	unsigned long magic;
};

#define RESTORE_MAGIC	0x0123456789ABCDEFUL

/**
 * arch_hibernation_header_save - populate the architecture specific part
 *	of a hibernation image header
 * @addr: address to save the data at
 * @max_size: maximum size of the data that can be saved at @addr
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = restore_jump_address;
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

/**
 * arch_hibernation_header_restore - read the architecture specific data
 *	from the hibernation image header
 * @addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	restore_cr3 = rdr->cr3;
	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}
#endif /* CONFIG_HIBERNATION */