/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include "xen-ops.h"
#include "vdso.h"
/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
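
/*
 * Worked example (illustrative figures, not from this file): with a
 * 512 MB base allocation, the clamp allows at most 10 * 512 MB = 5 GB
 * of extra memory.  At a sizeof(struct page) of roughly 64 bytes, the
 * page structures for 5 GB of extra 4 KB pages cost about
 * 64/4096 = 1/64 of the extra size, i.e. ~80 MB of the 512 MB base,
 * which still leaves a usable system.
 */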
static void __init xen_add_extra_mem(u64 start, u64 size)
{
	unsigned long pfn;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);

	xen_max_p2m_pfn = PFN_DOWN(start + size);

	for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
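
/*
 * Illustration (hypothetical values): two back-to-back calls
 *
 *	xen_add_extra_mem(0x100000000ULL, 0x10000000);	-- 4G, 256M
 *	xen_add_extra_mem(0x110000000ULL, 0x10000000);	-- contiguous
 *
 * leave a single xen_extra_mem[] entry of 512M starting at 4G: the
 * second call takes the "append to existing region" branch because
 * its start equals the end of the first region.
 */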
static unsigned long __init xen_do_chunk(unsigned long start,
					 unsigned long end, bool release)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	for (pfn = start; pfn < end; pfn++) {
		unsigned long frame;
		unsigned long mfn = pfn_to_mfn(pfn);

		if (release) {
			/* Make sure pfn exists to start with */
			if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
				continue;
			frame = mfn;
		} else {
			if (mfn != INVALID_P2M_ENTRY)
				continue;
			frame = pfn;
		}
		set_xen_guest_handle(reservation.extent_start, &frame);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
					   &reservation);
		WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
		     release ? "release" : "populate", pfn, ret);

		if (ret == 1) {
			if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
				if (release)
					break;
				set_xen_guest_handle(reservation.extent_start, &frame);
				reservation.nr_extents = 1;
				ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
							   &reservation);
				break;
			}
			len++;
		} else
			break;
	}
	if (len)
		printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
		       release ? "Freeing" : "Populating",
		       start, end, len,
		       release ? "freed" : "added");
	return len;
}
static unsigned long __init xen_release_chunk(unsigned long start,
					      unsigned long end)
{
	return xen_do_chunk(start, end, true);
}
static unsigned long __init xen_populate_chunk(
	const struct e820entry *list, size_t map_size,
	unsigned long max_pfn, unsigned long *last_pfn,
	unsigned long credits_left)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;
	unsigned long dest_pfn;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;
		unsigned long pfns;
		long capacity;

		if (credits_left <= 0)
			break;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after the xen_start_info->nr_pages */
		if (e_pfn <= max_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);
		/* If the E820 falls within the nr_pages, we want to start
		 * at the nr_pages PFN.
		 * If that would mean going past the E820 entry, skip it
		 */
		if (s_pfn <= max_pfn) {
			capacity = e_pfn - max_pfn;
			dest_pfn = max_pfn;
		} else {
			capacity = e_pfn - s_pfn;
			dest_pfn = s_pfn;
		}

		if (credits_left < capacity)
			capacity = credits_left;

		pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false);
		done += pfns;
		*last_pfn = (dest_pfn + pfns);
		if (pfns < capacity)
			break;
		credits_left -= pfns;
	}
	return done;
}
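
/*
 * Worked example (hypothetical map): with max_pfn = 0x40000 and an
 * E820_RAM entry covering PFNs 0x30000-0x50000, s_pfn <= max_pfn, so
 * population starts at dest_pfn = max_pfn with capacity =
 * 0x50000 - 0x40000 = 0x10000 pages, further clamped by the number
 * of released pages still available in credits_left.
 */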
static void __init xen_set_identity_and_release_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long *released, unsigned long *identity)
{
	unsigned long pfn;

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	if (start_pfn < nr_pages)
		*released += xen_release_chunk(
			start_pfn, min(end_pfn, nr_pages));

	*identity += set_phys_range_identity(start_pfn, end_pfn);
}
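
/*
 * Note on "identity" here: set_phys_range_identity() marks the P2M
 * entries so that pfn_to_mfn(pfn) == pfn for each PFN in
 * [start_pfn, end_pfn), i.e. the pseudo-physical frame maps straight
 * onto the machine frame of the same number, which is what firmware
 * tables and device memory in these ranges expect.
 */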
static unsigned long __init xen_set_identity_and_release(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long released = 0;
	unsigned long identity = 0;
	const struct e820entry *entry;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * release the pages (if available) in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				xen_set_identity_and_release_chunk(
					start_pfn, end_pfn, nr_pages,
					&released, &identity);

			start = end;
		}
	}

	if (released)
		printk(KERN_INFO "Released %lu pages of unused memory\n", released);
	if (identity)
		printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

	return released;
}
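
/*
 * Illustration (hypothetical e820): given
 *
 *	0x00000000-0x0009fc00  RAM
 *	0x0009fc00-0x000a0000  reserved
 *	0x000f0000-0x00100000  reserved
 *	0x00100000-...         RAM
 *
 * both reserved entries plus the gap between them form one combined
 * chunk covering PFNs 0x9f-0xff: the PFN_DOWN() rounding keeps the
 * partial page at 0x9fc00 (where BIOSes often place DMI/EBDA data)
 * reachable through the 1:1 mapping.
 */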
static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
	u64 end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((u64)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}
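
/*
 * Worked example (illustrative addresses): an E820_RAM region of
 * 0x0000fc00-0x9f0ffc00 is trimmed inward to 0x00010000-0x9f0ff000:
 * PAGE_ALIGN() rounds the start up to the next 4 KB boundary and the
 * mask rounds the end down, so a partially covered page is never
 * reported as usable RAM.
 */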
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long last_pfn = 0;
	unsigned long extra_pages = 0;
	unsigned long populated;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  Any RAM pages that would be made inaccessible by
	 * this are first released.
	 */
	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);

	/*
	 * Populate back the non-RAM pages and E820 gaps that had been
	 * released.
	 */
	populated = xen_populate_chunk(map, memmap.nr_entries,
			max_pfn, &last_pfn, xen_released_pages);

	xen_released_pages -= populated;
	extra_pages += xen_released_pages;
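
	/*
	 * Accounting sketch (hypothetical numbers): if 1000 pages were
	 * released from non-RAM ranges and 200 of them were populated
	 * back into RAM ranges above max_pfn, xen_released_pages ends
	 * up at 800, and those 800 pages are credited to extra_pages
	 * for placement in xen_extra_mem[] below.
	 */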
	if (last_pfn > max_pfn) {
		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
		mem_end = PFN_PHYS(max_pfn);
	}
	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
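
	/*
	 * Example of the walk below (hypothetical entry): a RAM entry
	 * straddling mem_end is consumed in slices.  The slice below
	 * mem_end stays E820_RAM, the remainder is fed to
	 * xen_add_extra_mem() while extra_pages credits last, and any
	 * leftover is downgraded to E820_UNUSABLE.  Each pass trims
	 * addr/size, and i only advances once the entry is exhausted.
	 */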
	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}
	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask;

	mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
static int __cpuinit register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
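
/*
 * Usage sketch: callers pass a CALLBACKTYPE_* constant and an entry
 * point from entry.S, e.g.
 *
 *	register_callback(CALLBACKTYPE_event, xen_hypervisor_callback);
 *
 * CALLBACKF_mask_events asks the hypervisor to mask event delivery
 * while the callback runs, much as the CPU masks interrupts when it
 * takes an interrupt gate.
 */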
void __cpuinit xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}
void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
	boot_cpu_data.hlt_works_ok = 1;
#endif
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(set_pm_idle_to_default());
	fiddle_vdso();
}