/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>

#include "xen-ops.h"
#include "vdso.h"

/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
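
/*
 * Worked example: a domain booted with 512 MB of base memory can be
 * granted at most 10 * 512 MB = 5 GB of extra memory through
 * xen_add_extra_mem(); any E820 RAM beyond that clamp is marked
 * E820_UNUSABLE in xen_memory_setup() below.
 */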

static void __init xen_add_extra_mem(u64 start, u64 size)
{
        unsigned long pfn;
        int i;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                /* Add new region. */
                if (xen_extra_mem[i].size == 0) {
                        xen_extra_mem[i].start = start;
                        xen_extra_mem[i].size = size;
                        break;
                }
                /* Append to existing region. */
                if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
                        xen_extra_mem[i].size += size;
                        break;
                }
        }
        if (i == XEN_EXTRA_MEM_MAX_REGIONS)
                printk(KERN_WARNING "Warning: not enough extra memory regions\n");

        memblock_reserve(start, size);

        xen_max_p2m_pfn = PFN_DOWN(start + size);

        for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
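
/*
 * Populate or release the PFN range [start, end): for a release, hand
 * each backing MFN to Xen with XENMEM_decrease_reservation; for a
 * populate, request a fresh frame for each PFN with
 * XENMEM_populate_physmap.  The P2M is updated one page at a time and
 * the return value is the number of pages actually processed.
 */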
static unsigned long __init xen_do_chunk(unsigned long start,
                                         unsigned long end, bool release)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid        = DOMID_SELF
        };
        unsigned long len = 0;
        unsigned long pfn;
        int ret;

        for (pfn = start; pfn < end; pfn++) {
                unsigned long frame;
                unsigned long mfn = pfn_to_mfn(pfn);

                if (release) {
                        /* Make sure pfn exists to start with */
                        if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
                                continue;
                        frame = mfn;
                } else {
                        if (mfn != INVALID_P2M_ENTRY)
                                continue;
                        frame = pfn;
                }
                set_xen_guest_handle(reservation.extent_start, &frame);
                reservation.nr_extents = 1;

                ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
                                           &reservation);
                WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
                     release ? "release" : "populate", pfn, ret);

                if (ret == 1) {
                        if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
                                if (release)
                                        break;
                                set_xen_guest_handle(reservation.extent_start, &frame);
                                reservation.nr_extents = 1;
                                ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                                           &reservation);
                                break;
                        }
                        len++;
                } else
                        break;
        }
        if (len)
                printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
                       release ? "Freeing" : "Populating",
                       start, end, len,
                       release ? "freed" : "added");
        return len;
}

static unsigned long __init xen_release_chunk(unsigned long start,
                                              unsigned long end)
{
        return xen_do_chunk(start, end, true);
}
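
/*
 * Walk the E820 map and populate RAM regions above max_pfn
 * (xen_start_info->nr_pages) with frames that were previously
 * released, spending at most credits_left pages.  *last_pfn records
 * how far population has progressed so the caller can grow max_pfn.
 */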
static unsigned long __init xen_populate_chunk(
        const struct e820entry *list, size_t map_size,
        unsigned long max_pfn, unsigned long *last_pfn,
        unsigned long credits_left)
{
        const struct e820entry *entry;
        unsigned int i;
        unsigned long done = 0;
        unsigned long dest_pfn;

        for (i = 0, entry = list; i < map_size; i++, entry++) {
                unsigned long credits = credits_left;
                unsigned long s_pfn;
                unsigned long e_pfn;
                unsigned long pfns;
                long capacity;

                if (credits <= 0)
                        break;

                if (entry->type != E820_RAM)
                        continue;

                e_pfn = PFN_UP(entry->addr + entry->size);

                /* We only care about E820 after the xen_start_info->nr_pages */
                if (e_pfn <= max_pfn)
                        continue;

                s_pfn = PFN_DOWN(entry->addr);
                /* If the E820 falls within the nr_pages, we want to start
                 * at the nr_pages PFN.
                 * If that would mean going past the E820 entry, skip it
                 */
                if (s_pfn <= max_pfn) {
                        capacity = e_pfn - max_pfn;
                        dest_pfn = max_pfn;
                } else {
                        /* last_pfn MUST be within E820_RAM regions */
                        if (*last_pfn && e_pfn >= *last_pfn)
                                s_pfn = *last_pfn;
                        capacity = e_pfn - s_pfn;
                        dest_pfn = s_pfn;
                }
                /* If we had filled this E820_RAM entry, go to the next one. */
                if (*last_pfn && (s_pfn >= *last_pfn))
                        continue;

                if (credits > capacity)
                        credits = capacity;

                pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
                done += pfns;
                credits_left -= pfns;
                *last_pfn = (dest_pfn + pfns);
        }
        return done;
}
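
/*
 * Make the PFN range [start_pfn, end_pfn) a 1:1 (identity) mapping,
 * first releasing any RAM pages below nr_pages that would otherwise
 * become unreachable.  The released and identity counters are
 * updated for the caller's statistics.
 */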
static void __init xen_set_identity_and_release_chunk(
        unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
        unsigned long *released, unsigned long *identity)
{
        unsigned long pfn;

        /*
         * If the PFNs are currently mapped, the VA mapping also needs
         * to be updated to be 1:1.
         */
        for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
                (void)HYPERVISOR_update_va_mapping(
                        (unsigned long)__va(pfn << PAGE_SHIFT),
                        mfn_pte(pfn, PAGE_KERNEL_IO), 0);

        if (start_pfn < nr_pages)
                *released += xen_release_chunk(
                        start_pfn, min(end_pfn, nr_pages));

        *identity += set_phys_range_identity(start_pfn, end_pfn);
}
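
/*
 * Apply the identity-map/release policy to every non-RAM region and
 * gap in the E820 map.  Returns the number of pages released back
 * to Xen.
 */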
static unsigned long __init xen_set_identity_and_release(
        const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
        phys_addr_t start = 0;
        unsigned long released = 0;
        unsigned long identity = 0;
        const struct e820entry *entry;
        int i;

        /*
         * Combine non-RAM regions and gaps until a RAM region (or the
         * end of the map) is reached, then set the 1:1 map and
         * release the pages (if available) in those non-RAM regions.
         *
         * The combined non-RAM regions are rounded to a whole number
         * of pages so any partial pages are accessible via the 1:1
         * mapping.  This is needed for some BIOSes that put (for
         * example) the DMI tables in a reserved region that begins on
         * a non-page boundary.
         */
        for (i = 0, entry = list; i < map_size; i++, entry++) {
                phys_addr_t end = entry->addr + entry->size;
                if (entry->type == E820_RAM || i == map_size - 1) {
                        unsigned long start_pfn = PFN_DOWN(start);
                        unsigned long end_pfn = PFN_UP(end);

                        if (entry->type == E820_RAM)
                                end_pfn = PFN_UP(entry->addr);

                        if (start_pfn < end_pfn)
                                xen_set_identity_and_release_chunk(
                                        start_pfn, end_pfn, nr_pages,
                                        &released, &identity);

                        start = end;
                }
        }

        if (released)
                printk(KERN_INFO "Released %lu pages of unused memory\n", released);
        if (identity)
                printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

        return released;
}
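
/*
 * Ask Xen for this domain's maximum reservation, clamped to
 * MAX_DOMAIN_PAGES, so xen_memory_setup() can size the extra
 * memory regions.
 */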
static unsigned long __init xen_get_max_pages(void)
{
        unsigned long max_pages = MAX_DOMAIN_PAGES;
        domid_t domid = DOMID_SELF;
        int ret;

        /*
         * For the initial domain we use the maximum reservation as
         * the maximum page.
         *
         * For guest domains the current maximum reservation reflects
         * the current maximum rather than the static maximum. In this
         * case the e820 map provided to us will cover the static
         * maximum region.
         */
        if (xen_initial_domain()) {
                ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
                if (ret > 0)
                        max_pages = ret;
        }

        return min(max_pages, MAX_DOMAIN_PAGES);
}
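
/*
 * RAM regions are trimmed inward to whole pages before being added to
 * the e820 map; other region types keep their partial pages.
 */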
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
        u64 end = start + size;

        /* Align RAM regions to page boundaries. */
        if (type == E820_RAM) {
                start = PAGE_ALIGN(start);
                end &= ~((u64)PAGE_SIZE - 1);
        }

        e820_add_region(start, end - start, type);
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
        static struct e820entry map[E820MAX] __initdata;

        unsigned long max_pfn = xen_start_info->nr_pages;
        unsigned long long mem_end;
        int rc;
        struct xen_memory_map memmap;
        unsigned long max_pages;
        unsigned long last_pfn = 0;
        unsigned long extra_pages = 0;
        unsigned long populated;
        int i;
        int op;

        max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
        mem_end = PFN_PHYS(max_pfn);

        memmap.nr_entries = E820MAX;
        set_xen_guest_handle(memmap.buffer, map);

        op = xen_initial_domain() ?
                XENMEM_machine_memory_map :
                XENMEM_memory_map;
        rc = HYPERVISOR_memory_op(op, &memmap);
        if (rc == -ENOSYS) {
                BUG_ON(xen_initial_domain());
                memmap.nr_entries = 1;
                map[0].addr = 0ULL;
                map[0].size = mem_end;
                /* 8MB slack (to balance backend allocations). */
                map[0].size += 8ULL << 20;
                map[0].type = E820_RAM;
                rc = 0;
        }
        BUG_ON(rc);

        /* Make sure the Xen-supplied memory map is well-ordered. */
        sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

        max_pages = xen_get_max_pages();
        if (max_pages > max_pfn)
                extra_pages += max_pages - max_pfn;

        /*
         * Set P2M for all non-RAM pages and E820 gaps to be identity
         * type PFNs.  Any RAM pages that would be made inaccessible by
         * this are first released.
         */
        xen_released_pages = xen_set_identity_and_release(
                map, memmap.nr_entries, max_pfn);

        /*
         * Populate back the non-RAM pages and E820 gaps that had been
         * released.
         */
        populated = xen_populate_chunk(map, memmap.nr_entries,
                        max_pfn, &last_pfn, xen_released_pages);

        extra_pages += (xen_released_pages - populated);

        if (last_pfn > max_pfn) {
                max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
                mem_end = PFN_PHYS(max_pfn);
        }
        /*
         * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
         * factor of the base size.  On non-highmem systems, the base
         * size is the full initial memory allocation; on highmem it
         * is limited to the max size of lowmem, so that it doesn't
         * get completely filled.
         *
         * In principle there could be a problem in lowmem systems if
         * the initial memory is also very large with respect to
         * lowmem, but we won't try to deal with that here.
         */
        extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                          extra_pages);
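
        /*
         * Rebuild the e820: clip each RAM entry to mem_end, divert up
         * to extra_pages of any remainder to xen_extra_mem, and mark
         * what is left unusable.  Entries are consumed piecewise;
         * i only advances once map[i] has been fully accounted for.
         */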
        i = 0;
        while (i < memmap.nr_entries) {
                u64 addr = map[i].addr;
                u64 size = map[i].size;
                u32 type = map[i].type;

                if (type == E820_RAM) {
                        if (addr < mem_end) {
                                size = min(size, mem_end - addr);
                        } else if (extra_pages) {
                                size = min(size, (u64)extra_pages * PAGE_SIZE);
                                extra_pages -= size / PAGE_SIZE;
                                xen_add_extra_mem(addr, size);
                        } else
                                type = E820_UNUSABLE;
                }

                xen_align_and_add_e820_region(addr, size, type);

                map[i].addr += size;
                map[i].size -= size;
                if (map[i].size == 0)
                        i++;
        }

        /*
         * In domU, the ISA region is normal, usable memory, but we
         * reserve ISA memory anyway because too many things poke
         * about in there.
         */
        e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
                        E820_RESERVED);

        /*
         * Reserve Xen bits:
         *  - mfn_list
         *  - xen_start_info
         * See comment above "struct start_info" in <xen/interface/xen.h>
         */
        memblock_reserve(__pa(xen_start_info->mfn_list),
                         xen_start_info->pt_base - xen_start_info->mfn_list);

        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

        return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
        u32 *mask;
        mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
        mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
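
/*
 * Register an entry point with the hypervisor for the given callback
 * type.  CALLBACKF_mask_events asks Xen to mask event delivery while
 * the callback runs.
 */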
static int __cpuinit register_callback(unsigned type, const void *func)
{
        struct callback_register callback = {
                .type = type,
                .address = XEN_CALLBACK(__KERNEL_CS, func),
                .flags = CALLBACKF_mask_events,
        };

        return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
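
/*
 * Register the pv sysenter entry point; if the CPU lacks the feature
 * or registration fails, clear the feature bit so the fast path is
 * never advertised.
 */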
void __cpuinit xen_enable_sysenter(void)
{
        int ret;
        unsigned sysenter_feature;

#ifdef CONFIG_X86_32
        sysenter_feature = X86_FEATURE_SEP;
#else
        sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

        if (!boot_cpu_has(sysenter_feature))
                return;

        ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
        if (ret != 0)
                setup_clear_cpu_cap(sysenter_feature);
}
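
/*
 * 64-bit only: register the native and compat syscall entry points.
 * A failure for the 64-bit callback is serious, since 64-bit
 * userspace has no other way to enter the kernel.
 */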
void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
        int ret;

        ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
        if (ret != 0) {
                printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
                /* Pretty fatal; 64-bit userspace has no other
                   mechanism for syscalls. */
        }

        if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
                ret = register_callback(CALLBACKTYPE_syscall32,
                                        xen_syscall32_target);
                if (ret != 0)
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
        }
#endif /* CONFIG_X86_64 */
}
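
/*
 * Main architecture setup for a Xen guest: enable VM assists,
 * register the event and failsafe callbacks, set up fast system
 * calls, and route the idle loop through the safe_halt() pvop.
 */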
void __init xen_arch_setup(void)
{
        xen_panic_handler_init();

        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

        if (!xen_feature(XENFEAT_auto_translated_physmap))
                HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                     VMASST_TYPE_pae_extended_cr3);

        if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
                BUG();

        xen_enable_sysenter();
        xen_enable_syscall();

#ifdef CONFIG_ACPI
        if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
                printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
                disable_acpi();
        }
#endif

        memcpy(boot_command_line, xen_start_info->cmd_line,
               MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
               COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

        /* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
        boot_cpu_data.hlt_works_ok = 1;
#endif
        disable_cpuidle();
        disable_cpufreq();
        WARN_ON(set_pm_idle_to_default());

        fiddle_vdso();
}