/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "vdso.h"
#include "mmu.h"
#include "p2m.h"
#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
/* E820 map used during setting up memory. */
static struct e820entry xen_e820_map[E820MAX] __initdata;
static u32 xen_e820_map_entries __initdata;
/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long next_area_mfn;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
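
/*
 * Illustrative layout note (not part of the original source): on x86-64
 * with 4 KiB pages, P2M_PER_PAGE is 512, so REMAP_SIZE is 509 and the
 * three bookkeeping fields plus mfns[] fill the page exactly:
 *
 *	next_area_mfn | target_pfn | size | mfns[0] ... mfns[508]
 *
 * Each saved chunk links to the previously saved one via next_area_mfn,
 * forming a singly linked list threaded through the pages being
 * remapped, with xen_remap_mfn as its head.
 */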
/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
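
/*
 * Worked example (illustrative, not from the original source): with
 * 4 KiB pages, a domain booted with 512 MiB (131072 pages) has its
 * extra memory clamped to at most 10 * 131072 = 1310720 pages, i.e.
 * 5 GiB of extra space.
 */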
static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}
static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);
}
static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;
	phys_addr_t start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start;
		size_r = xen_extra_mem[i].size;

		/* Start of region. */
		if (start_r == start) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].start += size;
			xen_extra_mem[i].size -= size;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start + size) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].size -= size;
			break;
		}
		/* Mid of region. */
		if (start > start_r && start < start_r + size_r) {
			BUG_ON(start + size > start_r + size_r);
			xen_extra_mem[i].size = start - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start + size, start_r + size_r -
					  (start + size));
			break;
		}
	}

	memblock_free(start, size);
}
/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded
 * as invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;
	phys_addr_t addr = PFN_PHYS(pfn);

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (addr >= xen_extra_mem[i].start &&
		    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}
/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].size)
			continue;
		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}
/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820entry *entry = xen_e820_map;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn < *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}
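
/*
 * Example (illustrative, not from the original source): with a single
 * RAM entry covering pfns [0x2000, 0x3000) and *min_pfn == 0x1000, the
 * entry starts above *min_pfn, so *min_pfn is advanced to 0x2000 and
 * 0x1000 pages are reported. Had *min_pfn been 0x2800, the range would
 * start there instead and 0x800 pages would be returned.
 */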
/*
 * Helper function to free unused memory.
 */
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}
/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}
/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory'
 * and its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}
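
/*
 * Sketch of the resulting list (illustrative, not from the original
 * source): after two chunks have been saved, xen_remap_mfn points at the
 * first mfn of the chunk saved last, whose next_area_mfn points at the
 * chunk saved before it, terminating in INVALID_P2M_ENTRY:
 *
 *	xen_remap_mfn -> [chunk B: target_pfn, size, mfns[]]
 *	                   -> [chunk A: ...] -> INVALID_P2M_ENTRY
 */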
/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}
static void __init xen_set_identity_and_remap(unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry = xen_e820_map;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						start_pfn, end_pfn, nr_pages,
						last_pfn);
			start = end;
		}
	}

	pr_info("Released %ld page(s)\n", xen_released_pages);
}
/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn remap to which pfn) is contained in the
 * to be remapped memory itself in a linked list anchored at xen_remap_mfn.
 * This scheme allows the different chunks to be remapped in arbitrary
 * order while keeping the resulting mapping independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}
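
/*
 * Coalescing example (illustrative, not from the original source): if a
 * chunk's target_pfn equals pfn_s + len of the range collected so far,
 * the two are merged and a single xen_del_extra_mem() call covers both;
 * only when a chunk starts elsewhere is the accumulated range flushed
 * and a new one begun.
 */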
static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

#ifdef CONFIG_X86_32
	limit = GB(64) / PAGE_SIZE;
#else
	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;
#endif
	return limit;
}
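
/*
 * Arithmetic check (illustrative): with 4 KiB pages, GB(64) / PAGE_SIZE
 * is 2^36 / 2^12 = 16777216 pfns, and GB(512) / PAGE_SIZE is
 * 2^39 / 2^12 = 134217728 pfns.
 */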
static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}
static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}
static void __init xen_ignore_unusable(void)
{
	struct e820entry *entry = xen_e820_map;
	unsigned int i;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}
static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
	unsigned long extra = 0;
	const struct e820entry *entry = xen_e820_map;
	int i;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		unsigned long start_pfn = PFN_DOWN(entry->addr);
		unsigned long end_pfn = PFN_UP(entry->addr + entry->size);

		if (start_pfn >= max_pfn)
			break;
		if (entry->type == E820_RAM)
			continue;
		if (end_pfn >= max_pfn)
			end_pfn = max_pfn;
		extra += end_pfn - start_pfn;
	}

	return extra;
}
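
/*
 * Example (illustrative, not from the original source): a 16 MiB
 * reserved E820 hole below max_pfn contributes 4096 pfns to the count,
 * since those pages will be identity mapped and the RAM behind them
 * remapped elsewhere.
 */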
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
		if (entry->type == E820_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;
		entry++;
	}

	return true;
}
/*
 * Find a free area in physical memory not yet reserved and compliant with
 * the E820 map.
 * Used to relocate pre-allocated areas like initrd or p2m list which are in
 * conflict with the E820 map to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820entry *entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
		if (entry->type != E820_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}
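
/*
 * Usage sketch (illustrative): the initrd relocation in
 * xen_memory_setup() below does essentially
 *
 *	new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
 *	if (new_area)
 *		xen_phys_memcpy(new_area, start, size);
 *
 * relying on the returned area already being memblock_reserve()d.
 */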
/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}
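
/*
 * Example (illustrative, not from the original source): the copy is
 * chunked so that neither early_memremap() window exceeds the
 * NR_FIX_BTMAPS fixmap slots; each iteration maps both ends, copies
 * min(dest_len, src_len) bytes and advances dest, src and n.
 */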
/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	if (!xen_is_e820_reserved(start, size)) {
		memblock_reserve(start, size);
		return;
	}

#ifdef CONFIG_X86_32
	/*
	 * Relocating the p2m on 32 bit system to an arbitrary virtual address
	 * is not supported, so just give up.
	 */
	xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
	BUG();
#else
	xen_relocate_p2m();
#endif
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	xen_parse_512gb();
	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_map[0].addr = 0ULL;
		xen_e820_map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_map[0].size += 8ULL << 20;
		xen_e820_map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_map_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
			  &xen_e820_map_entries);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_count_remap_pages(max_pfn);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			   extra_pages, max_pages - max_pfn);
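	/*
	 * Worked example (illustrative, not from the original source):
	 * on a 1 GiB domain (max_pfn = 262144) whose static maximum
	 * allows 131072 further pages, min3() picks 131072, since
	 * 10 * 262144 and the accumulated extra_pages are both at
	 * least as large.
	 */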
	i = 0;
	addr = xen_e820_map[0].addr;
	size = xen_e820_map[0].size;
	while (i < xen_e820_map_entries) {
		chunk_size = size;
		type = xen_e820_map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				extra_pages -= PFN_DOWN(chunk_size);
				xen_add_extra_mem(addr, chunk_size);
				xen_max_p2m_pfn = PFN_DOWN(addr + chunk_size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_map_entries) {
				addr = xen_e820_map[i].addr;
				size = xen_e820_map[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);
	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
				 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_set_identity_and_remap(max_pfn);

	return "Xen";
}
/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	xen_e820_map_entries = memmap.nr_entries;

	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	for (i = 0; i < xen_e820_map_entries; i++)
		e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
				xen_e820_map[i].type);

	/* Remove p2m info, it is not needed. */
	xen_start_info->mfn_list = 0;
	xen_start_info->first_p2m_pfn = 0;
	xen_start_info->nr_p2m_frames = 0;

	return "Xen";
}
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask;

	/*
	 * This could be called before selected_vdso32 is initialized, so
	 * just fiddle with both possible images.  vdso_image_32_syscall
	 * can't be selected, since it only exists on 64-bit systems.
	 */
	mask = vdso_image_32_int80.data +
		vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = vdso_image_32_sysenter.data +
		vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
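
/*
 * Usage note (illustrative): the helpers below register the sysenter,
 * syscall and event callbacks through this function, e.g.
 *
 *	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
 *
 * with events masked on entry via CALLBACKF_mask_events.
 */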
void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}
void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}
void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}
/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}