/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

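/*
 * Allocate a block of zeroed pages instead of a single zero page.
 * ZERO_PAGE(vaddr) uses zero_page_mask to pick one of these pages
 * based on the faulting virtual address, which spreads read-only
 * accesses to the zero page across several cache colors.
 */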
static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the kernel page tables and enables
 * dynamic address translation (DAT).
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;

	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > (1UL << 42)) {
		/* Four page table levels: region-second table, 8 PB address space */
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		/* Three page table levels: region-third table, 4 TB address space */
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long) * 2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);	/* primary ASCE */
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);	/* secondary ASCE */
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);	/* home space ASCE */
	__arch_local_irq_stosm(0x04);	/* set DAT bit in the PSW mask */

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	/* ZONE_DMA covers memory below 2 GB for 31-bit capable allocations */
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

void mark_rodata_ro(void)
{
	/* Text and rodata are already protected. Nothing to do here. */
	pr_info("Write protecting the kernel read-only data: %luk\n",
		((unsigned long)&_eshared - (unsigned long)&_stext) >> 10);
}

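/*
 * mem_init() attaches the boot CPU to init_mm, sets up guest page
 * hinting (CMMA), hands all bootmem pages over to the buddy
 * allocator and allocates the empty zero pages.
 */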
void __init mem_init(void)
{
	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));
	atomic_set(&init_mm.context.attach_count, 1);

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */

	mem_init_print_info(NULL);
}

void free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

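/*
 * Hot-added (standby) memory is mapped first and then registered with
 * the page allocator zone by zone: ranges below MAX_DMA_ADDRESS go to
 * ZONE_DMA, ranges up to the end of initial DRAM go to ZONE_NORMAL,
 * and anything beyond that goes to ZONE_MOVABLE.
 */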
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
	unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	unsigned long nr_pages;
	int rc, zone_enum;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	while (size_pages > 0) {
		if (start_pfn < dma_end_pfn) {
			nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
				   dma_end_pfn - start_pfn : size_pages;
			zone_enum = ZONE_DMA;
		} else if (start_pfn < normal_end_pfn) {
			nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
				   normal_end_pfn - start_pfn : size_pages;
			zone_enum = ZONE_NORMAL;
		} else {
			nr_pages = size_pages;
			zone_enum = ZONE_MOVABLE;
		}
		/* Add only the chunk that fits into the current zone */
		rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
				 start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

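/*
 * sclp.rzm is the size of a single storage increment as reported by
 * the SCLP; memory can only be added or removed in multiples of it.
 */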
unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	/*
	 * There is no hardware or firmware interface which could trigger a
	 * hot memory remove on s390. So there is nothing that needs to be
	 * implemented.
	 */
	return -EBUSY;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */