/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>

#include <trace/events/thp.h>

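/*
 * Record the process table's base and size in the second doubleword
 * (patb1) of the partition table entry. PATB_GR marks the table as a
 * radix process table; it must match the HR bit set in patb0.
 */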
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
                                         unsigned long table_size)
{
        unsigned long patb1 = base | table_size | PATB_GR;

        partition_tb->patb1 = cpu_to_be64(patb1);
        return 0;
}

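/*
 * Boot-time page table allocator: grab a naturally aligned, zeroed
 * block from memblock. Used before the slab allocator is available.
 */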
static __ref void *early_alloc_pgtable(unsigned long size)
{
        void *pt;

        pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
        memset(pt, 0, size);

        return pt;
}

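/*
 * Map one kernel page of @map_page_size at effective address @ea to
 * physical address @pa. Intermediate levels come from the slab
 * allocators once they are up, and from memblock during early boot.
 */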
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                           pgprot_t flags,
                           unsigned int map_page_size)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        /*
         * Make sure task size is correct as per the max addr
         */
        BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
        if (slab_is_available()) {
                pgdp = pgd_offset_k(ea);
                pudp = pud_alloc(&init_mm, pgdp, ea);
                if (!pudp)
                        return -ENOMEM;
                if (map_page_size == PUD_SIZE) {
                        ptep = (pte_t *)pudp;
                        goto set_the_pte;
                }
                pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp)
                        return -ENOMEM;
                if (map_page_size == PMD_SIZE) {
                        ptep = (pte_t *)pmdp;
                        goto set_the_pte;
                }
                ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
        } else {
                pgdp = pgd_offset_k(ea);
                if (pgd_none(*pgdp)) {
                        pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
                        BUG_ON(pudp == NULL);
                        pgd_populate(&init_mm, pgdp, pudp);
                }
                pudp = pud_offset(pgdp, ea);
                if (map_page_size == PUD_SIZE) {
                        ptep = (pte_t *)pudp;
                        goto set_the_pte;
                }
                if (pud_none(*pudp)) {
                        pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
                        BUG_ON(pmdp == NULL);
                        pud_populate(&init_mm, pudp, pmdp);
                }
                pmdp = pmd_offset(pudp, ea);
                if (map_page_size == PMD_SIZE) {
                        ptep = (pte_t *)pmdp;
                        goto set_the_pte;
                }
                if (!pmd_present(*pmdp)) {
                        ptep = early_alloc_pgtable(PAGE_SIZE);
                        BUG_ON(ptep == NULL);
                        pmd_populate_kernel(&init_mm, pmdp, ptep);
                }
                ptep = pte_offset_kernel(pmdp, ea);
        }

set_the_pte:
        set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
        smp_wmb();
        return 0;
}

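/*
 * Create the kernel linear mapping and set up the process table.
 * Each memblock region is mapped with the largest supported page size
 * first (1G, then 2M, then the base page size), falling back to the
 * next smaller size for whatever tail is left unaligned.
 */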
static void __init radix_init_pgtable(void)
{
        int loop_count;
        u64 base, end, start_addr;
        unsigned long rts_field;
        struct memblock_region *reg;
        unsigned long linear_page_size;

        /* We don't support SLB for radix */
        mmu_slb_size = 0;
        /*
         * Create the linear mapping, using standard page size for now
         */
        loop_count = 0;
        for_each_memblock(memory, reg) {

                start_addr = reg->base;

redo:
                if (loop_count < 1 && mmu_psize_defs[MMU_PAGE_1G].shift)
                        linear_page_size = PUD_SIZE;
                else if (loop_count < 2 && mmu_psize_defs[MMU_PAGE_2M].shift)
                        linear_page_size = PMD_SIZE;
                else
                        linear_page_size = PAGE_SIZE;

                base = _ALIGN_UP(start_addr, linear_page_size);
                end = _ALIGN_DOWN(reg->base + reg->size, linear_page_size);

                pr_info("Mapping range 0x%lx - 0x%lx with 0x%lx\n",
                        (unsigned long)base, (unsigned long)end,
                        linear_page_size);

                while (base < end) {
                        radix__map_kernel_page((unsigned long)__va(base),
                                               base, PAGE_KERNEL_X,
                                               linear_page_size);
                        base += linear_page_size;
                }
                /*
                 * Map the rest using a lower page size
                 */
                if (end < reg->base + reg->size) {
                        start_addr = end;
                        loop_count++;
                        goto redo;
                }
        }
        /*
         * Allocate the partition table and process table for the
         * host.
         */
        BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 23), "Process table size too large.");
        process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
        /*
         * Fill in the process table.
         */
        rts_field = radix__get_tree_size();
        process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
        /*
         * Fill in the partition table. We are supposed to use the effective
         * address of the process table here, but our linear mapping also
         * allows us to use the physical address directly.
         */
        ppc_md.register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
        pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
}

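/*
 * Allocate the partition table and point patb0 at the kernel's radix
 * tree, with PATB_HR selecting host radix translation.
 */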
static void __init radix_init_partition_table(void)
{
        unsigned long rts_field;

        rts_field = radix__get_tree_size();

        BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
        partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
        partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
                                          RADIX_PGD_INDEX_SIZE | PATB_HR);
        pr_info("Initializing Radix MMU\n");
        pr_info("Partition table %p\n", partition_tb);

        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
        /*
         * Update the partition table control register with the table's
         * base address and size, encoded as log2(size) - 12 (64K here).
         */
        mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
}

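/* On bare metal, register the process table directly in the partition table */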
void __init radix_init_native(void)
{
        ppc_md.register_process_table = native_register_process_table;
}

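/*
 * Translate a page-size shift from the device tree (e.g. 0xc for 4K,
 * 0x10 for 64K) into the kernel's MMU page-size index, or -1 if the
 * size is not one we support.
 */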
static int __init get_idx_from_shift(unsigned int shift)
{
        int idx = -1;

        switch (shift) {
        case 0xc:
                idx = MMU_PAGE_4K;
                break;
        case 0x10:
                idx = MMU_PAGE_64K;
                break;
        case 0x15:
                idx = MMU_PAGE_2M;
                break;
        case 0x1e:
                idx = MMU_PAGE_1G;
                break;
        }
        return idx;
}

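/*
 * Scan "cpu" nodes for the "ibm,processor-radix-AP-encodings" property.
 * Each 32-bit cell carries the AP (page-size) encoding in its top 3
 * bits and the page-size shift in the remaining bits.
 */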
static int __init radix_dt_scan_page_sizes(unsigned long node,
                                           const char *uname, int depth,
                                           void *data)
{
        int size = 0;
        int shift, idx;
        unsigned int ap;
        const __be32 *prop;
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
        if (!prop)
                return 0;

        pr_info("Page sizes from device-tree:\n");
        for (; size >= 4; size -= 4, ++prop) {

                struct mmu_psize_def *def;

                /* top 3 bits are the AP encoding */
                shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
                ap = be32_to_cpu(prop[0]) >> 29;
                pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

                idx = get_idx_from_shift(shift);
                if (idx < 0)
                        continue;

                def = &mmu_psize_defs[idx];
                def->shift = shift;
                def->ap = ap;
        }

        /* needed ? */
        cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
        return 1;
}

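/*
 * Pick the supported page sizes, preferring what the device tree
 * advertises and falling back to 4K and 64K otherwise.
 */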
static void __init radix_init_page_sizes(void)
{
        int rc;

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
        if (rc != 0) /* Found */
                goto found;
        /*
         * Let's assume we have page 4K and 64K support
         */
        mmu_psize_defs[MMU_PAGE_4K].shift = 12;
        mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

        mmu_psize_defs[MMU_PAGE_64K].shift = 16;
        mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        if (mmu_psize_defs[MMU_PAGE_2M].shift) {
                /*
                 * map vmemmap using 2M if available
                 */
                mmu_vmemmap_psize = MMU_PAGE_2M;
        }
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
        return;
}

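/*
 * Boot CPU setup for radix: select page sizes, install the radix
 * geometry into the generic page table constants, and, on bare metal,
 * create the partition table and enable radix mode in the LPCR.
 */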
void __init radix__early_init_mmu(void)
{
        unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
        /* PAGE_SIZE mappings */
        mmu_virtual_psize = MMU_PAGE_64K;
#else
        mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* vmemmap mapping */
        mmu_vmemmap_psize = mmu_virtual_psize;
#endif
        /*
         * Initialize page table sizes
         */
        __pte_index_size = RADIX_PTE_INDEX_SIZE;
        __pmd_index_size = RADIX_PMD_INDEX_SIZE;
        __pud_index_size = RADIX_PUD_INDEX_SIZE;
        __pgd_index_size = RADIX_PGD_INDEX_SIZE;
        __pmd_cache_index = RADIX_PMD_INDEX_SIZE;
        __pte_table_size = RADIX_PTE_TABLE_SIZE;
        __pmd_table_size = RADIX_PMD_TABLE_SIZE;
        __pud_table_size = RADIX_PUD_TABLE_SIZE;
        __pgd_table_size = RADIX_PGD_TABLE_SIZE;

        __pmd_val_bits = RADIX_PMD_VAL_BITS;
        __pud_val_bits = RADIX_PUD_VAL_BITS;
        __pgd_val_bits = RADIX_PGD_VAL_BITS;

        __kernel_virt_start = RADIX_KERN_VIRT_START;
        __kernel_virt_size = RADIX_KERN_VIRT_SIZE;
        __vmalloc_start = RADIX_VMALLOC_START;
        __vmalloc_end = RADIX_VMALLOC_END;
        vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
        ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
        pci_io_base = ISA_IO_BASE;
#endif

        /*
         * For now radix also uses the same frag size
         */
        __pte_frag_nr = H_PTE_FRAG_NR;
        __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

        radix_init_page_sizes();
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                radix_init_native();
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
                radix_init_partition_table();
        }

        radix_init_pgtable();
}

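/*
 * Secondary CPU bring-up: mirror the boot CPU's LPCR and PTCR settings
 * so every thread uses the same partition table.
 */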
void radix__early_init_mmu_secondary(void)
{
        unsigned long lpcr;
        /*
         * Update the partition table control register and enable UPRT
         */
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

                mtspr(SPRN_PTCR,
                      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
        }
}

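/*
 * Called with the first memblock region; clamp the real-mode area and
 * early memblock allocations so they stay within memory we can access
 * during early boot.
 */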
void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                       phys_addr_t first_memblock_size)
{
        /* We don't currently support the first MEMBLOCK not mapping 0
         * physical on those processors
         */
        BUG_ON(first_memblock_base != 0);
        /*
         * We limit the allocations that depend on ppc64_rma_size
         * to first_memblock_size. We also clamp it to 1GB to
         * avoid some funky things such as RTAS bugs.
         *
         * On a radix config we really don't have a limitation
         * on real mode access, but keeping it as above works
         * well enough.
         */
        ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
        /*
         * Finally limit subsequent allocations. We really don't want
         * to limit the memblock allocations to rma_size. FIXME!! should
         * we even limit at all ?
         */
        memblock_set_current_limit(first_memblock_base + first_memblock_size);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
                                            unsigned long page_size,
                                            unsigned long phys)
{
        /* Create a PTE encoding */
        unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

        BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
        /* FIXME!! intel does more. We should free page tables mapping vmemmap ? */
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

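/*
 * Clear/set bits in a huge-page PMD. The PMD is updated like a PTE via
 * radix__pte_update(), and the change is traced for THP debugging.
 */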
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
                                         pmd_t *pmdp, unsigned long clr,
                                         unsigned long set)
{
        unsigned long old;

#ifdef CONFIG_DEBUG_VM
        WARN_ON(!radix__pmd_trans_huge(*pmdp));
        assert_spin_locked(&mm->page_table_lock);
#endif

        old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
        trace_hugepage_update(addr, old, clr, set);

        return old;
}

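/*
 * Used by khugepaged to clear a normal (non-huge) pmd during collapse;
 * returns the old pmd after flushing the range.
 */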
pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                                 pmd_t *pmdp)
{
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
        /*
         * khugepaged calls this for a normal pmd
         */
        pmd = *pmdp;
        pmd_clear(pmdp);
        /* FIXME!! Verify whether we need this kick below */
        kick_all_cpus_sync();
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we treat the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                       pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pte_t *ptep;
        pgtable_t pgtable;
        struct list_head *lh;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
        /* Zero out the PTE slots that were clobbered by the list_head */
        ptep = (pte_t *) pgtable;
        *ptep = __pte(0);
        ptep++;
        *ptep = __pte(0);
        return pgtable;
}

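/*
 * Atomically clear a huge-page PMD and return its old value; see the
 * comment below on why all CPUs must be kicked before returning.
 */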
pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
                                     unsigned long addr, pmd_t *pmdp)
{
        pmd_t old_pmd;
        unsigned long old;

        old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
        old_pmd = __pmd(old);
        /*
         * Serialize against find_linux_pte_or_hugepte which does lock-less
         * lookup in page tables with local interrupts disabled. For huge pages
         * it casts pmd_t to pte_t. Since the format of pte_t is different from
         * pmd_t we want to prevent transit from pmd pointing to page table
         * to pmd pointing to huge page (and back) while interrupts are disabled.
         * We clear pmd to possibly replace it with page table pointer in
         * different code paths. So make sure we wait for the parallel
         * find_linux_pte_or_hugepte to finish.
         */
        kick_all_cpus_sync();
        return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
        /* For radix 2M at PMD level means thp */
        if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
                return 1;
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */