arch/powerpc/mm/slice.c
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/spu.h>

/* some sanity checks */
#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
#error PGTABLE_RANGE exceeds slice_mask high_slices size
#endif

static DEFINE_SPINLOCK(slice_convert_lock);


#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
        char *p, buf[16 + 3 + 64 + 1];
        int i;

        if (!_slice_debug)
                return;
        p = buf;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                *(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
        *(p++) = ' ';
        *(p++) = '-';
        *(p++) = ' ';
        for (i = 0; i < SLICE_NUM_HIGH; i++)
                *(p++) = (mask.high_slices & (1ul << i)) ? '1' : '0';
        *(p++) = 0;

        printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

static struct slice_mask slice_range_to_mask(unsigned long start,
                                             unsigned long len)
{
        unsigned long end = start + len - 1;
        struct slice_mask ret = { 0, 0 };

        if (start < SLICE_LOW_TOP) {
                unsigned long mend = min(end, SLICE_LOW_TOP);
                unsigned long mstart = min(start, SLICE_LOW_TOP);

                ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
                        - (1u << GET_LOW_SLICE_INDEX(mstart));
        }

        if ((start + len) > SLICE_LOW_TOP)
                ret.high_slices = (1ul << (GET_HIGH_SLICE_INDEX(end) + 1))
                        - (1ul << GET_HIGH_SLICE_INDEX(start));

        return ret;
}
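
/*
 * Editor's note, a worked example of the arithmetic above (assuming the
 * usual ppc64 layout: SLICE_LOW_SHIFT == 28, i.e. 256MB low slices below
 * SLICE_LOW_TOP == 4GB). For start = 0x10000000, len = 0x30000000:
 * end = 0x3fffffff, GET_LOW_SLICE_INDEX(mstart) == 1 and
 * GET_LOW_SLICE_INDEX(mend) == 3, so low_slices = (1 << 4) - (1 << 1)
 * = 0xe, i.e. slices 1..3. Subtracting the two powers of two builds the
 * contiguous run of set bits in a single step.
 */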

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
                              unsigned long len)
{
        struct vm_area_struct *vma;

        if ((mm->task_size - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
        return (!vma || (addr + len) <= vma->vm_start);
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
        return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
                                   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
        unsigned long start = slice << SLICE_HIGH_SHIFT;
        unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

        /* Hack, so that each address is controlled by exactly one
         * of the high or low area bitmaps, the first high area starts
         * at 4GB, not 0 */
        if (start == 0)
                start = SLICE_LOW_TOP;

        return !slice_area_is_free(mm, start, end - start);
}

static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
        struct slice_mask ret = { 0, 0 };
        unsigned long i;

        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (!slice_low_has_vma(mm, i))
                        ret.low_slices |= 1u << i;

        if (mm->task_size <= SLICE_LOW_TOP)
                return ret;

        for (i = 0; i < SLICE_NUM_HIGH; i++)
                if (!slice_high_has_vma(mm, i))
                        ret.high_slices |= 1ul << i;

        return ret;
}

static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
        unsigned char *hpsizes;
        int index, mask_index;
        struct slice_mask ret = { 0, 0 };
        unsigned long i;
        u64 lpsizes;

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (((lpsizes >> (i * 4)) & 0xf) == psize)
                        ret.low_slices |= 1u << i;

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < SLICE_NUM_HIGH; i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
                        ret.high_slices |= 1ul << i;
        }

        return ret;
}
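
/*
 * Editor's note on the encoding read above: each slice's page-size index
 * is a 4-bit nibble. low_slices_psize is a u64 holding one nibble per low
 * slice (slice i at bits i*4..i*4+3), while high_slices_psize is a byte
 * array packing two high slices per byte: slice i lives in byte i >> 1,
 * nibble i & 1. Reading high slice 5, say, means taking byte 2 and
 * shifting right by 4.
 */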

static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
        return (mask.low_slices & available.low_slices) == mask.low_slices &&
                (mask.high_slices & available.high_slices) == mask.high_slices;
}

static void slice_flush_segments(void *parm)
{
        struct mm_struct *mm = parm;
        unsigned long flags;

        if (mm != current->active_mm)
                return;

        /* update the paca copy of the context struct */
        get_paca()->context = current->active_mm->context;

        local_irq_save(flags);
        slb_flush_and_rebolt();
        local_irq_restore(flags);
}
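
/*
 * Editor's note: the flush above is needed because a segment's base page
 * size is encoded in its SLB entries; slice_get_unmapped_area() runs
 * slice_flush_segments() on every CPU via on_each_cpu() after a
 * conversion, and the active_mm check makes CPUs running other address
 * spaces return without doing anything.
 */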

static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
        int index, mask_index;
        /* Write the new slice psize bits */
        unsigned char *hpsizes;
        u64 lpsizes;
        unsigned long i, flags;

        slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
        slice_print_mask(" mask", mask);

        /* We need to use a spinlock here to protect against
         * concurrent 64k -> 4k demotion ...
         */
        spin_lock_irqsave(&slice_convert_lock, flags);

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (mask.low_slices & (1u << i))
                        lpsizes = (lpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));

        /* Assign the value back */
        mm->context.low_slices_psize = lpsizes;

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < SLICE_NUM_HIGH; i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (mask.high_slices & (1ul << i))
                        hpsizes[index] = (hpsizes[index] &
                                          ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  mm->context.low_slices_psize,
                  mm->context.high_slices_psize);

        spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
        spu_flush_all_slbs(mm);
#endif
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on the 'end' parameter);
 * return a boolean indicating whether the slice is marked as available
 * in the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
                                 struct slice_mask available,
                                 int end,
                                 unsigned long *boundary_addr)
{
        unsigned long slice;
        if (addr < SLICE_LOW_TOP) {
                slice = GET_LOW_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
                return !!(available.low_slices & (1u << slice));
        } else {
                slice = GET_HIGH_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) ?
                        ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
                return !!(available.high_slices & (1ul << slice));
        }
}
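
/*
 * Editor's note: with end == 0 this reports the slice's start boundary,
 * with end == 1 its end boundary. E.g. (256MB low slices again)
 * addr = 0x12345678 lies in low slice 1; end == 1 gives *boundary_addr =
 * 2 << 28 = 0x20000000, end == 0 gives 0x10000000. The ternary in the
 * high-slice case special-cases high slice 0, whose start boundary is
 * SLICE_LOW_TOP (4GB) rather than 0, matching slice_high_has_vma().
 */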

static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
                                              unsigned long len,
                                              struct slice_mask available,
                                              int psize)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long addr, found, next_end;
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        addr = TASK_UNMAPPED_BASE;
        while (addr < TASK_SIZE) {
                info.low_limit = addr;
                if (!slice_scan_available(addr, available, 1, &addr))
                        continue;

 next_slice:
                /*
                 * At this point [info.low_limit; addr) covers
                 * available slices only and ends at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the next available slice.
                 */
                if (addr >= TASK_SIZE)
                        addr = TASK_SIZE;
                else if (slice_scan_available(addr, available, 1, &next_end)) {
                        addr = next_end;
                        goto next_slice;
                }
                info.high_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
                                             unsigned long len,
                                             struct slice_mask available,
                                             int psize)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long addr, found, prev;
        struct vm_unmapped_area_info info;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        addr = mm->mmap_base;
        while (addr > PAGE_SIZE) {
                info.high_limit = addr;
                if (!slice_scan_available(addr - 1, available, 0, &addr))
                        continue;

 prev_slice:
                /*
                 * At this point [addr; info.high_limit) covers
                 * available slices only and starts at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the previous available slice.
                 */
                if (addr < PAGE_SIZE)
                        addr = PAGE_SIZE;
                else if (slice_scan_available(addr - 1, available, 0, &prev)) {
                        addr = prev;
                        goto prev_slice;
                }
                info.low_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return slice_find_area_bottomup(mm, len, available, psize);
}


static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
                                     struct slice_mask mask, int psize,
                                     int topdown)
{
        if (topdown)
                return slice_find_area_topdown(mm, len, mask, psize);
        else
                return slice_find_area_bottomup(mm, len, mask, psize);
}

#define or_mask(dst, src)       do {                    \
        (dst).low_slices |= (src).low_slices;           \
        (dst).high_slices |= (src).high_slices;         \
} while (0)

#define andnot_mask(dst, src)   do {                    \
        (dst).low_slices &= ~(src).low_slices;          \
        (dst).high_slices &= ~(src).high_slices;        \
} while (0)

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE   MMU_PAGE_64K
#else
#define MMU_PAGE_BASE   MMU_PAGE_4K
#endif
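
/*
 * Editor's note: or_mask()/andnot_mask() above combine both halves of a
 * slice_mask in one statement; MMU_PAGE_BASE is the kernel's base page
 * size index, used in slice_get_unmapped_area() below to decide whether
 * newly converted slices require an immediate segment flush on all CPUs.
 */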

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                                      unsigned long flags, unsigned int psize,
                                      int topdown)
{
        struct slice_mask mask = {0, 0};
        struct slice_mask good_mask;
        struct slice_mask potential_mask = {0, 0} /* silence stupid warning */;
        struct slice_mask compat_mask = {0, 0};
        int fixed = (flags & MAP_FIXED);
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        struct mm_struct *mm = current->mm;
        unsigned long newaddr;

        /* Sanity checks */
        BUG_ON(mm->task_size == 0);

        slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
        slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
                  addr, len, flags, topdown);

        if (len > mm->task_size)
                return -ENOMEM;
        if (len & ((1ul << pshift) - 1))
                return -EINVAL;
        if (fixed && (addr & ((1ul << pshift) - 1)))
                return -EINVAL;
        if (fixed && addr > (mm->task_size - len))
                return -ENOMEM;

        /* If hint, make sure it matches our alignment restrictions */
        if (!fixed && addr) {
                addr = _ALIGN_UP(addr, 1ul << pshift);
                slice_dbg(" aligned addr=%lx\n", addr);
                /* Ignore hint if it's too large or overlaps a VMA */
                if (addr > mm->task_size - len ||
                    !slice_area_is_free(mm, addr, len))
                        addr = 0;
        }

        /* First make up a "good" mask of slices that have the right size
         * already
         */
        good_mask = slice_mask_for_size(mm, psize);
        slice_print_mask(" good_mask", good_mask);

        /*
         * Here "good" means slices that are already the right page size,
         * "compat" means slices that have a compatible page size (i.e.
         * 4k in a 64k pagesize kernel), and "free" means slices without
         * any VMAs.
         *
         * If MAP_FIXED:
         *      check if fits in good | compat => OK
         *      check if fits in good | compat | free => convert free
         *      else bad
         * If have hint:
         *      check if hint fits in good => OK
         *      check if hint fits in good | free => convert free
         * Otherwise:
         *      search in good, found => OK
         *      search in good | free, found => convert free
         *      search in good | compat | free, found => convert free.
         */
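
        /*
         * Editor's note, a concrete walk through the table above: on a
         * 64K-page kernel an unhinted 64K mmap() first searches slices
         * already marked 64K, then also considers free slices (converting
         * any it uses), and only then falls back to slices already demoted
         * to 4K, which can still host 64K "combo" pages.
         */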

#ifdef CONFIG_PPC_64K_PAGES
        /* If we support combo pages, we can allow 64k pages in 4k slices */
        if (psize == MMU_PAGE_64K) {
                compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
                if (fixed)
                        or_mask(good_mask, compat_mask);
        }
#endif

        /* First check hint if it's valid or if we have MAP_FIXED */
        if (addr != 0 || fixed) {
                /* Build a mask for the requested range */
                mask = slice_range_to_mask(addr, len);
                slice_print_mask(" mask", mask);

                /* Check if we fit in the good mask. If we do, we just return,
                 * nothing else to do
                 */
                if (slice_check_fit(mask, good_mask)) {
                        slice_dbg(" fits good !\n");
                        return addr;
                }
        } else {
                /* Now let's see if we can find something in the existing
                 * slices for that size
                 */
                newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
                if (newaddr != -ENOMEM) {
                        /* Found within the good mask, we don't have to setup,
                         * we thus return directly
                         */
                        slice_dbg(" found area at 0x%lx\n", newaddr);
                        return newaddr;
                }
        }

        /* We don't fit in the good mask, check what other slices are
         * empty and thus can be converted
         */
        potential_mask = slice_mask_for_free(mm);
        or_mask(potential_mask, good_mask);
        slice_print_mask(" potential", potential_mask);

        if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
                slice_dbg(" fits potential !\n");
                goto convert;
        }

        /* If we have MAP_FIXED and failed the above steps, then error out */
        if (fixed)
                return -EBUSY;

        slice_dbg(" search...\n");

        /* If we had a hint that didn't work out, see if we can fit
         * anywhere in the good area.
         */
        if (addr) {
                addr = slice_find_area(mm, len, good_mask, psize, topdown);
                if (addr != -ENOMEM) {
                        slice_dbg(" found area at 0x%lx\n", addr);
                        return addr;
                }
        }

        /* Now let's see if we can find something in the existing slices
         * for that size plus free slices
         */
        addr = slice_find_area(mm, len, potential_mask, psize, topdown);

#ifdef CONFIG_PPC_64K_PAGES
        if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
                /* retry the search with 4k-page slices included */
                or_mask(potential_mask, compat_mask);
                addr = slice_find_area(mm, len, potential_mask, psize,
                                       topdown);
        }
#endif

        if (addr == -ENOMEM)
                return -ENOMEM;

        mask = slice_range_to_mask(addr, len);
        slice_dbg(" found potential area at 0x%lx\n", addr);
        slice_print_mask(" mask", mask);

 convert:
        andnot_mask(mask, good_mask);
        andnot_mask(mask, compat_mask);
        if (mask.low_slices || mask.high_slices) {
                slice_convert(mm, mask, psize);
                if (psize > MMU_PAGE_BASE)
                        on_each_cpu(slice_flush_segments, mm, 1);
        }
        return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
                                     unsigned long addr,
                                     unsigned long len,
                                     unsigned long pgoff,
                                     unsigned long flags)
{
        return slice_get_unmapped_area(addr, len, flags,
                                       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                             const unsigned long addr0,
                                             const unsigned long len,
                                             const unsigned long pgoff,
                                             const unsigned long flags)
{
        return slice_get_unmapped_area(addr0, len, flags,
                                       current->mm->context.user_psize, 1);
}

unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
        unsigned char *hpsizes;
        int index, mask_index;

        if (addr < SLICE_LOW_TOP) {
                u64 lpsizes;
                lpsizes = mm->context.low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
                return (lpsizes >> (index * 4)) & 0xf;
        }
        hpsizes = mm->context.high_slices_psize;
        index = GET_HIGH_SLICE_INDEX(addr);
        mask_index = index & 0x1;
        return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
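
/*
 * Editor's note: the value returned by get_slice_psize() is an MMU_PAGE_*
 * index into mmu_psize_defs[], not a size in bytes. A caller wanting the
 * actual page shift would do something like (illustrative):
 *
 *      unsigned int shift = mmu_psize_defs[get_slice_psize(mm, ea)].shift;
 *
 * The hash fault path uses this lookup to pick the page size for a given
 * user effective address.
 */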

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non-cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
        int index, mask_index;
        unsigned char *hpsizes;
        unsigned long flags, lpsizes;
        unsigned int old_psize;
        int i;

        slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

        spin_lock_irqsave(&slice_convert_lock, flags);

        old_psize = mm->context.user_psize;
        slice_dbg(" old_psize=%d\n", old_psize);
        if (old_psize == psize)
                goto bail;

        mm->context.user_psize = psize;
        wmb();

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
                        lpsizes = (lpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));
        /* Assign the value back */
        mm->context.low_slices_psize = lpsizes;

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < SLICE_NUM_HIGH; i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
                        hpsizes[index] = (hpsizes[index] &
                                          ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  mm->context.low_slices_psize,
                  mm->context.high_slices_psize);

 bail:
        spin_unlock_irqrestore(&slice_convert_lock, flags);
}

void slice_set_psize(struct mm_struct *mm, unsigned long address,
                     unsigned int psize)
{
        unsigned char *hpsizes;
        unsigned long i, flags;
        u64 *lpsizes;

        spin_lock_irqsave(&slice_convert_lock, flags);
        if (address < SLICE_LOW_TOP) {
                i = GET_LOW_SLICE_INDEX(address);
                lpsizes = &mm->context.low_slices_psize;
                *lpsizes = (*lpsizes & ~(0xful << (i * 4))) |
                        ((unsigned long) psize << (i * 4));
        } else {
                int index, mask_index;
                i = GET_HIGH_SLICE_INDEX(address);
                hpsizes = mm->context.high_slices_psize;
                mask_index = i & 0x1;
                index = i >> 1;
                hpsizes[index] = (hpsizes[index] &
                                  ~(0xf << (mask_index * 4))) |
                        (((unsigned long)psize) << (mask_index * 4));
        }

        spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
        spu_flush_all_slbs(mm);
#endif
}

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
                           unsigned long len, unsigned int psize)
{
        struct slice_mask mask = slice_range_to_mask(start, len);

        slice_convert(mm, mask, psize);
}

/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len)
{
        struct slice_mask mask, available;
        unsigned int psize = mm->context.user_psize;

        mask = slice_range_to_mask(addr, len);
        available = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
        /* We need to account for 4k slices too */
        if (psize == MMU_PAGE_64K) {
                struct slice_mask compat_mask;
                compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
                or_mask(available, compat_mask);
        }
#endif

#if 0 /* too verbose */
        slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
                  mm, addr, len);
        slice_print_mask(" mask", mask);
        slice_print_mask(" available", available);
#endif
        return !slice_check_fit(mask, available);
}