/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/spu.h>
/* some sanity checks */
#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
#error PGTABLE_RANGE exceeds slice_mask high_slices size
#endif

static DEFINE_SPINLOCK(slice_convert_lock);
#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	char	*p, buf[16 + 3 + 64 + 1];
	unsigned long i;

	if (!_slice_debug)
		return;
	p = buf;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
	*(p++) = ' '; *(p++) = '-'; *(p++) = ' ';
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		*(p++) = (mask.high_slices & (1ul << i)) ? '1' : '0';
	*(p++) = '\0';

	printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

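/*
 * Build a slice_mask covering every low and high slice touched by the
 * address range [start, start + len).
 */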
static struct slice_mask slice_range_to_mask(unsigned long start,
					     unsigned long len)
{
	unsigned long end = start + len - 1;
	struct slice_mask ret = { 0, 0 };

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, SLICE_LOW_TOP);
		unsigned long mstart = min(start, SLICE_LOW_TOP);

		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(mstart));
	}

	if ((start + len) > SLICE_LOW_TOP)
		ret.high_slices = (1ul << (GET_HIGH_SLICE_INDEX(end) + 1))
			- (1ul << GET_HIGH_SLICE_INDEX(start));

	return ret;
}

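/*
 * Return non-zero if [addr, addr + len) fits below mm->task_size and does
 * not overlap any existing VMA.
 */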
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vma->vm_start);
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}
static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

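/* Build a mask of all low and high slices that contain no VMAs at all. */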
static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret.low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return ret;

	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (!slice_high_has_vma(mm, i))
			ret.high_slices |= 1ul << i;

	return ret;
}

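/* Build a mask of all slices currently set to the given page size. */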
static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
	unsigned char *hpsizes;
	int index, mask_index;
	struct slice_mask ret = { 0, 0 };
	unsigned long i;
	u64 lpsizes;

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == psize)
			ret.low_slices |= 1u << i;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
			ret.high_slices |= 1ul << i;
	}

	return ret;
}
static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		(mask.high_slices & available.high_slices) == mask.high_slices;
}

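/*
 * Flush and rebolt the SLB on the local CPU so that new slice page sizes
 * take effect; run on every CPU via on_each_cpu() after a conversion.
 */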
static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}

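/*
 * Rewrite the page size encoding of every slice selected by @mask to
 * @psize, then flush the SPU SLBs if SPU support is built in.
 */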
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes;
	u64 lpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (mask.high_slices & (1ul << i))
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}

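/*
 * Bottom-up search for a free area of @len bytes restricted to the slices
 * allowed by @available, skipping ahead to the next slice boundary whenever
 * the candidate range does not fit the mask.
 */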
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize, int use_cache)
{
	struct vm_area_struct *vma;
	unsigned long start_addr, addr;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	if (use_cache) {
		if (len <= mm->cached_hole_size) {
			start_addr = addr = TASK_UNMAPPED_BASE;
			mm->cached_hole_size = 0;
		} else
			start_addr = addr = mm->free_area_cache;
	} else
		start_addr = addr = TASK_UNMAPPED_BASE;

full_search:
	for (;;) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		if ((TASK_SIZE - len) < addr)
			break;
		vma = find_vma(mm, addr);
		BUG_ON(vma && (addr >= vma->vm_end));

		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
			else
				addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			if (use_cache)
				mm->free_area_cache = addr + len;
			return addr;
		}
		if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
	}

	/* Make sure we didn't miss any holes */
	if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}

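/*
 * Top-down search (from mm->mmap_base downwards) for a free area within
 * the allowed slices; falls back to the bottom-up search on failure.
 */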
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize, int use_cache)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	/* check if free_area_cache is useful for us */
	if (use_cache) {
		if (len <= mm->cached_hole_size) {
			mm->cached_hole_size = 0;
			mm->free_area_cache = mm->mmap_base;
		}

		/* either no address requested or can't fit in requested
		 * address hole
		 */
		addr = mm->free_area_cache;

		/* make sure it can fit in the remaining address space */
		if (addr > len) {
			addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
			mask = slice_range_to_mask(addr, len);
			if (slice_check_fit(mask, available) &&
			    slice_area_is_free(mm, addr, len))
				/* remember the address as a hint for
				 * next time
				 */
				return (mm->free_area_cache = addr);
		}
	}

	addr = mm->mmap_base;
	while (addr > len) {
		/* Go down by chunk size */
		addr = _ALIGN_DOWN(addr - len, 1ul << pshift);

		/* Check for hit with different page size */
		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
			else if (addr < (1ul << SLICE_HIGH_SHIFT))
				addr = SLICE_LOW_TOP;
			else
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
			continue;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || (addr + len) <= vma->vm_start) {
			/* remember the address as a hint for next time */
			if (use_cache)
				mm->free_area_cache = addr;
			return addr;
		}

		/* remember the largest hole we saw so far */
		if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	addr = slice_find_area_bottomup(mm, len, available, psize, 0);

	/*
	 * Restore the topdown base:
	 */
	if (use_cache) {
		mm->free_area_cache = mm->mmap_base;
		mm->cached_hole_size = ~0UL;
	}

	return addr;
}

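/* Dispatch to the top-down or bottom-up search depending on @topdown. */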
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown, int use_cache)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, use_cache);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
}
#define or_mask(dst, src)	do {			\
	(dst).low_slices |= (src).low_slices;		\
	(dst).high_slices |= (src).high_slices;		\
} while (0)

#define andnot_mask(dst, src)	do {			\
	(dst).low_slices &= ~(src).low_slices;		\
	(dst).high_slices &= ~(src).high_slices;	\
} while (0)

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

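/*
 * Main entry point: find (and, if necessary, convert) slices so that a
 * mapping of @len bytes with page size @psize can be placed, honouring
 * MAP_FIXED and address hints.  Returns the chosen address or a negative
 * error code.
 */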
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown, int use_cache)
{
	struct slice_mask mask = {0, 0};
	struct slice_mask good_mask;
	struct slice_mask potential_mask = {0,0} /* silence stupid warning */;
	struct slice_mask compat_mask = {0, 0};
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
		  addr, len, flags, topdown, use_cache);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -EINVAL;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	good_mask = slice_mask_for_size(mm, psize);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			or_mask(good_mask, compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		mask = slice_range_to_mask(addr, len);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask, psize, topdown,
					  use_cache);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	potential_mask = slice_mask_for_free(mm);
	or_mask(potential_mask, good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize, topdown,
				       use_cache);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown,
			       use_cache);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		or_mask(potential_mask, compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown, use_cache);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	mask = slice_range_to_mask(addr, len);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	andnot_mask(mask, good_mask);
	andnot_mask(mask, compat_mask);
	if (mask.low_slices || mask.high_slices) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize,
				       0, 1);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize,
				       1, 1);
}

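/* Return the page size (MMU_PAGE_*) of the slice containing @addr. */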
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *hpsizes;
	int index, mask_index;

	if (addr < SLICE_LOW_TOP) {
		u64 lpsizes;
		lpsizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xf;
	}
	hpsizes = mm->context.high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non-cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	int index, mask_index;
	unsigned char *hpsizes;
	unsigned long flags, lpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));
	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}

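/* Set the page size of the single slice containing @address. */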
void slice_set_psize(struct mm_struct *mm, unsigned long address,
		     unsigned int psize)
{
	unsigned char *hpsizes;
	unsigned long i, flags;
	u64 *lpsizes;

	spin_lock_irqsave(&slice_convert_lock, flags);
	if (address < SLICE_LOW_TOP) {
		i = GET_LOW_SLICE_INDEX(address);
		lpsizes = &mm->context.low_slices_psize;
		*lpsizes = (*lpsizes & ~(0xful << (i * 4))) |
			((unsigned long) psize << (i * 4));
	} else {
		int index, mask_index;
		i = GET_HIGH_SLICE_INDEX(address);
		hpsizes = mm->context.high_slices_psize;
		mask_index = i & 0x1;
		index = i >> 1;
		hpsizes[index] = (hpsizes[index] &
				  ~(0xf << (mask_index * 4))) |
			(((unsigned long)psize) << (mask_index * 4));
	}

	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}

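/* Convert every slice covered by [start, start + len) to @psize. */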
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask = slice_range_to_mask(start, len);

	slice_convert(mm, mask, psize);
}

/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	mask = slice_range_to_mask(addr, len);
	available = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		or_mask(available, compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		  mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mask, available);
}