/*
 * mpx.c - Memory Protection eXtensions
 *
 * Copyright (c) 2014, Intel Corporation.
 * Qiaowei Ren <qiaowei.ren@intel.com>
 * Dave Hansen <dave.hansen@intel.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>

#include <asm/insn.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/mpx.h>

static const char *mpx_mapping_name(struct vm_area_struct *vma)
{
	return "[mpx]";
}

static struct vm_operations_struct mpx_vma_ops = {
	.name = mpx_mapping_name,
};

static int is_mpx_vma(struct vm_area_struct *vma)
{
	return (vma->vm_ops == &mpx_vma_ops);
}

static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BD_SIZE_BYTES_64;
	else
		return MPX_BD_SIZE_BYTES_32;
}

static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BT_SIZE_BYTES_64;
	else
		return MPX_BT_SIZE_BYTES_32;
}

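/*
 * A rough breakdown of the sizes behind those constants, assuming the
 * usual MPX layout (the authoritative MPX_* values live in asm/mpx.h,
 * so treat these numbers as illustrative):
 *
 *   64-bit: directory = 2^28 entries *  8 bytes = 2GB
 *           table     = 2^17 entries * 32 bytes = 4MB
 *   32-bit: directory = 2^20 entries *  4 bytes = 4MB
 *           table     = 2^10 entries * 16 bytes = 16KB
 */
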
/*
 * This is really a simplified "vm_mmap". It only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 *
 * Later on, we use the vma->vm_ops to uniquely identify these
 * special MPX mappings.
 */
static unsigned long mpx_mmap(unsigned long len)
{
	unsigned long ret;
	unsigned long addr, pgoff;
	struct mm_struct *mm = current->mm;
	vm_flags_t vm_flags;
	struct vm_area_struct *vma;

	/* Only bounds tables can be allocated here */
	if (len != mpx_bt_size_bytes(mm))
		return -EINVAL;

	down_write(&mm->mmap_sem);

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Obtain the address to map to. We verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(NULL, 0, len, 0, MAP_ANONYMOUS | MAP_PRIVATE);
	if (addr & ~PAGE_MASK) {
		ret = addr;
		goto out;
	}

	vm_flags = VM_READ | VM_WRITE | VM_MPX |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* Set pgoff according to addr for anon_vma */
	pgoff = addr >> PAGE_SHIFT;

	ret = mmap_region(NULL, addr, len, vm_flags, pgoff);
	if (IS_ERR_VALUE(ret))
		goto out;

	vma = find_vma(mm, ret);
	if (!vma) {
		ret = -ENOMEM;
		goto out;
	}
	vma->vm_ops = &mpx_vma_ops;

	if (vm_flags & VM_LOCKED) {
		up_write(&mm->mmap_sem);
		mm_populate(ret, len);
		return ret;
	}

out:
	up_write(&mm->mmap_sem);
	return ret;
}

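/*
 * Typical use, as in allocate_bt() further down: carve out a single
 * bounds table and check the result like any other mmap-style return
 * value, e.g.:
 *
 *	bt_addr = mpx_mmap(mpx_bt_size_bytes(mm));
 *	if (IS_ERR((void *)bt_addr))
 *		return PTR_ERR((void *)bt_addr);
 */
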
enum reg_type {
	REG_TYPE_RM = 0,
	REG_TYPE_INDEX,
	REG_TYPE_BASE,
};

static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
			  enum reg_type type)
{
	int regno = 0;

	static const int regoff[] = {
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, cx),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, bx),
		offsetof(struct pt_regs, sp),
		offsetof(struct pt_regs, bp),
		offsetof(struct pt_regs, si),
		offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
		offsetof(struct pt_regs, r8),
		offsetof(struct pt_regs, r9),
		offsetof(struct pt_regs, r10),
		offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12),
		offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14),
		offsetof(struct pt_regs, r15),
#endif
	};
	int nr_registers = ARRAY_SIZE(regoff);
	/*
	 * Don't possibly decode a 32-bit instruction as
	 * reading a 64-bit-only register.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
		nr_registers -= 8;

	switch (type) {
	case REG_TYPE_RM:
		regno = X86_MODRM_RM(insn->modrm.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_INDEX:
		regno = X86_SIB_INDEX(insn->sib.value);
		if (X86_REX_X(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_BASE:
		regno = X86_SIB_BASE(insn->sib.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	default:
		pr_err("invalid register type");
		BUG();
		break;
	}

	if (regno >= nr_registers) {
		WARN_ONCE(1, "decoded an instruction with an invalid register");
		return -EINVAL;
	}
	return regoff[regno];
}

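/*
 * Rough decode example (illustrative values, not from a real trap): a
 * ModRM byte of 0xc1 has rm == 1, which indexes 'cx' above; if a REX
 * prefix with REX.B set was also decoded, the rm value is bumped to
 * register 9 and offsetof(struct pt_regs, r9) is returned instead.
 */
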
/*
 * Return the address being referenced by the instruction:
 * for rm==3, return the content of the rm reg;
 * for rm!=3, calculate the address using SIB and Disp.
 */
static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs)
{
	unsigned long addr, base, indx;
	int addr_offset, base_offset, indx_offset;
	insn_byte_t sib;

	insn_get_modrm(insn);
	insn_get_sib(insn);
	sib = insn->sib.value;

	if (X86_MODRM_MOD(insn->modrm.value) == 3) {
		addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
		if (addr_offset < 0)
			goto out_err;
		addr = regs_get_register(regs, addr_offset);
	} else {
		if (insn->sib.nbytes) {
			base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
			if (base_offset < 0)
				goto out_err;

			indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
			if (indx_offset < 0)
				goto out_err;

			base = regs_get_register(regs, base_offset);
			indx = regs_get_register(regs, indx_offset);
			addr = base + indx * (1 << X86_SIB_SCALE(sib));
		} else {
			addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
			if (addr_offset < 0)
				goto out_err;
			addr = regs_get_register(regs, addr_offset);
		}
		addr += insn->displacement.value;
	}
	return (void __user *)addr;
out_err:
	return (void __user *)-1;
}

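/*
 * Worked example for the SIB path above, with made-up register values:
 * base = 0x1000, index = 0x20, scale bits = 2 (a multiplier of 4) and a
 * displacement of 8 give addr = 0x1000 + 0x20 * 4 + 8 = 0x1088, which
 * is what ends up being reported as the faulting pointer.
 */
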
static int mpx_insn_decode(struct insn *insn,
			   struct pt_regs *regs)
{
	unsigned char buf[MAX_INSN_SIZE];
	int x86_64 = !test_thread_flag(TIF_IA32);
	int not_copied;
	int nr_copied;

	not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf));
	nr_copied = sizeof(buf) - not_copied;
	/*
	 * The decoder _should_ fail nicely if we pass it a short buffer.
	 * But, let's not depend on that implementation detail.  If we
	 * did not get anything, just error out now.
	 */
	if (!nr_copied)
		return -EFAULT;
	insn_init(insn, buf, nr_copied, x86_64);
	insn_get_length(insn);
	/*
	 * copy_from_user() tries to get as many bytes as we could see in
	 * the largest possible instruction.  If the instruction we are
	 * after is shorter than that _and_ we attempt to copy from
	 * something unreadable, we might get a short read.  This is OK
	 * as long as the read did not stop in the middle of the
	 * instruction.  Check to see if we got a partial instruction.
	 */
	if (nr_copied < insn->length)
		return -EFAULT;

	insn_get_opcode(insn);
	/*
	 * We only _really_ need to decode bndcl/bndcn/bndcu.
	 * Error out on anything else.
	 */
	if (insn->opcode.bytes[0] != 0x0f)
		goto bad_opcode;
	if ((insn->opcode.bytes[1] != 0x1a) &&
	    (insn->opcode.bytes[1] != 0x1b))
		goto bad_opcode;

	return 0;
bad_opcode:
	return -EINVAL;
}

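/*
 * For reference, the bounds-checking instructions this is meant to
 * catch (BNDCL, BNDCU, BNDCN) all share the 0x0f 0x1a / 0x0f 0x1b
 * opcode space and differ only in their legacy prefixes, which is
 * roughly why only the two opcode bytes are inspected above.
 */
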
/*
 * If a bounds overflow occurs then a #BR is generated. This
 * function decodes MPX instructions to get violation address
 * and set this address into extended struct siginfo.
 *
 * Note that this is not a super precise way of doing this.
 * Userspace could have, by the time we get here, written
 * anything it wants in to the instructions.  We can not
 * trust anything about it.  They might not be valid
 * instructions or might encode invalid registers, etc...
 *
 * The caller is expected to kfree() the returned siginfo_t.
 */
siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
{
	const struct bndreg *bndregs, *bndreg;
	siginfo_t *info = NULL;
	struct insn insn;
	uint8_t bndregno;
	int err;

	err = mpx_insn_decode(&insn, regs);
	if (err)
		goto err_out;

	/*
	 * We know at this point that we are only dealing with
	 * MPX instructions.
	 */
	insn_get_modrm(&insn);
	bndregno = X86_MODRM_REG(insn.modrm.value);
	if (bndregno > 3) {
		err = -EINVAL;
		goto err_out;
	}
	/* get bndregs field from current task's xsave area */
	bndregs = get_xsave_field_ptr(XSTATE_BNDREGS);
	if (!bndregs) {
		err = -EINVAL;
		goto err_out;
	}
	/* now go select the individual register in the set of 4 */
	bndreg = &bndregs[bndregno];

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_out;
	}
	/*
	 * The registers are always 64-bit, but the upper 32
	 * bits are ignored in 32-bit mode.  Also, note that the
	 * upper bounds are architecturally represented in 1's
	 * complement form.
	 *
	 * The 'unsigned long' cast is because the compiler
	 * complains when casting from integers to different-size
	 * pointers.
	 */
	info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound;
	info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound;
	info->si_addr_lsb = 0;
	info->si_signo = SIGSEGV;
	info->si_errno = 0;
	info->si_code = SEGV_BNDERR;
	info->si_addr = mpx_get_addr_ref(&insn, regs);
	/*
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
	if (info->si_addr == (void *)-1) {
		err = -EINVAL;
		goto err_out;
	}
	trace_mpx_bounds_register_exception(info->si_addr, bndreg);
	return info;
err_out:
	/* info might be NULL, but kfree() handles that */
	kfree(info);
	return ERR_PTR(err);
}

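/*
 * Example of the one's-complement handling above, with an invented
 * bound: if userspace created the bounds [0x1000, 0x1fff], hardware
 * keeps lower_bound = 0x1000 but upper_bound = ~0x1fff, so the ~ when
 * filling si_upper hands userspace back the 0x1fff it expects.
 */
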
static __user void *mpx_get_bounds_dir(void)
{
	const struct bndcsr *bndcsr;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * 32-bit binaries on 64-bit kernels are currently
	 * unsupported.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
		return MPX_INVALID_BOUNDS_DIR;
	/*
	 * The bounds directory pointer is stored in a register
	 * only accessible if we first do an xsave.
	 */
	bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
	if (!bndcsr)
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Make sure the register looks valid by checking the
	 * enable bit.
	 */
	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Lastly, mask off the low bits used for configuration
	 * flags, and return the address of the bounds directory.
	 */
	return (void __user *)(unsigned long)
		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
}

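/*
 * Illustrative BNDCFGU decode, assuming the usual layout where the low
 * 12 bits hold the enable/preserve flags and the rest is the directory
 * base: a value of 0x7ffff7800001 means "MPX enabled, bounds directory
 * at 0x7ffff7800000", which is exactly what the masking above returns.
 */
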
int mpx_enable_management(void)
{
	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
	struct mm_struct *mm = current->mm;
	int ret = 0;

	/*
	 * The userspace runtime is responsible for allocating the
	 * bounds directory. It then saves the base of the bounds
	 * directory into the XSAVE/XRSTOR save area and enables MPX
	 * through the XRSTOR instruction.
	 *
	 * The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
	 * expected to be relatively expensive. Storing the bounds
	 * directory here means that we do not have to do xsave in the
	 * unmap path; we can just use mm->bd_addr instead.
	 */
	bd_base = mpx_get_bounds_dir();
	down_write(&mm->mmap_sem);
	mm->bd_addr = bd_base;
	if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR)
		ret = -ENXIO;

	up_write(&mm->mmap_sem);
	return ret;
}

int mpx_disable_management(void)
{
	struct mm_struct *mm = current->mm;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return -ENXIO;

	down_write(&mm->mmap_sem);
	mm->bd_addr = MPX_INVALID_BOUNDS_DIR;
	up_write(&mm->mmap_sem);
	return 0;
}

static int mpx_cmpxchg_bd_entry(struct mm_struct *mm,
		unsigned long *curval,
		unsigned long __user *addr,
		unsigned long old_val, unsigned long new_val)
{
	int ret;
	/*
	 * user_atomic_cmpxchg_inatomic() actually uses sizeof()
	 * the pointer that we pass to it to figure out how much
	 * data to cmpxchg.  We have to be careful here not to
	 * pass a pointer to a 64-bit data type when we only want
	 * a 32-bit copy.
	 */
	if (is_64bit_mm(mm)) {
		ret = user_atomic_cmpxchg_inatomic(curval,
				addr, old_val, new_val);
	} else {
		u32 uninitialized_var(curval_32);
		u32 old_val_32 = old_val;
		u32 new_val_32 = new_val;
		u32 __user *addr_32 = (u32 __user *)addr;

		ret = user_atomic_cmpxchg_inatomic(&curval_32,
				addr_32, old_val_32, new_val_32);
		*curval = curval_32;
	}
	return ret;
}

/*
 * With 32-bit mode, a bounds directory is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, a bounds directory is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int allocate_bt(struct mm_struct *mm, long __user *bd_entry)
{
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	unsigned long bd_new_entry;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(mpx_bt_size_bytes(mm));
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bd_new_entry = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table into the
	 * bounds directory entry out in userspace memory.  Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 */
	ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val, bd_entry,
				   expected_old_val, bd_new_entry);
	if (ret)
		goto out_unmap;

	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails.  Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry.  Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set.  Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data in to a bounds table.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	trace_mpx_new_bounds_table(bt_addr);
	return 0;
out_unmap:
	vm_munmap(bt_addr, mpx_bt_size_bytes(mm));
	return ret;
}

/*
 * When a BNDSTX instruction attempts to save bounds to a bounds
 * table, it will first attempt to look up the table in the
 * first-level bounds directory.  If it does not find a table in
 * the directory, a #BR is generated and we get here in order to
 * allocate a new table.
 *
 * With 32-bit mode, the size of the BD is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, the size of the BD is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int do_mpx_bt_fault(void)
{
	unsigned long bd_entry, bd_base;
	const struct bndcsr *bndcsr;
	struct mm_struct *mm = current->mm;

	bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
	if (!bndcsr)
		return -EINVAL;
	/*
	 * Mask off the preserve and enable bits
	 */
	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
	/*
	 * The hardware provides the address of the missing or invalid
	 * entry via BNDSTATUS, so we don't have to go look it up.
	 */
	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
	/*
	 * Make sure the directory entry is within where we think
	 * the directory is.
	 */
	if ((bd_entry < bd_base) ||
	    (bd_entry >= bd_base + mpx_bd_size_bytes(mm)))
		return -EINVAL;

	return allocate_bt(mm, (long __user *)bd_entry);
}

int mpx_handle_bd_fault(void)
{
	/*
	 * Userspace never asked us to manage the bounds tables,
	 * so refuse to help.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return -EINVAL;

	if (do_mpx_bt_fault()) {
		force_sig(SIGSEGV, current);
		/*
		 * The force_sig() is essentially "handling" this
		 * exception, so we do not pass up the error
		 * from do_mpx_bt_fault().
		 */
	}
	return 0;
}

/*
 * A thin wrapper around get_user_pages().  Returns 0 if the
 * fault was resolved or -errno if not.
 */
static int mpx_resolve_fault(long __user *addr, int write)
{
	long gup_ret;
	int nr_pages = 1;
	int force = 0;

	gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
				 nr_pages, write, force, NULL, NULL);
	/*
	 * get_user_pages() returns number of pages gotten.
	 * 0 means we failed to fault in and get anything,
	 * probably because 'addr' is bad.
	 */
	if (!gup_ret)
		return -EFAULT;
	/* Other error, return it */
	if (gup_ret < 0)
		return gup_ret;
	/* must have gup'd a page and gup_ret>0, success */
	return 0;
}

static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
		unsigned long bd_entry)
{
	unsigned long bt_addr = bd_entry;
	int align_to_bytes;
	/*
	 * Bit 0 in a bt_entry is always the valid bit.
	 */
	bt_addr &= ~MPX_BD_ENTRY_VALID_FLAG;
	/*
	 * Tables are naturally aligned at 8-byte boundaries
	 * on 64-bit and 4-byte boundaries on 32-bit.  The
	 * documentation makes it appear that the low bits
	 * are ignored by the hardware, so we do the same.
	 */
	if (is_64bit_mm(mm))
		align_to_bytes = 8;
	else
		align_to_bytes = 4;
	bt_addr &= ~(align_to_bytes-1);
	return bt_addr;
}

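/*
 * Tiny worked example on a 64-bit mm, with a made-up directory entry:
 * bd_entry = 0x100407 has the valid bit plus some ignored low bits set;
 * clearing bit 0 and then the 8-byte alignment bits leaves
 * bt_addr = 0x100400 as the usable bounds-table base.
 */
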
/*
 * Get the base of the bounds table pointed to by a specific bounds
 * directory entry.
 */
static int get_bt_addr(struct mm_struct *mm,
			long __user *bd_entry_ptr,
			unsigned long *bt_addr_result)
{
	int ret;
	int valid_bit;
	unsigned long bd_entry;
	unsigned long bt_addr;

	if (!access_ok(VERIFY_READ, (bd_entry_ptr), sizeof(*bd_entry_ptr)))
		return -EFAULT;

	while (1) {
		int need_write = 0;

		pagefault_disable();
		ret = get_user(bd_entry, bd_entry_ptr);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry_ptr, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}

	valid_bit = bd_entry & MPX_BD_ENTRY_VALID_FLAG;
	bt_addr = mpx_bd_entry_to_bt_addr(mm, bd_entry);

	/*
	 * When the kernel is managing bounds tables, a bounds directory
	 * entry will either have a valid address (plus the valid bit)
	 * *OR* be completely empty. If we see a !valid entry *and* some
	 * data in the address field, we know something is wrong. This
	 * -EINVAL return will cause a SIGSEGV.
	 */
	if (!valid_bit && bt_addr)
		return -EINVAL;
	/*
	 * Do we have a completely zeroed bt entry?  That is OK.  It
	 * just means there was no bounds table for this memory.  Make
	 * sure to distinguish this from -EINVAL, which will cause
	 * a SEGV.
	 */
	if (!valid_bit)
		return -ENOENT;

	*bt_addr_result = bt_addr;
	return 0;
}

/*
 * Free the backing physical pages of bounds table 'bt_addr'.
 * Assume start...end is within that bounds table.
 */
static int zap_bt_entries(struct mm_struct *mm,
		unsigned long bt_addr,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;
	unsigned long addr, len;

	/*
	 * Find the first overlapping vma. If vma->vm_start > start, there
	 * will be a hole in the bounds table. This -EINVAL return will
	 * cause a SIGSEGV.
	 */
	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EINVAL;

	/*
	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
	 * be split. So we need to look across the entire 'start -> end'
	 * range of this bounds table, find all of the VM_MPX VMAs, and
	 * zap only those.
	 */
	addr = start;
	while (vma && vma->vm_start < end) {
		/*
		 * We followed a bounds directory entry down
		 * here.  If we find a non-MPX VMA, that's bad,
		 * so stop immediately and return an error.  This
		 * probably results in a SIGSEGV.
		 */
		if (!is_mpx_vma(vma))
			return -EINVAL;

		len = min(vma->vm_end, end) - addr;
		zap_page_range(vma, addr, len, NULL);
		trace_mpx_unmap_zap(addr, addr+len);

		vma = vma->vm_next;
		addr = vma->vm_start;
	}

	return 0;
}

static int unmap_single_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long bt_addr)
{
	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	unsigned long uninitialized_var(actual_old_val);
	int ret;

	while (1) {
		int need_write = 1;
		unsigned long cleared_bd_entry = 0;

		pagefault_disable();
		ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
				bd_entry, expected_old_val, cleared_bd_entry);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}
	/*
	 * The cmpxchg was performed, check the results.
	 */
	if (actual_old_val != expected_old_val) {
		/*
		 * Someone else raced with us to unmap the table.
		 * There was no bounds table pointed to by the
		 * directory, so declare success.  Somebody freed
		 * it.
		 */
		if (!actual_old_val)
			return 0;
		/*
		 * Something messed with the bounds directory
		 * entry.  We hold mmap_sem for read or write
		 * here, so it could not be a _new_ bounds table
		 * that someone just allocated.  Something is
		 * wrong, so pass up the error and SIGSEGV.
		 */
		return -EINVAL;
	}

	/*
	 * Note, we are likely being called under do_munmap() already. To
	 * avoid recursion, do_munmap() will check whether it comes
	 * from one bounds table through the VM_MPX flag.
	 */
	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm));
}

static inline int bt_entry_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BT_ENTRY_BYTES_64;
	else
		return MPX_BT_ENTRY_BYTES_32;
}

/*
 * Take a virtual address and turn it in to the offset in bytes
 * inside of the bounds table where the bounds table entry
 * controlling 'addr' can be found.
 */
static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
		unsigned long addr)
{
	unsigned long bt_table_nr_entries;
	unsigned long offset = addr;

	if (is_64bit_mm(mm)) {
		/* Bottom 3 bits are ignored on 64-bit */
		offset >>= 3;
		bt_table_nr_entries = MPX_BT_NR_ENTRIES_64;
	} else {
		/* Bottom 2 bits are ignored on 32-bit */
		offset >>= 2;
		bt_table_nr_entries = MPX_BT_NR_ENTRIES_32;
	}
	/*
	 * We know the size of the table in to which we are
	 * indexing, and we have eliminated all the low bits
	 * which are ignored for indexing.
	 *
	 * Mask out all the high bits which we do not need
	 * to index in to the table.  Note that the tables
	 * are always powers of two so this gives us a proper
	 * mask.
	 */
	offset &= (bt_table_nr_entries-1);
	/*
	 * We now have an entry offset in terms of *entries* in
	 * the table.  We need to scale it back up to bytes.
	 */
	offset *= bt_entry_size_bytes(mm);
	return offset;
}

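/*
 * Worked example, assuming the usual 64-bit table geometry of 2^17
 * entries of 32 bytes each: addr = 0x7f0000001008 drops its low 3 bits
 * to give entry 0x201 within some table; masking with (2^17 - 1) keeps
 * it, and scaling by 32 yields a byte offset of 0x4020 into the table.
 */
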
/*
 * How much virtual address space does a single bounds
 * directory entry cover?
 *
 * Note, we need a long long because 4GB doesn't fit in
 * to a long on 32-bit.
 */
static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
{
	unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits);

	if (is_64bit_mm(mm))
		return virt_space / MPX_BD_NR_ENTRIES_64;
	else
		return virt_space / MPX_BD_NR_ENTRIES_32;
}

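/*
 * Back-of-the-envelope numbers, assuming 48 virtual address bits and
 * the usual entry counts: 2^48 / 2^28 entries = 1MB of address space
 * per 64-bit directory entry, and 2^32 / 2^20 entries = 4KB per 32-bit
 * one.
 */
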
/*
 * Return an offset in terms of bytes in to the bounds
 * directory where the bounds directory entry for a given
 * virtual address resides.
 *
 * This has to be in bytes because the directory entries
 * are different sizes on 64/32 bit.
 */
static unsigned long mpx_get_bd_entry_offset(struct mm_struct *mm,
		unsigned long addr)
{
	/*
	 * There are several ways to derive the bd offsets.  We
	 * use the following approach here:
	 * 1. We know the size of the virtual address space
	 * 2. We know the number of entries in a bounds directory
	 * 3. We know that each entry covers a fixed amount of
	 *    virtual address space.
	 * So, we can just divide the virtual address by the
	 * virtual space used by one entry to determine which
	 * entry "controls" the given virtual address.
	 */
	if (is_64bit_mm(mm)) {
		int bd_entry_size = 8; /* 64-bit pointer */
		/*
		 * Take the 64-bit addressing hole in to account.
		 */
		addr &= ((1UL << boot_cpu_data.x86_virt_bits) - 1);
		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
	} else {
		int bd_entry_size = 4; /* 32-bit pointer */
		/*
		 * 32-bit has no hole so this case needs no mask
		 */
		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
	}
	/*
	 * The two return calls above are exact copies.  If we
	 * pull out a single copy and put it in here, gcc won't
	 * realize that we're doing a power-of-2 divide and use
	 * shifts.  It uses a real divide.  If we put them up
	 * there, it manages to figure it out (gcc 4.8.3).
	 */
}

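/*
 * Quick sketch of the math above with an invented pointer, again
 * assuming 1MB of virtual space per 64-bit directory entry: for
 * addr = 0x10000000 the division gives entry 256, and multiplying by
 * the 8-byte entry size puts its directory slot at byte offset 2048.
 */
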
/*
 * If the bounds table pointed to by bounds directory entry 'bd_entry'
 * is not shared, unmap this whole bounds table. Otherwise, only free
 * those backing physical pages of bounds table entries covered
 * in this virtual address region start...end.
 */
static int unmap_shared_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long start,
		unsigned long end, bool prev_shared, bool next_shared)
{
	unsigned long bt_addr;
	unsigned long start_off, end_off;
	int ret;

	ret = get_bt_addr(mm, bd_entry, &bt_addr);
	/*
	 * We could see an "error" ret for not-present bounds
	 * tables (not really an error), or actual errors, but
	 * stop unmapping either way.
	 */
	if (ret)
		return ret;

	start_off = mpx_get_bt_entry_offset_bytes(mm, start);
	end_off   = mpx_get_bt_entry_offset_bytes(mm, end);

	if (prev_shared && next_shared)
		ret = zap_bt_entries(mm, bt_addr,
				bt_addr + start_off,
				bt_addr + end_off);
	else if (prev_shared)
		ret = zap_bt_entries(mm, bt_addr,
				bt_addr + start_off,
				bt_addr + mpx_bt_size_bytes(mm));
	else if (next_shared)
		ret = zap_bt_entries(mm, bt_addr, bt_addr,
				bt_addr + end_off);
	else
		ret = unmap_single_bt(mm, bd_entry, bt_addr);

	return ret;
}

/*
 * A virtual address region being munmap()ed might share bounds tables
 * with adjacent VMAs. We only need to free the backing physical
 * memory of the shared bounds table entries covered in this virtual
 * address region start...end.
 */
static int unmap_edge_bts(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	int ret;
	long __user *bde_start, *bde_end;
	struct vm_area_struct *prev, *next;
	bool prev_shared = false, next_shared = false;

	bde_start = mm->bd_addr + mpx_get_bd_entry_offset(mm, start);
	bde_end   = mm->bd_addr + mpx_get_bd_entry_offset(mm, end-1);

	/*
	 * Check whether bde_start and bde_end are shared with adjacent
	 * VMAs.
	 *
	 * We already unlinked the VMAs from the mm's rbtree so 'start'
	 * is guaranteed to be in a hole. This gets us the first VMA
	 * before the hole in to 'prev' and the next VMA after the hole
	 * in to 'next'.
	 */
	next = find_vma_prev(mm, start, &prev);
	if (prev && (mm->bd_addr + mpx_get_bd_entry_offset(mm, prev->vm_end-1))
			== bde_start)
		prev_shared = true;
	if (next && (mm->bd_addr + mpx_get_bd_entry_offset(mm, next->vm_start))
			== bde_end)
		next_shared = true;

	/*
	 * This virtual address region being munmap()ed is only
	 * covered by one bounds table.
	 *
	 * In this case, if this table is also shared with adjacent
	 * VMAs, only part of the backing physical memory of the bounds
	 * table need be freed. Otherwise the whole bounds table needs
	 * to be unmapped.
	 */
	if (bde_start == bde_end) {
		return unmap_shared_bt(mm, bde_start, start, end,
				prev_shared, next_shared);
	}

	/*
	 * If more than one bounds table is covered in this virtual
	 * address region being munmap()ed, we need to separately check
	 * whether bde_start and bde_end are shared with adjacent VMAs.
	 */
	ret = unmap_shared_bt(mm, bde_start, start, end, prev_shared, false);
	if (ret)
		return ret;
	ret = unmap_shared_bt(mm, bde_end, start, end, false, next_shared);
	if (ret)
		return ret;

	return 0;
}

static int mpx_unmap_tables(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	int ret;
	long __user *bd_entry, *bde_start, *bde_end;
	unsigned long bt_addr;

	trace_mpx_unmap_search(start, end);
	/*
	 * "Edge" bounds tables are those which are being used by the region
	 * (start -> end), but that may be shared with adjacent areas.  If they
	 * turn out to be completely unshared, they will be freed.  If they are
	 * shared, we will free the backing store (like an MADV_DONTNEED) for
	 * areas used by this region.
	 */
	ret = unmap_edge_bts(mm, start, end);
	switch (ret) {
		/* non-present tables are OK */
		case 0:
		case -ENOENT:
			/* Success, or no tables to unmap */
			break;
		case -EINVAL:
		case -EFAULT:
		default:
			return ret;
	}

	/*
	 * Only unmap the bounds tables that are
	 *   1. fully covered
	 *   2. not at the edges of the mapping, even if fully aligned
	 */
	bde_start = mm->bd_addr + mpx_get_bd_entry_offset(mm, start);
	bde_end = mm->bd_addr + mpx_get_bd_entry_offset(mm, end-1);
	for (bd_entry = bde_start + 1; bd_entry < bde_end; bd_entry++) {
		ret = get_bt_addr(mm, bd_entry, &bt_addr);
		switch (ret) {
			case 0:
				break;
			case -ENOENT:
				/* No table here, try the next one */
				continue;
			case -EINVAL:
			case -EFAULT:
			default:
				/*
				 * Note: we are being strict here.
				 * Any time we run in to an issue
				 * unmapping tables, we stop and
				 * SIGSEGV.
				 */
				return ret;
		}
		ret = unmap_single_bt(mm, bd_entry, bt_addr);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Free unused bounds tables covered in a virtual address region being
 * munmap()ed. Assume end > start.
 *
 * This function will be called by do_munmap(), and the VMAs covering
 * the virtual address region start...end have already been split if
 * necessary, and 'vma' is the first vma in this range (start -> end).
 */
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int ret;

	/*
	 * Refuse to do anything unless userspace has asked
	 * the kernel to help manage the bounds tables.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return;
	/*
	 * This will look across the entire 'start -> end' range,
	 * and find all of the non-VM_MPX VMAs.
	 *
	 * To avoid recursion, if a VM_MPX vma is found in the range
	 * (start->end), we will not continue follow-up work. This
	 * recursion represents having bounds tables for bounds tables,
	 * which should not occur normally. Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
	 */
	do {
		if (vma->vm_flags & VM_MPX)
			return;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);

	ret = mpx_unmap_tables(mm, start, end);
	if (ret)
		force_sig(SIGSEGV, current);
}