uretprobes: Limit the depth of return probe nestedness
authorAnton Arapov <anton@redhat.com>
Wed, 3 Apr 2013 16:00:37 +0000 (18:00 +0200)
committerOleg Nesterov <oleg@redhat.com>
Sat, 13 Apr 2013 13:31:58 +0000 (15:31 +0200)
Unlike with kretprobes, we can't trust userspace, and thus must have
protection from user-space attacks. User space has an "unlimited"
stack, so this patch limits the return probe nesting depth as a
simple remedy for it.

Note that this implementation leaks return_instance on siglongjmp
until exit()/exec().

The intention is to have a KISS, bare-minimum solution for the
initial implementation, in order to not complicate the uretprobes
code.

In the future we may come up with a more sophisticated solution that
removes this depth limitation. That is not an easy task and lies
beyond this patchset.

Signed-off-by: Anton Arapov <anton@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
include/linux/uprobes.h
kernel/events/uprobes.c

index b0507f24eeb050bd74a7c0114535550b1bed7f82..06f28beed7c2c405a0a7760ca231c188257926b2 100644 (file)
@@ -38,6 +38,8 @@ struct inode;
 #define UPROBE_HANDLER_REMOVE          1
 #define UPROBE_HANDLER_MASK            1
 
+#define MAX_URETPROBE_DEPTH            64
+
 enum uprobe_filter_ctx {
        UPROBE_FILTER_REGISTER,
        UPROBE_FILTER_UNREGISTER,
@@ -72,6 +74,7 @@ struct uprobe_task {
        struct arch_uprobe_task         autask;
 
        struct return_instance          *return_instances;
+       unsigned int                    depth;
        struct uprobe                   *active_uprobe;
 
        unsigned long                   xol_vaddr;
index 65429ad2ce515370961846700433c2f6f31addfe..6ab00e090c8712b2737596b0aaba34e8a7773dcf 100644 (file)
@@ -1404,6 +1404,13 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
        if (!utask)
                return;
 
+       if (utask->depth >= MAX_URETPROBE_DEPTH) {
+               printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
+                               " nestedness limit pid/tgid=%d/%d\n",
+                               current->pid, current->tgid);
+               return;
+       }
+
        ri = kzalloc(sizeof(struct return_instance), GFP_KERNEL);
        if (!ri)
                goto fail;
@@ -1439,6 +1446,8 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
        ri->orig_ret_vaddr = orig_ret_vaddr;
        ri->chained = chained;
 
+       utask->depth++;
+
        /* add instance to the stack */
        ri->next = utask->return_instances;
        utask->return_instances = ri;
@@ -1681,6 +1690,8 @@ static bool handle_trampoline(struct pt_regs *regs)
                if (!chained)
                        break;
 
+               utask->depth--;
+
                BUG_ON(!ri);
        }
 
This page took 0.032344 seconds and 5 git commands to generate.