genirq: Do not copy affinity before set
author	Thomas Gleixner <tglx@linutronix.de>
Mon, 7 Feb 2011 15:02:20 +0000 (16:02 +0100)
committer	Thomas Gleixner <tglx@linutronix.de>
Sat, 19 Feb 2011 11:58:07 +0000 (12:58 +0100)
While rummaging through arch code I found that there are a few
workarounds which deal with the fact that the initial affinity setting
from request_irq() copies the mask into irq_data->affinity before the
chip code is called. In the normal path we unconditionally copy the
mask when the chip code returns 0.

Copy the mask only after the chip code has been called, and add a
return code IRQ_SET_MASK_OK_NOCOPY for chip functions, which prevents
the copy. That way we see the real mask when the chip function decides
to truncate it further, as some architectures do. IRQ_SET_MASK_OK is 0,
which preserves the current behaviour.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/irq.h
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/proc.c

diff --git a/include/linux/irq.h b/include/linux/irq.h
index e9f847d56c4d58f93db7074fd828c78a7f5433a0..f5e900309d21b98eb71697c036af85f9982604cd 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -85,6 +85,17 @@ typedef      void (*irq_flow_handler_t)(unsigned int irq,
 # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING
 #endif
 
+/*
+ * Return value for chip->irq_set_affinity()
+ *
+ * IRQ_SET_MASK_OK        - OK, core updates irq_data.affinity
+ * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity
+ */
+enum {
+       IRQ_SET_MASK_OK = 0,
+       IRQ_SET_MASK_OK_NOCOPY,
+};
+
 struct msi_desc;
 
 /**
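
Note that IRQ_SET_MASK_OK is deliberately 0, so existing chip
implementations which return 0 on success keep working unchanged, and
negative error codes match neither enum value and still propagate to
the caller.
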
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 99c3bc8a6fb464f1e19a00dc6e1959cb9e777b5e..b5bfa24aa6a6b96867e1d3ee71a1c29a612f10fc 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -43,7 +43,7 @@ static inline void unregister_handler_proc(unsigned int irq,
                                           struct irqaction *action) { }
 #endif
 
-extern int irq_select_affinity_usr(unsigned int irq);
+extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
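The new cpumask parameter threads a caller-provided scratch mask down
into setup_affinity(). A sketch of the intended calling convention,
assuming the motivation is CONFIG_CPUMASK_OFFSTACK (an on-stack cpumask
is too big for large NR_CPUS, and allocating under the raw desc->lock
is not an option):

	cpumask_var_t mask;

	/* allocate before taking the raw spinlock ... */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);	/* 'mask' is scratch space */
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	free_cpumask_var(mask);
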
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ade65bfb466d20cee486117e599f118fdc8f289e..dc95d53df510c0fc3dcd3d33f8a65b8aeac71b11 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -148,9 +148,12 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 
        if (irq_can_move_pcntxt(desc)) {
                ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
-               if (!ret) {
+               switch (ret) {
+               case IRQ_SET_MASK_OK:
                        cpumask_copy(desc->irq_data.affinity, mask);
+               case IRQ_SET_MASK_OK_NOCOPY:
                        irq_set_thread_affinity(desc);
+                       ret = 0;
                }
        } else {
                desc->status |= IRQ_MOVE_PENDING;
@@ -254,9 +257,12 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 /*
  * Generic version of the affinity autoselector.
  */
-static int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
+       struct irq_chip *chip = get_irq_desc_chip(desc);
        struct cpumask *set = irq_default_affinity;
+       int ret;
 
        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
@@ -273,13 +279,20 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }
-       cpumask_and(desc->irq_data.affinity, cpu_online_mask, set);
-       desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);
 
+       cpumask_and(mask, cpu_online_mask, set);
+       ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+       switch (ret) {
+       case IRQ_SET_MASK_OK:
+               cpumask_copy(desc->irq_data.affinity, mask);
+       case IRQ_SET_MASK_OK_NOCOPY:
+               irq_set_thread_affinity(desc);
+       }
        return 0;
 }
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
 {
        return irq_select_affinity(irq);
 }
@@ -288,23 +301,23 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 /*
  * Called when affinity is set via /proc/irq
  */
-int irq_select_affinity_usr(unsigned int irq)
+int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       ret = setup_affinity(irq, desc);
+       ret = setup_affinity(irq, desc, mask);
        if (!ret)
                irq_set_thread_affinity(desc);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
-
        return ret;
 }
 
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
        return 0;
 }
@@ -765,8 +778,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        struct irqaction *old, **old_ptr;
        const char *old_name = NULL;
        unsigned long flags;
-       int nested, shared = 0;
-       int ret;
+       int ret, nested, shared = 0;
+       cpumask_var_t mask;
 
        if (!desc)
                return -EINVAL;
@@ -831,6 +844,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                new->thread = t;
        }
 
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+               ret = -ENOMEM;
+               goto out_thread;
+       }
+
        /*
         * The following block of code has to be executed atomically
         */
@@ -876,7 +894,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                                        new->flags & IRQF_TRIGGER_MASK);
 
                        if (ret)
-                               goto out_thread;
+                               goto out_mask;
                } else
                        compat_irq_chip_set_default_handler(desc);
 #if defined(CONFIG_IRQ_PER_CPU)
@@ -903,7 +921,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                        desc->status |= IRQ_NO_BALANCING;
 
                /* Set default affinity mask once everything is setup */
-               setup_affinity(irq, desc);
+               setup_affinity(irq, desc, mask);
 
        } else if ((new->flags & IRQF_TRIGGER_MASK)
                        && (new->flags & IRQF_TRIGGER_MASK)
@@ -956,6 +974,9 @@ mismatch:
 #endif
        ret = -EBUSY;
 
+out_mask:
+       free_cpumask_var(mask);
+
 out_thread:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (new->thread) {
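
A note on the two switch statements added above: the fall-through from
IRQ_SET_MASK_OK into IRQ_SET_MASK_OK_NOCOPY is intentional. Annotated
(the comments are editorial, not part of the patch), the logic reads:

	switch (chip->irq_set_affinity(&desc->irq_data, mask, false)) {
	case IRQ_SET_MASK_OK:
		/* Chip accepted the mask as-is; the core records it ... */
		cpumask_copy(desc->irq_data.affinity, mask);
		/* ... and falls through: both cases update thread affinity */
	case IRQ_SET_MASK_OK_NOCOPY:
		/* Chip already stored the (possibly truncated) mask itself */
		irq_set_thread_affinity(desc);
	}

A negative error code from the chip matches neither case and is left
for the caller to handle.
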
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 6c8a2a9f8a7bf802f72527831e0f5f32b773e964..a46bd762db473065f855d495a370ed32c574ea83 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -89,7 +89,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
        if (!cpumask_intersects(new_value, cpu_online_mask)) {
                /* Special case for empty set - allow the architecture
                   code to set default SMP affinity. */
-               err = irq_select_affinity_usr(irq) ? -EINVAL : count;
+               err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
        } else {
                irq_set_affinity(irq, new_value);
                err = count;
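
Reusing new_value as the scratch mask here is deliberate: the proc
write handler has already allocated and parsed it, and once it turns
out not to intersect cpu_online_mask its contents are no longer needed,
so it can be handed down to irq_select_affinity_usr() instead of
allocating a second cpumask. Writing a mask with no online CPUs to
/proc/irq/N/smp_affinity is what takes this path.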