genirq: Mirror IRQ_PER_CPU and IRQ_NO_BALANCING in irq_data.state
author      Thomas Gleixner <tglx@linutronix.de>
            Tue, 8 Feb 2011 16:11:03 +0000 (17:11 +0100)
committer   Thomas Gleixner <tglx@linutronix.de>
            Sat, 19 Feb 2011 11:58:20 +0000 (12:58 +0100)
irq_data.state is the right data structure for arch code to look at.

Accessor functions are provided:

 irqd_is_per_cpu(irqdata);
 irqd_can_balance(irqdata);

Coders who access them directly will be tracked down and slapped with
stinking trouts.
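
For arch code that used to poke at desc->status, the conversion is
roughly the following sketch (setup_percpu_handler() is a made-up
placeholder; irq_get_irq_data() is the existing lookup helper):

    /* Before: direct status access from arch code */
    if (irq_to_desc(irq)->status & IRQ_PER_CPU)
            setup_percpu_handler(irq);

    /* After: look at irq_data through the accessor */
    struct irq_data *d = irq_get_irq_data(irq);

    if (d && irqd_is_per_cpu(d))
            setup_percpu_handler(irq);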

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/irq.h
kernel/irq/chip.c
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/migration.c
kernel/irq/settings.h
kernel/irq/spurious.c

diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3f607ad942209e5fbb874e7a3380c937bfcac884..d5312e6fe1aae344acd36af1f521454e15ca7873 100644
@@ -132,10 +132,14 @@ struct irq_data {
  * Bit masks for irq_data.state
  *
  * IRQD_SETAFFINITY_PENDING    - Affinity setting is pending
+ * IRQD_NO_BALANCING           - Balancing disabled for this IRQ
+ * IRQD_PER_CPU                - Interrupt is per cpu
  */
 enum {
        /* Bit 0 - 7 reserved for TYPE will use later */
-       IRQD_SETAFFINITY_PENDING = (1 << 8),
+       IRQD_SETAFFINITY_PENDING        = (1 <<  8),
+       IRQD_NO_BALANCING               = (1 << 10),
+       IRQD_PER_CPU                    = (1 << 11),
 };
 
 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -143,6 +147,16 @@ static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
        return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
 }
 
+static inline bool irqd_is_per_cpu(struct irq_data *d)
+{
+       return d->state_use_accessors & IRQD_PER_CPU;
+}
+
+static inline bool irqd_can_balance(struct irq_data *d)
+{
+       return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+}
+
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
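
Note that irqd_can_balance() deliberately folds both bits together: a
per cpu interrupt is never balancable, whether or not IRQD_NO_BALANCING
is set on top of it. A sketch of the intended use in chip/arch code
(the function and its context are made up):

    static int my_retarget_irq(struct irq_data *data,
                               const struct cpumask *dest)
    {
            if (!irqd_can_balance(data))
                    return -EINVAL;
            /* ... program the routing hardware for @dest ... */
            return 0;
    }
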
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 73b2e7e0093412ed740e4ed7d96982ba4978ff60..b8aa3dfe8301f3f92637022b4988a34e20de9a3e 100644
@@ -706,12 +706,15 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
        if (!desc)
                return;
 
-       /* Sanitize flags */
-       set &= IRQF_MODIFY_MASK;
-       clr &= IRQF_MODIFY_MASK;
-
        raw_spin_lock_irqsave(&desc->lock, flags);
-       desc->status &= ~clr;
-       desc->status |= set;
+
+       irq_settings_clr_and_set(desc, clr, set);
+
+       irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU);
+       if (irq_settings_has_no_balance_set(desc))
+               irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+       if (irq_settings_is_per_cpu(desc))
+               irqd_set(&desc->irq_data, IRQD_PER_CPU);
+
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
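
The clr/set arguments are still sanitized against IRQF_MODIFY_MASK,
just inside irq_settings_clr_and_set() now instead of at the top of
irq_modify_status(). A hypothetical call site (not part of this patch)
keeps working unchanged:

    /* Pin the irq: exclude it from affinity balancing */
    irq_modify_status(irq, 0, IRQ_NO_BALANCING);

    /* ... and later allow balancing again */
    irq_modify_status(irq, IRQ_NO_BALANCING, 0);
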
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b2ba59e73f21808570f6d8547e37b30418f90151..a80b44d2735e1b2b87a91da2cf26a5bcda3411b8 100644
@@ -139,3 +139,14 @@ static inline void irqd_clr_move_pending(struct irq_data *d)
        d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
        irq_compat_clr_move_pending(irq_data_to_desc(d));
 }
+
+static inline void irqd_clear(struct irq_data *d, unsigned int mask)
+{
+       d->state_use_accessors &= ~mask;
+}
+
+static inline void irqd_set(struct irq_data *d, unsigned int mask)
+{
+       d->state_use_accessors |= mask;
+}
+
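
irqd_set() and irqd_clear() are plain read-modify-write operations on
irq_data.state, so callers need to hold desc->lock. The pattern, as
used by irq_modify_status() above (sketch):

    raw_spin_lock_irqsave(&desc->lock, flags);
    irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
    raw_spin_unlock_irqrestore(&desc->lock, flags);
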
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f1cfa271ba704998c90293a728813c3924231878..84a0a9c22226dc339f5d2d7e0803c7c340f6af35 100644
@@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
 
-       if ((desc->status & (IRQ_PER_CPU | IRQ_NO_BALANCING)) ||
-           !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
+       if (!irqd_can_balance(&desc->irq_data) || !desc->irq_data.chip ||
+           !desc->irq_data.chip->irq_set_affinity)
                return 0;
 
        return 1;
@@ -897,8 +897,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                                  IRQS_INPROGRESS | IRQS_ONESHOT | \
                                  IRQS_WAITING);
 
-               if (new->flags & IRQF_PERCPU)
-                       desc->status |= IRQ_PER_CPU;
+               if (new->flags & IRQF_PERCPU) {
+                       irqd_set(&desc->irq_data, IRQD_PER_CPU);
+                       irq_settings_set_per_cpu(desc);
+               }
 
                if (new->flags & IRQF_ONESHOT)
                        desc->istate |= IRQS_ONESHOT;
@@ -910,8 +912,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                        desc->depth = 1;
 
                /* Exclude IRQ from balancing if requested */
-               if (new->flags & IRQF_NOBALANCING)
-                       desc->status |= IRQ_NO_BALANCING;
+               if (new->flags & IRQF_NOBALANCING) {
+                       irq_settings_set_no_balancing(desc);
+                       irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+               }
 
                /* Set default affinity mask once everything is setup */
                setup_affinity(irq, desc, mask);
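
Driver-visible behaviour does not change: the request_irq() flags are
mirrored into irq_data.state in addition to the irq_settings_* bits.
A hypothetical per cpu user (handler and dev_id are made up):

    ret = request_irq(irq, my_percpu_handler,
                      IRQF_PERCPU | IRQF_NOBALANCING,
                      "my-percpu-dev", dev);
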
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 24f53caddf4773f05390b41bdb4b368982edcec8..7a93c6b88b2550353f20393b7c30d1040e181a20 100644
@@ -15,7 +15,7 @@ void move_masked_irq(int irq)
        /*
         * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
         */
-       if (desc->status & (IRQ_PER_CPU | IRQ_NO_BALANCING)) {
+       if (!irqd_can_balance(&desc->irq_data)) {
                WARN_ON(1);
                return;
        }
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index bb104a2dce73088ebe800b9ca78d2dcc14c35e89..ba0fffe410ad2b58904078093872dff4dcb2ad38 100644
@@ -4,6 +4,9 @@
  */
 enum {
        _IRQ_DEFAULT_INIT_FLAGS = IRQ_DEFAULT_INIT_FLAGS,
+       _IRQ_PER_CPU            = IRQ_PER_CPU,
+       _IRQ_NO_BALANCING       = IRQ_NO_BALANCING,
+       _IRQF_MODIFY_MASK       = IRQF_MODIFY_MASK,
 };
 
 #undef IRQ_INPROGRESS
@@ -22,3 +25,36 @@ enum {
 #define IRQ_WAKEUP             GOT_YOU_MORON
 #undef IRQ_MOVE_PENDING
 #define IRQ_MOVE_PENDING       GOT_YOU_MORON
+#undef IRQ_PER_CPU
+#define IRQ_PER_CPU            GOT_YOU_MORON
+#undef IRQ_NO_BALANCING
+#define IRQ_NO_BALANCING       GOT_YOU_MORON
+#undef IRQF_MODIFY_MASK
+#define IRQF_MODIFY_MASK       GOT_YOU_MORON
+
+static inline void
+irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
+{
+       desc->status &= ~(clr & _IRQF_MODIFY_MASK);
+       desc->status |= (set & _IRQF_MODIFY_MASK);
+}
+
+static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
+{
+       return desc->status & _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
+{
+       desc->status |= _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
+{
+       desc->status |= _IRQ_NO_BALANCING;
+}
+
+static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
+{
+       return desc->status & _IRQ_NO_BALANCING;
+}
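
The #undef/#define pairs above turn any remaining direct use of the
old status bits inside kernel/irq/ into a build failure. A
(hypothetical) leftover like this no longer compiles:

    /* somewhere in kernel/irq/ after settings.h is included */
    if (desc->status & IRQ_PER_CPU)  /* expands to: desc->status & GOT_YOU_MORON */
            do_something();          /* -> error: 'GOT_YOU_MORON' undeclared */
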
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 692ce2bae302080a9ae1512039c1208f6c30b3c0..226ed7d26a84d86f8aa0fc321842544202fe14c9 100644
@@ -68,7 +68,8 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
        raw_spin_lock(&desc->lock);
 
        /* PER_CPU and nested thread interrupts are never polled */
-       if (desc->status & (IRQ_PER_CPU | IRQ_NESTED_THREAD))
+       if (irq_settings_is_per_cpu(desc) ||
+           (desc->status & IRQ_NESTED_THREAD))
                goto out;
 
        /*