genirq: Move IRQ_AFFINITY_SET to core
author: Thomas Gleixner <tglx@linutronix.de>
Tue, 8 Feb 2011 16:22:00 +0000 (17:22 +0100)
committer: Thomas Gleixner <tglx@linutronix.de>
Sat, 19 Feb 2011 11:58:20 +0000 (12:58 +0100)
Keep status in sync until last abuser is gone.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/irq.h
kernel/irq/compat.h
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/settings.h

index d5312e6fe1aae344acd36af1f521454e15ca7873..8da1782ecfca1bb0d71e58c71bafcb4bedbd7eaa 100644 (file)
@@ -60,6 +60,7 @@ typedef       void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_MASKED             0x00002000      /* DEPRECATED */
 /* DEPRECATED use irq_setaffinity_pending() instead*/
 #define IRQ_MOVE_PENDING       0x00004000
+#define IRQ_AFFINITY_SET       0x02000000      /* DEPRECATED */
 #endif
 
 #define IRQ_LEVEL              0x00008000      /* IRQ level triggered */
@@ -70,7 +71,6 @@ typedef       void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_WAKEUP             0x00100000      /* IRQ triggers system wakeup */
 #define IRQ_NO_BALANCING       0x00400000      /* IRQ is excluded from balancing */
 #define IRQ_MOVE_PCNTXT                0x01000000      /* IRQ migration from process context */
-#define IRQ_AFFINITY_SET       0x02000000      /* IRQ affinity was set from userspace*/
 #define IRQ_NESTED_THREAD      0x10000000      /* IRQ is nested into another, no own handler thread */
 
 #define IRQF_MODIFY_MASK       \
@@ -134,12 +134,14 @@ struct irq_data {
  * IRQD_SETAFFINITY_PENDING    - Affinity setting is pending
  * IRQD_NO_BALANCING           - Balancing disabled for this IRQ
  * IRQD_PER_CPU                        - Interrupt is per cpu
+ * IRQD_AFFINITY_SET           - Interrupt affinity was set
  */
 enum {
        /* Bit 0 - 7 reserved for TYPE will use later */
        IRQD_SETAFFINITY_PENDING        = (1 <<  8),
        IRQD_NO_BALANCING               = (1 << 10),
        IRQD_PER_CPU                    = (1 << 11),
+       IRQD_AFFINITY_SET               = (1 << 12),
 };
 
 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -157,6 +159,11 @@ static inline bool irqd_can_balance(struct irq_data *d)
        return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
 }
 
+static inline bool irqd_affinity_was_set(struct irq_data *d)
+{
+       return d->state_use_accessors & IRQD_AFFINITY_SET;
+}
+
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
index 5e33aadadacc76eb4694dcc0c5a2fd335bf1811d..6bbaf66aca859f52049b806866dd145cc2c3291f 100644 (file)
@@ -46,6 +46,15 @@ static inline void irq_compat_clr_move_pending(struct irq_desc *desc)
 {
        desc->status &= ~IRQ_MOVE_PENDING;
 }
+static inline void irq_compat_set_affinity(struct irq_desc *desc)
+{
+       desc->status |= IRQ_AFFINITY_SET;
+}
+
+static inline void irq_compat_clr_affinity(struct irq_desc *desc)
+{
+       desc->status &= ~IRQ_AFFINITY_SET;
+}
 #else
 static inline void irq_compat_set_progress(struct irq_desc *desc) { }
 static inline void irq_compat_clr_progress(struct irq_desc *desc) { }
@@ -57,5 +66,7 @@ static inline void irq_compat_set_masked(struct irq_desc *desc) { }
 static inline void irq_compat_clr_masked(struct irq_desc *desc) { }
 static inline void irq_compat_set_move_pending(struct irq_desc *desc) { }
 static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { }
+static inline void irq_compat_set_affinity(struct irq_desc *desc) { }
+static inline void irq_compat_clr_affinity(struct irq_desc *desc) { }
 #endif
 
index a80b44d2735e1b2b87a91da2cf26a5bcda3411b8..6776453c454c2f58e8e5278d3f440b9ea3172868 100644 (file)
@@ -150,3 +150,7 @@ static inline void irqd_set(struct irq_data *d, unsigned int mask)
        d->state_use_accessors |= mask;
 }
 
+static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
+{
+       return d->state_use_accessors & mask;
+}
index 550ae97a004078843c4624dcfd0f289ef8bde999..8246afc81956e9cf59654b3d75a28afeb090c183 100644 (file)
@@ -164,7 +164,8 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
                kref_get(&desc->affinity_notify->kref);
                schedule_work(&desc->affinity_notify->work);
        }
-       desc->status |= IRQ_AFFINITY_SET;
+       irq_compat_set_affinity(desc);
+       irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
 }
@@ -272,12 +273,14 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
         * Preserve an userspace affinity setup, but make sure that
         * one of the targets is online.
         */
-       if (desc->status & (IRQ_AFFINITY_SET)) {
+       if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
                if (cpumask_intersects(desc->irq_data.affinity,
                                       cpu_online_mask))
                        set = desc->irq_data.affinity;
-               else
-                       desc->status &= ~IRQ_AFFINITY_SET;
+               else {
+                       irq_compat_clr_affinity(desc);
+                       irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
+               }
        }
 
        cpumask_and(mask, cpu_online_mask, set);
index ba0fffe410ad2b58904078093872dff4dcb2ad38..da5acb446b1ce9871d21e0bed6968974c3449b31 100644 (file)
@@ -29,6 +29,8 @@ enum {
 #define IRQ_PER_CPU            GOT_YOU_MORON
 #undef IRQ_NO_BALANCING
 #define IRQ_NO_BALANCING       GOT_YOU_MORON
+#undef IRQ_AFFINITY_SET
+#define IRQ_AFFINITY_SET       GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK       GOT_YOU_MORON
 
This page took 0.029103 seconds and 5 git commands to generate.