From 2bdd10558c8d93009cb6c32ce9e30800fbb08add Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 8 Feb 2011 17:22:00 +0100
Subject: [PATCH] genirq: Move IRQ_AFFINITY_SET to core

Keep status in sync until last abuser is gone.

Signed-off-by: Thomas Gleixner
---
 include/linux/irq.h    |  9 ++++++++-
 kernel/irq/compat.h    | 11 +++++++++++
 kernel/irq/internals.h |  4 ++++
 kernel/irq/manage.c    | 11 +++++++----
 kernel/irq/settings.h  |  2 ++
 5 files changed, 32 insertions(+), 5 deletions(-)

diff --git a/include/linux/irq.h b/include/linux/irq.h
index d5312e6fe1aa..8da1782ecfca 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -60,6 +60,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_MASKED		0x00002000	/* DEPRECATED */
 /* DEPRECATED use irq_setaffinity_pending() instead*/
 #define IRQ_MOVE_PENDING	0x00004000
+#define IRQ_AFFINITY_SET	0x02000000	/* DEPRECATED */
 #endif
 
 #define IRQ_LEVEL		0x00008000	/* IRQ level triggered */
@@ -70,7 +71,6 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_WAKEUP		0x00100000	/* IRQ triggers system wakeup */
 #define IRQ_NO_BALANCING	0x00400000	/* IRQ is excluded from balancing */
 #define IRQ_MOVE_PCNTXT	0x01000000	/* IRQ migration from process context */
-#define IRQ_AFFINITY_SET	0x02000000	/* IRQ affinity was set from userspace*/
 #define IRQ_NESTED_THREAD	0x10000000	/* IRQ is nested into another, no own handler thread */
 
 #define IRQF_MODIFY_MASK	\
@@ -134,12 +134,14 @@ struct irq_data {
  * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending
  * IRQD_NO_BALANCING		- Balancing disabled for this IRQ
  * IRQD_PER_CPU			- Interrupt is per cpu
+ * IRQD_AFFINITY_SET		- Interrupt affinity was set
  */
 enum {
 	/* Bit 0 - 7 reserved for TYPE will use later */
 	IRQD_SETAFFINITY_PENDING	= (1 << 8),
 	IRQD_NO_BALANCING		= (1 << 10),
 	IRQD_PER_CPU			= (1 << 11),
+	IRQD_AFFINITY_SET		= (1 << 12),
 };
 
 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -157,6 +159,11 @@ static inline bool irqd_can_balance(struct irq_data *d)
 	return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
 }
 
+static inline bool irqd_affinity_was_set(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_AFFINITY_SET;
+}
+
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
diff --git a/kernel/irq/compat.h b/kernel/irq/compat.h
index 5e33aadadacc..6bbaf66aca85 100644
--- a/kernel/irq/compat.h
+++ b/kernel/irq/compat.h
@@ -46,6 +46,15 @@ static inline void irq_compat_clr_move_pending(struct irq_desc *desc)
 {
 	desc->status &= ~IRQ_MOVE_PENDING;
 }
+static inline void irq_compat_set_affinity(struct irq_desc *desc)
+{
+	desc->status |= IRQ_AFFINITY_SET;
+}
+
+static inline void irq_compat_clr_affinity(struct irq_desc *desc)
+{
+	desc->status &= ~IRQ_AFFINITY_SET;
+}
 #else
 static inline void irq_compat_set_progress(struct irq_desc *desc) { }
 static inline void irq_compat_clr_progress(struct irq_desc *desc) { }
@@ -57,5 +66,7 @@ static inline void irq_compat_set_masked(struct irq_desc *desc) { }
 static inline void irq_compat_clr_masked(struct irq_desc *desc) { }
 static inline void irq_compat_set_move_pending(struct irq_desc *desc) { }
 static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { }
+static inline void irq_compat_set_affinity(struct irq_desc *desc) { }
+static inline void irq_compat_clr_affinity(struct irq_desc *desc) { }
 #endif
 
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index a80b44d2735e..6776453c454c 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -150,3 +150,7 @@ static inline void irqd_set(struct irq_data *d, unsigned int mask)
 {
 	d->state_use_accessors |= mask;
 }
+static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
+{
+	return d->state_use_accessors & mask;
+}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 550ae97a0040..8246afc81956 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -164,7 +164,8 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 		kref_get(&desc->affinity_notify->kref);
 		schedule_work(&desc->affinity_notify->work);
 	}
-	desc->status |= IRQ_AFFINITY_SET;
+	irq_compat_set_affinity(desc);
+	irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
@@ -272,12 +273,14 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 	 * Preserve an userspace affinity setup, but make sure that
 	 * one of the targets is online.
 	 */
-	if (desc->status & (IRQ_AFFINITY_SET)) {
+	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
 		if (cpumask_intersects(desc->irq_data.affinity,
 				       cpu_online_mask))
 			set = desc->irq_data.affinity;
-		else
-			desc->status &= ~IRQ_AFFINITY_SET;
+		else {
+			irq_compat_clr_affinity(desc);
+			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
+		}
 	}
 
 	cpumask_and(mask, cpu_online_mask, set);
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index ba0fffe410ad..da5acb446b1c 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -29,6 +29,8 @@ enum {
 #define IRQ_PER_CPU		GOT_YOU_MORON
 #undef IRQ_NO_BALANCING
 #define IRQ_NO_BALANCING	GOT_YOU_MORON
+#undef IRQ_AFFINITY_SET
+#define IRQ_AFFINITY_SET	GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
 
-- 
2.34.1
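
For context only (not part of the patch): while the compat layer is in place, a core-code site that marks user-set affinity updates both representations under desc->lock, so legacy readers of desc->status keep seeing IRQ_AFFINITY_SET until the last abuser is converted. A minimal sketch mirroring the irq_set_affinity() hunk above; the wrapper name mark_affinity_set() is hypothetical, the helpers it calls are the ones the patch introduces.

/* Hypothetical illustration, mirroring the manage.c hunk above. */
static void mark_affinity_set(struct irq_desc *desc)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	/* Legacy view: keeps the deprecated desc->status IRQ_AFFINITY_SET bit in sync. */
	irq_compat_set_affinity(desc);
	/* New view: the irq_data state bit, queried via irqd_affinity_was_set()/irqd_has_set(). */
	irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

Once the last user of the old status bit is gone, the irq_compat_* call drops out and only the irqd_* accessor remains.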