static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
struct workqueue_struct *system_wq __read_mostly;
-EXPORT_SYMBOL_GPL(system_wq);
+EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
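
Dropping the _GPL suffix here matters beyond system_wq itself: schedule_work() and the other convenience helpers are static inlines that reference these symbols directly, so GPL-only exports made even the basic helpers unusable from non-GPL modules. A minimal sketch of the case this re-enables, assuming a proprietary module; all demo_* names are illustrative:

/* Minimal sketch of a non-GPL module queueing onto system_wq.
 * With EXPORT_SYMBOL_GPL, the schedule_work() inline below would
 * fail to link against a proprietary module.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	schedule_work(&demo_work);	/* queues on system_wq */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_work);		/* don't unload with work pending */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("Proprietary");		/* non-GPL: needs EXPORT_SYMBOL */
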
	local_irq_restore(flags);
	return ret;
}
-EXPORT_SYMBOL_GPL(queue_work_on);
+EXPORT_SYMBOL(queue_work_on);
void delayed_work_timer_fn(unsigned long __data)
{
	local_irq_restore(flags);
	return ret;
}
-EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+EXPORT_SYMBOL(queue_delayed_work_on);
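
queue_work_on() and queue_delayed_work_on() get the same treatment since the queue_work() and queue_delayed_work() inlines bottom out in them. A hedged sketch of the delayed variant, again with illustrative demo_* names:

/* Sketch: queue_delayed_work() is an inline over
 * queue_delayed_work_on(WORK_CPU_UNBOUND, ...), so the plain export is
 * what makes the delayed helpers usable from non-GPL code as well.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static void demo_timeout_fn(struct work_struct *work)
{
	pr_info("delayed demo work fired\n");
}

static DECLARE_DELAYED_WORK(demo_dwork, demo_timeout_fn);

static void demo_arm(void)
{
	/* fire roughly one second from now on the local CPU's pool */
	queue_delayed_work(system_wq, &demo_dwork, HZ);
}
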
/**
* mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
	if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
		spin_unlock_irq(&pool->lock);
		mutex_lock(&pool->manager_mutex);
+		spin_lock_irq(&pool->lock);
		ret = true;
	}
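
This hunk fixes a locking imbalance in manage_workers(): when the trylock fails, pool->lock must be dropped before blocking on manager_mutex (sleeping while holding a spinlock is forbidden), but the function previously returned without re-taking the spinlock its caller still expects to hold. The underlying pattern, as a standalone sketch with illustrative demo_* names:

/* The "upgrade a trylock to a blocking lock" pattern the fix restores:
 * the spinlock is dropped around the blocking mutex_lock() and must be
 * re-acquired afterwards so the function returns with the lock state
 * it promised. This is a sketch, not the workqueue code itself.
 */
#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static DEFINE_MUTEX(demo_mutex);

/* Called with demo_lock held (irqs off); returns with it still held. */
static void demo_grab_mutex_locked(void)
{
	if (!mutex_trylock(&demo_mutex)) {
		spin_unlock_irq(&demo_lock);
		mutex_lock(&demo_mutex);	/* may sleep */
		spin_lock_irq(&demo_lock);	/* restore caller's state */
	}
	/* ... demo_lock and demo_mutex both held here ... */
}
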
* no synchronization around this function and the test result is
* unreliable and only useful as advisory hints or for debugging.
*
+ * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
+ * Note that both per-cpu and unbound workqueues may be associated with
+ * multiple pool_workqueues which have separate congested states. A
+ * workqueue being congested on one CPU doesn't mean the workqueue is also
+ * congested on other CPUs / NUMA nodes.
+ *
* RETURNS:
* %true if congested, %false otherwise.
*/
	rcu_read_lock_sched();

+	if (cpu == WORK_CPU_UNBOUND)
+		cpu = smp_processor_id();
+
	if (!(wq->flags & WQ_UNBOUND))
		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
	else
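
With the two added lines, WORK_CPU_UNBOUND is resolved to the current CPU before the pool_workqueue lookup, so a caller can probe local congestion without calling smp_processor_id() itself. A usage sketch in the advisory spirit the comment describes; demo_* names are illustrative:

/* Back off when the local CPU's pool_workqueue looks congested. */
#include <linux/workqueue.h>

static bool demo_try_queue(struct workqueue_struct *demo_wq,
			   struct work_struct *demo_work)
{
	/* WORK_CPU_UNBOUND now means "test the local CPU" */
	if (workqueue_congested(WORK_CPU_UNBOUND, demo_wq))
		return false;	/* hint only; the result may be stale */

	return queue_work(demo_wq, demo_work);
}
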
	BUG_ON(!tbl);

	for_each_node(node)
-		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node));
+		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+				node_online(node) ? node : NUMA_NO_NODE));

	for_each_possible_cpu(cpu) {
		node = cpu_to_node(cpu);
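
The wq_numa_init() hunk stops requesting node-affine allocations for nodes that are possible but offline, where node-local memory may not exist yet; NUMA_NO_NODE tells the allocator it may place the cpumask anywhere. The same guard in isolation, as a sketch with an illustrative helper name:

/* Only ask for node-affine memory when the node is online; otherwise
 * let the allocator pick any node. demo_alloc_for_node is a made-up
 * helper, not the wq table allocation itself.
 */
#include <linux/slab.h>
#include <linux/nodemask.h>
#include <linux/numa.h>

static void *demo_alloc_for_node(size_t size, int node)
{
	/* an offline node may have no memory to satisfy the request;
	 * NUMA_NO_NODE means "no placement preference" */
	return kzalloc_node(size, GFP_KERNEL,
			    node_online(node) ? node : NUMA_NO_NODE);
}
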