/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
	int nr_active[MAX_NUMNODES];
	int nr_waiting;
};

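/*
 * The runq/bitmap pair works like the O(1) CPU scheduler of this era: one
 * list per priority level plus a bitmap of non-empty levels, so
 * grab_runnable_context() can find the highest priority waiting context
 * with a single sched_find_first_bit().
 */
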
static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

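/*
 * Worked example of the scaling (assuming the usual MAX_PRIO of 140 and
 * MAX_RT_PRIO of 100, i.e. MAX_USER_PRIO / 2 == 20, and a 100ms
 * DEF_SPU_TIMESLICE):
 *
 *	nice -20 (prio 100): SCALE_PRIO(4 * DEF, 100) = 4 * DEF * 40 / 20 -> 800ms
 *	nice   0 (prio 120): SCALE_PRIO(DEF, 120)     =     DEF * 20 / 20 -> 100ms
 *	nice +19 (prio 139): SCALE_PRIO(DEF, 139)     =     DEF *  1 / 20 ->   5ms
 *			     (clamped up to MIN_SPU_TIMESLICE if it truncates to 0)
 *
 * which matches the [800ms ... 100ms ... 5ms] range described below.
 */
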
/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with.  Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * A lot of places that don't hold active_mutex poke into
	 * cpus_allowed, including grab_runnable_context which
	 * already holds the runq_lock.  So abuse runq_lock
	 * to protect this field as well.
	 */
	spin_lock(&spu_prio->runq_lock);
	ctx->cpus_allowed = current->cpus_allowed;
	spin_unlock(&spu_prio->runq_lock);
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node = ctx->spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_update_sched_info(ctx);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	spu_prio->nr_active[node]++;
	list_add_tail(&spu->list, &spu_prio->active_list[node]);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static void __spu_remove_from_active_list(struct spu *spu)
{
	list_del_init(&spu->list);
	spu_prio->nr_active[spu->node]--;
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:	spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_remove_from_active_list(spu);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
				     ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block *n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block *n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}

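/*
 * For illustration only: a consumer (e.g. a profiler) can watch SPU
 * context switches by registering a notifier_block.  The callback and
 * variable names below are hypothetical; the action argument is the
 * context's object_id (0 on unbind) and the data pointer is the spu,
 * as passed by spu_switch_notify() above.
 *
 *	static int my_spu_switch_cb(struct notifier_block *nb,
 *				    unsigned long object_id, void *data)
 *	{
 *		struct spu *spu = data;
 *
 *		pr_debug("SPU %d now runs object %lx\n",
 *			 spu->number, object_id);
 *		return 0;
 *	}
 *
 *	static struct notifier_block my_spu_switch_nb = {
 *		.notifier_call	= my_spu_switch_cb,
 *	};
 *
 *	spu_switch_event_register(&my_spu_switch_nb);
 */
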
/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;
	spu_switch_state(spu, SPU_UTIL_SYSTEM);
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);

	spu_switch_state(spu, SPU_UTIL_IDLE);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	ctx->ops = &spu_backing_ops;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);
}

/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = tmp;
		}
		mutex_unlock(&spu_prio->active_mutex[node]);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, victim);
			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	spuctx_switch_state(ctx, SPUCTX_UTIL_SYSTEM);

	do {
		struct spu *spu;

		/*
		 * If there are multiple threads waiting for a single context
		 * only one actually binds the context while the others will
		 * only be able to acquire the state_mutex once the context
		 * already is in runnable state.
		 */
		if (ctx->spu)
			return 0;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			spu_add_to_active_list(spu);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = sched_find_first_bit(spu_prio->bitmap);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			ctx->stats.vol_ctx_switch++;
			spu->stats.vol_ctx_switch++;
			spu_free(spu);
			if (new)
				wake_up(&new->stop_wq);
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	/*
	 * We must never reach this for a nosched context,
	 * but handle the case gracefully instead of panicking.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		WARN_ON(1);
		return;
	}

	__spu_deactivate(ctx, 1, MAX_PRIO);
	spuctx_switch_state(ctx, SPUCTX_UTIL_USER);
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		if (__spu_deactivate(ctx, 0, MAX_PRIO)) {
			spuctx_switch_state(ctx, SPUCTX_UTIL_USER);
		} else {
			spuctx_switch_state(ctx, SPUCTX_UTIL_LOADED);
			spu_switch_state(ctx->spu, SPU_UTIL_USER);
		}
		mutex_unlock(&ctx->state_mutex);
	}
}

static void spusched_tick(struct spu_context *ctx)
{
	if (ctx->flags & SPU_CREATE_NOSCHED)
		return;
	if (ctx->policy == SCHED_FIFO)
		return;

	if (--ctx->time_slice)
		return;

	/*
	 * Unfortunately active_mutex ranks outside of state_mutex, so
	 * we have to trylock here.  If we fail give the context another
	 * tick and try again.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu *spu = ctx->spu;
		struct spu_context *new;

		new = grab_runnable_context(ctx->prio + 1, spu->node);
		if (new) {
			__spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			ctx->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu_free(spu);
			wake_up(&new->stop_wq);
			/*
			 * We need to break out of the wait loop in
			 * spu_run manually to ensure this context
			 * gets put on the runqueue again ASAP.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / active_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += spu_prio->nr_active[node];
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - given tick count, update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;

	if (unlikely(count < 0)) {
		active_tasks = count_active_contexts() * FIXED_1;
		do {
			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}

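/*
 * For reference: CALC_LOAD() (from linux/sched.h) maintains the usual
 * exponentially decaying average in FSHIFT-bit fixed point, roughly
 *
 *	load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT
 *
 * with exp = EXP_1/EXP_5/EXP_15 chosen so that the three spu_avenrun
 * entries decay over 1, 5 and 15 minutes, exactly like the CPU loadavg.
 */
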
static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
	spu_calc_load(SPUSCHED_TICK);
}

static int spusched_thread(void *unused)
{
	struct spu *spu, *next;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&spu_prio->active_mutex[node]);
			list_for_each_entry_safe(spu, next,
						 &spu_prio->active_list[node],
						 list)
				spusched_tick(spu->ctx);
			mutex_unlock(&spu_prio->active_mutex[node]);
		}
	}

	return 0;
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

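/*
 * Example of the fixed-point split above: with FSHIFT == 11 (FIXED_1 ==
 * 2048), a load of 1.50 is stored as 3072; LOAD_INT(3072) == 1 and
 * LOAD_FRAC(3072) == (1024 * 100) >> 11 == 50, printed as "1.50".
 */
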
static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side..),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

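/*
 * The resulting /proc/spu_loadavg line mimics /proc/loadavg; an
 * illustrative (made up) example:
 *
 *	0.12 0.08 0.01 2/16 4242
 *
 * i.e. the 1/5/15 minute SPU load averages, runnable contexts / total
 * spu contexts, and the last pid allocated in the current pid namespace.
 */
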
static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp,
					 &spu_prio->active_list[node], list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
}