/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by: Nicolas Pitre, March 2012
 * Copyright:  (C) 2012-2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>


/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
	unsigned int id;
	asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
	return id & MPIDR_HWID_BITMASK;
}

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_unused)
{
	unsigned mpidr, cpuid, clusterid, ob_cluster, ib_cluster;

	pr_debug("%s\n", __func__);

	mpidr = read_mpidr();
	cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	ob_cluster = clusterid;
	ib_cluster = clusterid ^ 1;

	/*
	 * Our state has been saved at this point. Let's release our
	 * inbound CPU.
	 */
	mcpm_set_entry_vector(cpuid, ib_cluster, cpu_resume);
	sev();

	/*
	 * From this point, we must assume that our counterpart CPU might
	 * have taken over in its parallel world already, as if execution
	 * just returned from cpu_suspend(). It is therefore important to
	 * be very careful not to make any change the other guy is not
	 * expecting. This is why we need stack isolation.
	 *
	 * Fancy under cover tasks could be performed here. For now
	 * we have none.
	 */

	/* Let's put ourselves down. */
	mcpm_cpu_power_down();

	/* should never get here */
	BUG();
}

/*
 * Stack isolation. To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
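/*
 * Concretely, with STACK_SIZE = 512 the area just above thread_info is
 * carved into one 512-byte slot per cluster. ARM stacks are full
 * descending, so the sp handed to call_with_stack() below points at the
 * top of the selected slot: base + 512 for cluster 0 and base + 1024 for
 * cluster 1, where "base" is the cache-line-aligned address just past
 * thread_info.
 */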
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
	unsigned int mpidr = read_mpidr();
	unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	void *stack = current_thread_info() + 1;
	stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
	stack += clusterid * STACK_SIZE + STACK_SIZE;
	call_with_stack(bL_do_switch, (void *)_arg, stack);
	BUG();
}

/*
 * Generic switcher interface
 */

static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
	unsigned int mpidr, cpuid, clusterid, ob_cluster, ib_cluster, this_cpu;
	struct tick_device *tdev;
	enum clock_event_mode tdev_mode;
	int ret;

	mpidr = read_mpidr();
	cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	ob_cluster = clusterid;
	ib_cluster = clusterid ^ 1;

	if (new_cluster_id == clusterid)
		return 0;

	pr_debug("before switch: CPU %d in cluster %d\n", cpuid, clusterid);

	/* Close the gate for our entry vectors */
	mcpm_set_entry_vector(cpuid, ob_cluster, NULL);
	mcpm_set_entry_vector(cpuid, ib_cluster, NULL);

	/*
	 * Let's wake up the inbound CPU now in case it requires some delay
	 * to come online, but leave it gated in our entry vector code.
	 */
	ret = mcpm_cpu_power_up(cpuid, ib_cluster);
	if (ret) {
		pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
		return ret;
	}

	/*
	 * From this point we are entering the switch critical zone
	 * and can't take any interrupts anymore.
	 */
	local_irq_disable();
	local_fiq_disable();

	this_cpu = smp_processor_id();

	/* redirect GIC's SGIs to our counterpart */
	gic_migrate_target(bL_gic_id[cpuid][ib_cluster]);

	/*
	 * Raise an SGI on the inbound CPU to make sure it doesn't stall
	 * in a possible WFI, such as in mcpm_cpu_power_down().
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(this_cpu));

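	/*
	 * Quiesce the local tick device across the switch; its mode and
	 * next event are restored further down, once we are running on
	 * the inbound CPU.
	 */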
	tdev = tick_get_device(this_cpu);
	if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
		tdev = NULL;
	if (tdev) {
		tdev_mode = tdev->evtdev->mode;
		clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
	}

	ret = cpu_pm_enter();

	/* we cannot tolerate errors at this point */
	if (ret)
		panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

	/* Flip the cluster in the CPU logical map for this CPU. */
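	/* (bit 8 is the least significant bit of the MPIDR Aff1/cluster field) */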
	cpu_logical_map(this_cpu) ^= (1 << 8);

	/* Let's do the actual CPU switch. */
	ret = cpu_suspend(0, bL_switchpoint);
	if (ret > 0)
		panic("%s: cpu_suspend() returned %d\n", __func__, ret);

	/* We are executing on the inbound CPU at this point */
	mpidr = read_mpidr();
	cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("after switch: CPU %d in cluster %d\n", cpuid, clusterid);
	BUG_ON(clusterid != ib_cluster);

	mcpm_cpu_powered_up();

	ret = cpu_pm_exit();

	if (tdev) {
		clockevents_set_mode(tdev->evtdev, tdev_mode);
		clockevents_program_event(tdev->evtdev,
					  tdev->evtdev->next_event, 1);
	}

	local_fiq_enable();
	local_irq_enable();

	if (ret)
		pr_err("%s exiting with error %d\n", __func__, ret);
	return ret;
}

struct bL_thread {
	struct task_struct *task;
	wait_queue_head_t wq;
	int wanted_cluster;
	struct completion started;
};

static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
	struct bL_thread *t = arg;
	struct sched_param param = { .sched_priority = 1 };
	int cluster;

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
	complete(&t->started);

	do {
		if (signal_pending(current))
			flush_signals(current);
		wait_event_interruptible(t->wq,
				t->wanted_cluster != -1 ||
				kthread_should_stop());
		cluster = xchg(&t->wanted_cluster, -1);
		if (cluster != -1)
			bL_switch_to(cluster);
	} while (!kthread_should_stop());

	return 0;
}

static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
	struct task_struct *task;

	task = kthread_create_on_node(bL_switcher_thread, arg,
				      cpu_to_node(cpu), "kswitcher_%d", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);
		wake_up_process(task);
	} else
		pr_err("%s failed for CPU %d\n", __func__, cpu);
	return task;
}

/*
 * bL_switch_request - Switch to a specific cluster for the given CPU
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread. This function may or may not return
 * before the switch has occurred.
 */
int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
{
	struct bL_thread *t;

	if (cpu >= ARRAY_SIZE(bL_threads)) {
		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
		return -EINVAL;
	}

	t = &bL_threads[cpu];
	if (IS_ERR(t->task))
		return PTR_ERR(t->task);
	if (!t->task)
		return -ESRCH;

	t->wanted_cluster = new_cluster_id;
	wake_up(&t->wq);
	return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request);
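
/*
 * Example (hypothetical caller, e.g. a cpufreq backend that decided the
 * current operating point is better served by the other cluster):
 *
 *	if (bL_switch_request(cpu, target_cluster))
 *		pr_warn("switch request for CPU %u failed\n", cpu);
 *
 * Note this only queues the request with the per-CPU switcher thread and
 * may well return before the switch has actually taken place.
 */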

/*
 * Activation and configuration code.
 */

static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[MAX_CPUS_PER_CLUSTER];
static cpumask_t bL_switcher_removed_logical_cpus;

static void bL_switcher_restore_cpus(void)
{
	int i;

	for_each_cpu(i, &bL_switcher_removed_logical_cpus)
		cpu_up(i);
}

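/*
 * With the switcher active, only one logical CPU per big/LITTLE pair stays
 * online: CPUs sharing the same physical CPU number across the two clusters
 * are paired up, the GIC CPU interface ID of each side is recorded in
 * bL_gic_id[], and the sibling whose logical number differs from its
 * physical number is taken down with cpu_down().
 */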
static int bL_switcher_halve_cpus(void)
{
	int cpu, cluster, i, ret;
	cpumask_t cluster_mask[2], common_mask;

	cpumask_clear(&bL_switcher_removed_logical_cpus);
	cpumask_clear(&cluster_mask[0]);
	cpumask_clear(&cluster_mask[1]);

	for_each_online_cpu(i) {
		cpu = cpu_logical_map(i) & 0xff;
		cluster = (cpu_logical_map(i) >> 8) & 0xff;
		if (cluster >= 2) {
			pr_err("%s: only dual cluster systems are supported\n", __func__);
			return -EINVAL;
		}
		cpumask_set_cpu(cpu, &cluster_mask[cluster]);
	}

	if (!cpumask_and(&common_mask, &cluster_mask[0], &cluster_mask[1])) {
		pr_err("%s: no common set of CPUs\n", __func__);
		return -EINVAL;
	}

	for_each_online_cpu(i) {
		cpu = cpu_logical_map(i) & 0xff;
		cluster = (cpu_logical_map(i) >> 8) & 0xff;

		if (cpumask_test_cpu(cpu, &common_mask)) {
			/* Let's take note of the GIC ID for this CPU */
			int gic_id = gic_get_cpu_id(i);
			if (gic_id < 0) {
				pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
				return -EINVAL;
			}
			bL_gic_id[cpu][cluster] = gic_id;
			pr_info("GIC ID for CPU %u cluster %u is %u\n",
				cpu, cluster, gic_id);

			/*
			 * We keep only those logical CPUs whose number
			 * is equal to their physical CPU number. This is
			 * not perfect but good enough for now.
			 */
			if (cpu == i) {
				bL_switcher_cpu_original_cluster[cpu] = cluster;
				continue;
			}
		}

		ret = cpu_down(i);
		if (ret) {
			bL_switcher_restore_cpus();
			return ret;
		}
		cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
	}

	return 0;
}

static int bL_switcher_enable(void)
{
	int cpu, ret;

	cpu_hotplug_driver_lock();
	if (bL_switcher_active) {
		cpu_hotplug_driver_unlock();
		return 0;
	}

	pr_info("big.LITTLE switcher initializing\n");

	ret = bL_switcher_halve_cpus();
	if (ret) {
		cpu_hotplug_driver_unlock();
		return ret;
	}

	for_each_online_cpu(cpu) {
		struct bL_thread *t = &bL_threads[cpu];
		init_waitqueue_head(&t->wq);
		init_completion(&t->started);
		t->wanted_cluster = -1;
		t->task = bL_switcher_thread_create(cpu, t);
	}

	bL_switcher_active = 1;
	cpu_hotplug_driver_unlock();

	pr_info("big.LITTLE switcher initialized\n");
	return 0;
}

#ifdef CONFIG_SYSFS

static void bL_switcher_disable(void)
{
	unsigned int cpu, cluster, i;
	struct bL_thread *t;
	struct task_struct *task;

	cpu_hotplug_driver_lock();
	if (!bL_switcher_active) {
		cpu_hotplug_driver_unlock();
		return;
	}
	bL_switcher_active = 0;

	/*
	 * To deactivate the switcher, we must shut down the switcher
	 * threads to prevent any other requests from being accepted.
	 * Then, if the final cluster for a given logical CPU is not the
	 * same as the original one, we'll recreate a switcher thread
	 * just for the purpose of switching the CPU back without any
	 * possibility for interference from external requests.
	 */
	for_each_online_cpu(cpu) {
		BUG_ON(cpu != (cpu_logical_map(cpu) & 0xff));
		t = &bL_threads[cpu];
		task = t->task;
		t->task = NULL;
		if (!task || IS_ERR(task))
			continue;
		kthread_stop(task);
		/* no more switch may happen on this CPU at this point */
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
		if (cluster == bL_switcher_cpu_original_cluster[cpu])
			continue;
		init_completion(&t->started);
		t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
		task = bL_switcher_thread_create(cpu, t);
		if (!IS_ERR(task)) {
			wait_for_completion(&t->started);
			kthread_stop(task);
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
			if (cluster == bL_switcher_cpu_original_cluster[cpu])
				continue;
		}
		/* If execution gets here, we're in trouble. */
		pr_crit("%s: unable to restore original cluster for CPU %d\n",
			__func__, cpu);
		for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
			if ((cpu_logical_map(i) & 0xff) != cpu)
				continue;
			pr_crit("%s: CPU %d can't be restored\n",
				__func__, i);
			cpumask_clear_cpu(i, &bL_switcher_removed_logical_cpus);
			break;
		}
	}

	bL_switcher_restore_cpus();
	cpu_hotplug_driver_unlock();
}

static ssize_t bL_switcher_active_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", bL_switcher_active);
}

static ssize_t bL_switcher_active_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret;

	switch (buf[0]) {
	case '0':
		bL_switcher_disable();
		ret = 0;
		break;
	case '1':
		ret = bL_switcher_enable();
		break;
	default:
		ret = -EINVAL;
	}

	return (ret >= 0) ? count : ret;
}

static struct kobj_attribute bL_switcher_active_attr =
	__ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);

static struct attribute *bL_switcher_attrs[] = {
	&bL_switcher_active_attr.attr,
	NULL,
};

static struct attribute_group bL_switcher_attr_group = {
	.attrs = bL_switcher_attrs,
};

static struct kobject *bL_switcher_kobj;

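/*
 * bL_switcher_sysfs_init() below creates /sys/kernel/bL_switcher/active:
 * writing '1' enables the switcher and writing '0' disables it, bringing
 * the removed logical CPUs back online.
 */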
static int __init bL_switcher_sysfs_init(void)
{
	int ret;

	bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
	if (!bL_switcher_kobj)
		return -ENOMEM;
	ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
	if (ret)
		kobject_put(bL_switcher_kobj);
	return ret;
}

#endif /* CONFIG_SYSFS */

static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
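/*
 * Booting with "no_bL_switcher=1" on the kernel command line leaves the
 * switcher disabled at boot; with CONFIG_SYSFS it can still be enabled
 * later by writing '1' to /sys/kernel/bL_switcher/active.
 */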

static int __init bL_switcher_init(void)
{
	int ret;

	if (MAX_NR_CLUSTERS != 2) {
		pr_err("%s: only dual cluster systems are supported\n", __func__);
		return -EINVAL;
	}

	if (!no_bL_switcher) {
		ret = bL_switcher_enable();
		if (ret)
			return ret;
	}

#ifdef CONFIG_SYSFS
	ret = bL_switcher_sysfs_init();
	if (ret)
		pr_err("%s: unable to create sysfs entry\n", __func__);
#endif

	return 0;
}

late_initcall(bL_switcher_init);