/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 * XPC provides a message passing capability that crosses partition
 * boundaries.  This module is made up of two parts:
 *
 *     partition	This part detects the presence/absence of other
 *			partitions.  It provides a heartbeat and monitors
 *			the heartbeats of other partitions.
 *
 *     channel		This part manages the channels and sends/receives
 *			messages across them to/from other partitions.
 *
 * There are a couple of additional functions residing in XP, which
 * provide an interface to XPC for its users (a usage sketch follows
 * this comment block).
 *
 *
 * Caveats:
 *
 *   . Currently on sn2, we have no way to determine which nasid an IRQ
 *     came from.  Thus, xpc_send_IRQ_sn2() does a remote amo write
 *     followed by an IPI.  The amo indicates where data is to be pulled
 *     from, so after the IPI arrives, the remote partition checks the amo
 *     word.  The IPI can actually arrive before the amo, however, so other
 *     code must periodically check for this case.  Also, remote amo
 *     operations do not reliably time out.  Thus we do a remote PIO read
 *     solely to know whether the remote partition is down and whether we
 *     should stop sending IPIs to it.  This remote PIO read operation is
 *     set up in a special nofault region so SAL knows to ignore (and
 *     clean up) any errors due to the remote amo write, PIO read, and/or
 *     PIO write operations.
 *
 *     If/when new hardware solves this IPI problem, we should abandon
 *     the current approach.
 *
 */
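
/*
 * A minimal consumer sketch (illustrative only; the real wrapper
 * prototypes, with their exact argument lists, live in xp.h):
 *
 *	ret = xpc_connect(ch_number, my_msg_func, key, payload_size,
 *			  nentries, assigned_limit, idle_limit);
 *	if (ret == xpSuccess)
 *		...exchange messages via xpc_send()/xpc_received()...
 *	xpc_disconnect(ch_number);
 */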

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include "xpc.h"

#ifdef CONFIG_X86_64
#include <asm/traps.h>
#endif

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
	.name = "xpc"
};

struct device xpc_part_dbg_subname = {
	.init_name = "",	/* set to "part" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
	.init_name = "",	/* set to "chan" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* sysctl tunables exposed via the /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit;	/* = 0 */
static int xpc_disengage_max_timelimit = 120;

static struct ctl_table xpc_sys_xpc_hb_dir[] = {
	{
	 .procname = "hb_interval",
	 .data = &xpc_hb_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_hb_min_interval,
	 .extra2 = &xpc_hb_max_interval},
	{
	 .procname = "hb_check_interval",
	 .data = &xpc_hb_check_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_hb_check_min_interval,
	 .extra2 = &xpc_hb_check_max_interval},
	{}
};
static struct ctl_table xpc_sys_xpc_dir[] = {
	{
	 .procname = "hb",
	 .mode = 0555,
	 .child = xpc_sys_xpc_hb_dir},
	{
	 .procname = "disengage_timelimit",
	 .data = &xpc_disengage_timelimit,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_disengage_min_timelimit,
	 .extra2 = &xpc_disengage_max_timelimit},
	{}
};
static struct ctl_table xpc_sys_dir[] = {
	{
	 .procname = "xpc",
	 .mode = 0555,
	 .child = xpc_sys_xpc_dir},
	{}
};
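
/*
 * Together the tables above expose /proc/sys/xpc/hb/hb_interval,
 * /proc/sys/xpc/hb/hb_check_interval and /proc/sys/xpc/disengage_timelimit.
 */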
static struct ctl_table_header *xpc_sysctl;

/* non-zero if any remote partition disengage timed out */
int xpc_disengage_timedout;

/* #of activate IRQs received and not yet processed */
int xpc_activate_IRQ_rcvd;
DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
	.notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
	.notifier_call = xpc_system_die,
};

struct xpc_arch_operations xpc_arch_ops;

/*
 * Timer function to enforce the timelimit on the partition disengage.
 */
static void
xpc_timeout_partition_disengage(unsigned long data)
{
	struct xpc_partition *part = (struct xpc_partition *)data;

	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));

	(void)xpc_partition_disengaged(part);

	DBUG_ON(part->disengage_timeout != 0);
	DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
}

/*
 * Timer to produce the heartbeat.  The timer structure's function field is
 * already set when this is initially called.  A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(unsigned long dummy)
{
	xpc_arch_ops.increment_heartbeat();

	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}
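
/*
 * Division of labor: xpc_hb_beater() above bumps our own heartbeat every
 * xpc_hb_interval seconds, while xpc_check_remote_hb() below (run from
 * xpc_hb_checker() roughly every xpc_hb_check_interval seconds) watches
 * everyone else's.
 */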

static void
xpc_start_hb_beater(void)
{
	xpc_arch_ops.heartbeat_init();
	init_timer(&xpc_hb_timer);
	xpc_hb_timer.function = xpc_hb_beater;
	xpc_hb_beater(0);
}

static void
xpc_stop_hb_beater(void)
{
	del_timer_sync(&xpc_hb_timer);
	xpc_arch_ops.heartbeat_exit();
}

/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active.  If not, the partition is deactivated.
 */
static void
xpc_check_remote_hb(void)
{
	struct xpc_partition *part;
	short partid;
	enum xp_retval ret;

	for (partid = 0; partid < xp_max_npartitions; partid++) {

		if (xpc_exiting)
			break;

		if (partid == xp_partition_id)
			continue;

		part = &xpc_partitions[partid];

		if (part->act_state == XPC_P_AS_INACTIVE ||
		    part->act_state == XPC_P_AS_DEACTIVATING) {
			continue;
		}

		ret = xpc_arch_ops.get_remote_heartbeat(part);
		if (ret != xpSuccess)
			XPC_DEACTIVATE_PARTITION(part, ret);
	}
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
	int force_IRQ = 0;

	/* this thread was marked active by xpc_hb_init() */

	set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));

	/* set our heartbeating to other partitions into motion */
	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
	xpc_start_hb_beater();

	while (!xpc_exiting) {

		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
			"been received\n",
			(int)(xpc_hb_check_timeout - jiffies),
			xpc_activate_IRQ_rcvd);

		/* checking of remote heartbeats is skewed by IRQ handling */
		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
			xpc_hb_check_timeout = jiffies +
			    (xpc_hb_check_interval * HZ);

			dev_dbg(xpc_part, "checking remote heartbeats\n");
			xpc_check_remote_hb();

			/*
			 * On sn2 we need to periodically recheck to ensure no
			 * IRQ/amo pairs have been missed.
			 */
			if (is_shub())
				force_IRQ = 1;
		}

		/* check for outstanding IRQs */
		if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
			force_IRQ = 0;
			dev_dbg(xpc_part, "processing activate IRQs "
				"received\n");
			xpc_arch_ops.process_activate_IRQ_rcvd();
		}

		/* wait for IRQ or timeout */
		(void)wait_event_interruptible(xpc_activate_IRQ_wq,
					       (time_is_before_eq_jiffies(
						xpc_hb_check_timeout) ||
						xpc_activate_IRQ_rcvd > 0 ||
						xpc_exiting));
	}

	xpc_stop_hb_beater();

	dev_dbg(xpc_part, "heartbeat checker is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_hb_checker_exited);
	return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL.  This new thread is short-lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
	xpc_discovery();

	dev_dbg(xpc_part, "discovery thread is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_discovery_exited);
	return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB, which calls xpc_activating().  XPC hangs on to
 * that kthread until the partition is brought down, at which time the kthread
 * returns to XPC HB.  (The return of that kthread will signify to XPC HB
 * that XPC has dismantled all communication infrastructure for the associated
 * partition.)  This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager which, besides connecting and
 * disconnecting channels, ensures that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
	while (part->act_state != XPC_P_AS_DEACTIVATING ||
	       atomic_read(&part->nchannels_active) > 0 ||
	       !xpc_partition_disengaged(part)) {

		xpc_process_sent_chctl_flags(part);

		/*
		 * Wait until we've been requested to activate kthreads or
		 * all of the channel's message queues have been torn down or
		 * a signal is pending.
		 *
		 * The channel_mgr_requests count is set to 1 after being
		 * awakened.  This is done to prevent the channel mgr from
		 * making one pass through the loop for each request, since it
		 * will be servicing all the requests in one pass.  The reason
		 * it's set to 1 instead of 0 is so that other kthreads will
		 * know that the channel mgr is running and won't bother
		 * trying to wake it up.
		 */
		atomic_dec(&part->channel_mgr_requests);
		(void)wait_event_interruptible(part->channel_mgr_wq,
				(atomic_read(&part->channel_mgr_requests) > 0 ||
				 part->chctl.all_flags != 0 ||
				 (part->act_state == XPC_P_AS_DEACTIVATING &&
				 atomic_read(&part->nchannels_active) == 0 &&
				 xpc_partition_disengaged(part))));
		atomic_set(&part->channel_mgr_requests, 1);
	}
}

/*
 * Guarantee that the kzalloc'd memory is cacheline aligned.
 */
void *
xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc will give us cacheline aligned memory by default */
	*base = kzalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}
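
/*
 * Callers must keep the *base cookie around, since that (and not the
 * possibly advanced pointer returned) is what must eventually be passed
 * to kfree(); see remote_openclose_args_base below for an example.
 */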

/*
 * Set up the channel structures necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static enum xp_retval
xpc_setup_ch_structures(struct xpc_partition *part)
{
	enum xp_retval ret;
	int ch_number;
	struct xpc_channel *ch;
	short partid = XPC_PARTID(part);

	/*
	 * Allocate all of the channel structures as a contiguous chunk of
	 * memory.
	 */
	DBUG_ON(part->channels != NULL);
	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
				 GFP_KERNEL);
	if (part->channels == NULL) {
		dev_err(xpc_chan, "can't get memory for channels\n");
		return xpNoMemory;
	}

	/* allocate the remote open and close args */

	part->remote_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
					  GFP_KERNEL, &part->
					  remote_openclose_args_base);
	if (part->remote_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for remote connect args\n");
		ret = xpNoMemory;
		goto out_1;
	}

	part->chctl.all_flags = 0;
	spin_lock_init(&part->chctl_lock);

	atomic_set(&part->channel_mgr_requests, 1);
	init_waitqueue_head(&part->channel_mgr_wq);

	part->nchannels = XPC_MAX_NCHANNELS;

	atomic_set(&part->nchannels_active, 0);
	atomic_set(&part->nchannels_engaged, 0);

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		ch->partid = partid;
		ch->number = ch_number;
		ch->flags = XPC_C_DISCONNECTED;

		atomic_set(&ch->kthreads_assigned, 0);
		atomic_set(&ch->kthreads_idle, 0);
		atomic_set(&ch->kthreads_active, 0);

		atomic_set(&ch->references, 0);
		atomic_set(&ch->n_to_notify, 0);

		spin_lock_init(&ch->lock);
		init_completion(&ch->wdisconnect_wait);

		atomic_set(&ch->n_on_msg_allocate_wq, 0);
		init_waitqueue_head(&ch->msg_allocate_wq);
		init_waitqueue_head(&ch->idle_wq);
	}

	ret = xpc_arch_ops.setup_ch_structures(part);
	if (ret != xpSuccess)
		goto out_2;

	/*
	 * With the setting of the partition setup_state to XPC_P_SS_SETUP,
	 * we're declaring that this partition is ready to go.
	 */
	part->setup_state = XPC_P_SS_SETUP;

	return xpSuccess;

	/* setup of ch structures failed */
out_2:
	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
out_1:
	kfree(part->channels);
	part->channels = NULL;
	return ret;
}

/*
 * Tear down the channel structures necessary to support XPartition
 * Communication between the specified remote partition and the local one.
 */
static void
xpc_teardown_ch_structures(struct xpc_partition *part)
{
	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);

	/*
	 * Make this partition inaccessible to local processes by marking it
	 * as no longer setup.  Then wait before proceeding with the teardown
	 * until all existing references cease.
	 */
	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
	part->setup_state = XPC_P_SS_WTEARDOWN;

	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	xpc_arch_ops.teardown_ch_structures(part);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->channels);
	part->channels = NULL;

	part->setup_state = XPC_P_SS_TORNDOWN;
}

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition, becoming the channel
 * manager for that partition until the partition is deactivating, at which
 * time the kthread will tear down the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
	short partid = (u64)__partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	unsigned long irq_flags;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		part->act_state = XPC_P_AS_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
	part->act_state = XPC_P_AS_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "activating partition %d\n", partid);

	xpc_arch_ops.allow_hb(partid);

	if (xpc_setup_ch_structures(part) == xpSuccess) {
		(void)xpc_part_ref(part);	/* this will always succeed */

		if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
			xpc_mark_partition_active(part);
			xpc_channel_mgr(part);
			/* won't return until partition is deactivating */
		}

		xpc_part_deref(part);
		xpc_teardown_ch_structures(part);
	}

	xpc_arch_ops.disallow_hb(partid);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpReactivating) {
		/* interrupting ourselves results in activating partition */
		xpc_arch_ops.request_partition_reactivation(part);
	}

	return 0;
}

void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);

	part->act_state = XPC_P_AS_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}

void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;

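	/*
	 * Worked example (illustrative): with needed == 5 and idle == 2, the
	 * two idle kthreads are woken and needed drops to 3; up to 3 new
	 * kthreads are then created, subject to kthreads_assigned_limit.
	 */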
	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0)
		return;

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		if (needed <= 0)
			return;
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	do {
		/* deliver messages to their intended recipients */

		while (n_of_deliverable_payloads(ch) > 0 &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_payload(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(n_of_deliverable_payloads(ch) > 0 ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}

static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them.  We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = n_of_deliverable_payloads(ch) - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
	    atomic_dec_return(&part->nchannels_engaged) == 0) {
		xpc_arch_ops.indicate_partition_disengaged(part);
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
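	/*
	 * The partid and channel number are packed into a single u64 that is
	 * handed to kthread_run() below; xpc_kthread_start() recovers them
	 * with XPC_UNPACK_ARG1()/XPC_UNPACK_ARG2().
	 */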
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;
	void (*indicate_partition_disengaged) (struct xpc_partition *) =
		xpc_arch_ops.indicate_partition_disengaged;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread.  That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
			xpc_arch_ops.indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}

void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_chctl_flags) {
			if (part->act_state != XPC_P_AS_DEACTIVATING) {
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch->number] |=
				    ch->delayed_chctl_flags;
				spin_unlock(&part->chctl_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_chctl_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}

static int
xpc_setup_partitions(void)
{
	short partid;
	struct xpc_partition *part;

	xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
				 xp_max_npartitions, GFP_KERNEL);
	if (xpc_partitions == NULL) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		return -ENOMEM;
	}

	/*
	 * The first few fields of each entry of xpc_partitions[] need to
	 * be initialized now so that calls to xpc_connect() and
	 * xpc_disconnect() can be made prior to the activation of any remote
	 * partition.  NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
	 * PARTITION HAS BEEN ACTIVATED.
	 */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

		part->activate_IRQ_rcvd = 0;
		spin_lock_init(&part->act_lock);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, 0, 0);

		init_timer(&part->disengage_timer);
		part->disengage_timer.function =
		    xpc_timeout_partition_disengage;
		part->disengage_timer.data = (unsigned long)part;

		part->setup_state = XPC_P_SS_UNSET;
		init_waitqueue_head(&part->teardown_wq);
		atomic_set(&part->references, 0);
	}

	return xpc_arch_ops.setup_partitions();
}

static void
xpc_teardown_partitions(void)
{
	xpc_arch_ops.teardown_partitions();
	kfree(xpc_partitions);
}

static void
xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_timeout = 0;

	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);

	/*
	 * Let the heartbeat checker thread and the discovery thread
	 * (if one is running) know that they should exit.  Also wake up
	 * the heartbeat checker thread in case it's sleeping.
	 */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_activate_IRQ_wq);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */

	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_AS_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			if (part->disengage_timeout > disengage_timeout)
				disengage_timeout = part->disengage_timeout;
		}

		if (xpc_arch_ops.any_partition_engaged()) {
			if (time_is_before_jiffies(printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to deactivate, timeout in "
					 "%ld seconds\n", (disengage_timeout -
					 jiffies) / HZ);
				printmsg_time = jiffies +
				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to deactivate\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "deactivated\n");
			}
			break;
		}

		/* sleep for 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_arch_ops.any_partition_engaged());

	xpc_teardown_rsvd_page();

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();

	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
	enum xp_retval reason;

	switch (event) {
	case SYS_RESTART:
		reason = xpSystemReboot;
		break;
	case SYS_HALT:
		reason = xpSystemHalt;
		break;
	case SYS_POWER_OFF:
		reason = xpSystemPoweroff;
		break;
	default:
		reason = xpSystemGoingDown;
	}

	xpc_do_exit(reason);
	return NOTIFY_DONE;
}

/* used to allow only one cpu to complete the disconnect */
static unsigned int xpc_die_disconnecting;

/*
 * Notify other partitions to deactivate from us by first disengaging from all
 * references to our memory.
 */
static void
xpc_die_deactivate(void)
{
	struct xpc_partition *part;
	short partid;
	int any_engaged;
	long keep_waiting;
	long wait_to_print;

	if (cmpxchg(&xpc_die_disconnecting, 0, 1))
		return;

	/* keep xpc_hb_checker thread from doing anything (just in case) */
	xpc_exiting = 1;

	xpc_arch_ops.disallow_all_hbs();	/* indicate we're deactivated */

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_arch_ops.partition_engaged(partid) ||
		    part->act_state != XPC_P_AS_INACTIVE) {
			xpc_arch_ops.request_partition_deactivation(part);
			xpc_arch_ops.indicate_partition_disengaged(part);
		}
	}

	/*
	 * Though we requested that all other partitions deactivate from us,
	 * we only wait until they've all disengaged or we've reached the
	 * defined timelimit.
	 *
	 * Given that one iteration through the following while-loop takes
	 * approximately 200 microseconds, calculate the #of loops to take
	 * before bailing and the #of loops before printing a waiting message.
	 */
	keep_waiting = xpc_disengage_timelimit * 1000 * 5;
	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
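
	/* 1 s / 200 us = 5000 iterations per second, hence the "* 1000 * 5" */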

	while (1) {
		any_engaged = xpc_arch_ops.any_partition_engaged();
		if (!any_engaged) {
			dev_info(xpc_part, "all partitions have deactivated\n");
			break;
		}

		if (!keep_waiting--) {
			for (partid = 0; partid < xp_max_npartitions;
			     partid++) {
				if (xpc_arch_ops.partition_engaged(partid)) {
					dev_info(xpc_part, "deactivate from "
						 "remote partition %d timed "
						 "out\n", partid);
				}
			}
			break;
		}

		if (!wait_to_print--) {
			dev_info(xpc_part, "waiting for remote partitions to "
				 "deactivate, timeout in %ld seconds\n",
				 keep_waiting / (1000 * 5));
			wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
			    1000 * 5;
		}

		udelay(200);
	}
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure.  If this is the case we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater may be temporarily
 * offlined.  In that case we need to notify other partitions to not worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
{
#ifdef CONFIG_IA64		/* !!! temporary kludge */
	switch (event) {
	case DIE_MACHINE_RESTART:
	case DIE_MACHINE_HALT:
		xpc_die_deactivate();
		break;

	case DIE_KDEBUG_ENTER:
		/* Should lack of heartbeat be ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_ENTER:
	case DIE_INIT_MONARCH_ENTER:
		xpc_arch_ops.offline_heartbeat();
		break;

	case DIE_KDEBUG_LEAVE:
		/* Is lack of heartbeat being ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_LEAVE:
	case DIE_INIT_MONARCH_LEAVE:
		xpc_arch_ops.online_heartbeat();
		break;
	}
#else
	struct die_args *die_args = _die_args;

	switch (event) {
	case DIE_TRAP:
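		/*
		 * X86_TRAP_DF is the double-fault vector; X86_TRAP_MF and
		 * X86_TRAP_XF are the x87 and SIMD floating-point exception
		 * vectors (see asm/traps.h).
		 */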
		if (die_args->trapnr == X86_TRAP_DF)
			xpc_die_deactivate();

		if (((die_args->trapnr == X86_TRAP_MF) ||
		     (die_args->trapnr == X86_TRAP_XF)) &&
		    !user_mode(die_args->regs))
			xpc_die_deactivate();

		break;
	case DIE_INT3:
	case DIE_DEBUG:
		break;
	case DIE_OOPS:
	case DIE_GPF:
	default:
		xpc_die_deactivate();
	}
#endif

	return NOTIFY_DONE;
}

int __init
xpc_init(void)
{
	int ret;
	struct task_struct *kthread;

	dev_set_name(xpc_part, "part");
	dev_set_name(xpc_chan, "chan");

	if (is_shub()) {
		/*
		 * The ia64-sn2 architecture supports at most 64 partitions.
		 * And the inability to unregister remote amos restricts us
		 * further to only support exactly 64 partitions on this
		 * architecture, no less.
		 */
		if (xp_max_npartitions != 64) {
			dev_err(xpc_part, "max #of partitions not set to 64\n");
			ret = -EINVAL;
		} else {
			ret = xpc_init_sn2();
		}

	} else if (is_uv()) {
		ret = xpc_init_uv();

	} else {
		ret = -ENODEV;
	}

	if (ret != 0)
		return ret;

	ret = xpc_setup_partitions();
	if (ret != 0) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		goto out_1;
	}

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	ret = xpc_setup_rsvd_page();
	if (ret != 0) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		goto out_2;
	}

	/* add ourselves to the reboot_notifier_list */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	/*
	 * The real work-horse behind xpc.  This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_3;
	}

	/*
	 * Start up a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL.  This new thread is
	 * short-lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_send, xpc_initiate_send_notify,
			  xpc_initiate_received, xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_3:
	xpc_teardown_rsvd_page();

	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();
out_1:
	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
	return ret;
}

module_init(xpc_init);

void __exit
xpc_exit(void)
{
	xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
		 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
		 "heartbeat checks.");

module_param(xpc_disengage_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
		 "for disengage to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
		 "other partitions when dropping into kdebug.");
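
/*
 * Illustrative only (the values shown are hypothetical): since the perm
 * arguments above are 0, these parameters can only be set at load time, e.g.
 *
 *	modprobe xpc xpc_hb_interval=2 xpc_hb_check_interval=20 \
 *		xpc_disengage_timelimit=90 xpc_kdebug_ignore=1
 */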