/*
 * drivers/s390/cio/cio.c
 *  S/390 common I/O routines -- low level i/o calls
 *
 *    Copyright IBM Corp. 1999,2008
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include <asm/cpu.h>
#include <asm/fcx.h>
#include "cio.h"
#include "css.h"
#include "chsc.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "chp.h"
#include "../s390mach.h"

debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;

/*
 * Function: cio_debug_init
 * Initializes three debug logs for common I/O:
 * - cio_msg logs generic cio messages
 * - cio_trace logs the calling of different functions
 * - cio_crw logs machine check related cio messages
 */
static int __init cio_debug_init(void)
{
        cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
        if (!cio_debug_msg_id)
                goto out_unregister;
        debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
        debug_set_level(cio_debug_msg_id, 2);
        cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
        if (!cio_debug_trace_id)
                goto out_unregister;
        debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
        debug_set_level(cio_debug_trace_id, 2);
        cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
        if (!cio_debug_crw_id)
                goto out_unregister;
        debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
        debug_set_level(cio_debug_crw_id, 4);
        return 0;

out_unregister:
        if (cio_debug_msg_id)
                debug_unregister(cio_debug_msg_id);
        if (cio_debug_trace_id)
                debug_unregister(cio_debug_trace_id);
        if (cio_debug_crw_id)
                debug_unregister(cio_debug_crw_id);
        return -1;
}

arch_initcall(cio_debug_init);
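
/*
 * The three logs registered above are exposed by the s390 debug feature
 * (s390dbf) through debugfs. A sketch of how to inspect them at runtime,
 * assuming debugfs is mounted at its usual location:
 *
 *   # cat /sys/kernel/debug/s390dbf/cio_msg/sprintf
 *   # cat /sys/kernel/debug/s390dbf/cio_trace/hex_ascii
 *   # echo 6 > /sys/kernel/debug/s390dbf/cio_msg/level
 *
 * Raising the level makes lower-importance CIO_MSG_EVENT() calls visible.
 */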

int
cio_set_options(struct subchannel *sch, int flags)
{
        sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
        sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
        sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
        return 0;
}

/* FIXME: who wants to use this? */
int
cio_get_options(struct subchannel *sch)
{
        int flags;

        flags = 0;
        if (sch->options.suspend)
                flags |= DOIO_ALLOW_SUSPEND;
        if (sch->options.prefetch)
                flags |= DOIO_DENY_PREFETCH;
        if (sch->options.inter)
                flags |= DOIO_SUPPRESS_INTER;
        return flags;
}
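
/*
 * Usage sketch (illustrative, not compiled here): a driver that wants its
 * channel programs to be suspendable and to suppress intermediate
 * interrupts would set both flags before issuing a start:
 *
 *   cio_set_options(sch, DOIO_ALLOW_SUSPEND | DOIO_SUPPRESS_INTER);
 *
 * The flags only take effect for subsequent cio_start()/cio_start_key()
 * calls, where they are translated into ORB bits (see below).
 */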

static int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
        char dbf_text[15];

        if (lpm != 0)
                sch->lpm &= ~lpm;
        else
                sch->lpm = 0;

        CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
                      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
                      sch->schid.sch_no);

        if (cio_update_schib(sch))
                return -ENODEV;

        sprintf(dbf_text, "no%s", dev_name(&sch->dev));
        CIO_TRACE_EVENT(0, dbf_text);
        CIO_HEX_EVENT(0, &sch->schib, sizeof(struct schib));

        return (sch->lpm ? -EACCES : -ENODEV);
}

int
cio_start_key(struct subchannel *sch,   /* subchannel structure */
              struct ccw1 *cpa,         /* logical channel prog addr */
              __u8 lpm,                 /* logical path mask */
              __u8 key)                 /* storage key */
{
        char dbf_txt[15];
        int ccode;
        union orb *orb;

        CIO_TRACE_EVENT(4, "stIO");
        CIO_TRACE_EVENT(4, dev_name(&sch->dev));

        orb = &to_io_private(sch)->orb;
        memset(orb, 0, sizeof(union orb));
        /* sch is always under 2G. */
        orb->cmd.intparm = (u32)(addr_t)sch;
        orb->cmd.fmt = 1;

        orb->cmd.pfch = sch->options.prefetch == 0;
        orb->cmd.spnd = sch->options.suspend;
        orb->cmd.ssic = sch->options.suspend && sch->options.inter;
        orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
        /*
         * For 64 bit we always support 64 bit IDAWs with 4k page size only.
         */
        orb->cmd.c64 = 1;
        orb->cmd.i2k = 0;
#endif
        /* Callers pass the storage key in the upper nibble of the byte
         * (cf. PAGE_DEFAULT_KEY); shift it into the 4-bit ORB key field. */
        orb->cmd.key = key >> 4;
        /* issue "Start Subchannel" */
        orb->cmd.cpa = (__u32) __pa(cpa);
        ccode = ssch(sch->schid, orb);

        /* process condition code */
        sprintf(dbf_txt, "ccode:%d", ccode);
        CIO_TRACE_EVENT(4, dbf_txt);

        switch (ccode) {
        case 0:
                /* initialize device status information */
                sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
                return 0;
        case 1:         /* status pending */
        case 2:         /* busy */
                return -EBUSY;
        case 3:         /* device/path not operational */
                return cio_start_handle_notoper(sch, lpm);
        default:
                return ccode;
        }
}

int
cio_start(struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
{
        return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
}
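
/*
 * Usage sketch (illustrative only): starting a one-command channel program.
 * The ccw must live in storage addressable by the channel subsystem (below
 * 2G, since orb->cmd.cpa is 32 bits); real callers embed it in their
 * GFP_DMA-allocated private structures.
 *
 *   struct ccw1 ccw;
 *   int ret;
 *
 *   ccw.cmd_code = CCW_CMD_NOOP;
 *   ccw.flags = CCW_FLAG_SLI;    // suppress incorrect length indication
 *   ccw.count = 0;
 *   ccw.cda = 0;
 *   ret = cio_start(sch, &ccw, 0);       // lpm 0: use sch->lpm
 *
 * Completion is signaled asynchronously through the subchannel's interrupt
 * handler (sch->driver->irq), not by cio_start() itself.
 */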

/*
 * resume suspended I/O operation
 */
int
cio_resume(struct subchannel *sch)
{
        char dbf_txt[15];
        int ccode;

        CIO_TRACE_EVENT(4, "resIO");
        CIO_TRACE_EVENT(4, dev_name(&sch->dev));

        ccode = rsch(sch->schid);

        sprintf(dbf_txt, "ccode:%d", ccode);
        CIO_TRACE_EVENT(4, dbf_txt);

        switch (ccode) {
        case 0:
                sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
                return 0;
        case 1:
                return -EBUSY;
        case 2:
                return -EINVAL;
        default:
                /*
                 * useless to wait for request completion
                 * as device is no longer operational!
                 */
                return -ENODEV;
        }
}

/*
 * halt I/O operation
 */
int
cio_halt(struct subchannel *sch)
{
        char dbf_txt[15];
        int ccode;

        if (!sch)
                return -ENODEV;

        CIO_TRACE_EVENT(2, "haltIO");
        CIO_TRACE_EVENT(2, dev_name(&sch->dev));

        /* Issue "Halt Subchannel" and process condition code. */
        ccode = hsch(sch->schid);

        sprintf(dbf_txt, "ccode:%d", ccode);
        CIO_TRACE_EVENT(2, dbf_txt);

        switch (ccode) {
        case 0:
                sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
                return 0;
        case 1:         /* status pending */
        case 2:         /* busy */
                return -EBUSY;
        default:        /* device not operational */
                return -ENODEV;
        }
}

/*
 * Clear I/O operation
 */
int
cio_clear(struct subchannel *sch)
{
        char dbf_txt[15];
        int ccode;

        if (!sch)
                return -ENODEV;

        CIO_TRACE_EVENT(2, "clearIO");
        CIO_TRACE_EVENT(2, dev_name(&sch->dev));

        /* Issue "Clear Subchannel" and process condition code. */
        ccode = csch(sch->schid);

        sprintf(dbf_txt, "ccode:%d", ccode);
        CIO_TRACE_EVENT(2, dbf_txt);

        switch (ccode) {
        case 0:
                sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
                return 0;
        default:        /* device not operational */
                return -ENODEV;
        }
}

/*
 * Function: cio_cancel
 * Issues a "Cancel Subchannel" on the specified subchannel.
 * Note: We don't need any fancy intparms and flags here
 *       since xsch is executed synchronously.
 * Only for common I/O internal use as for now.
 */
int
cio_cancel(struct subchannel *sch)
{
        char dbf_txt[15];
        int ccode;

        if (!sch)
                return -ENODEV;

        CIO_TRACE_EVENT(2, "cancelIO");
        CIO_TRACE_EVENT(2, dev_name(&sch->dev));

        ccode = xsch(sch->schid);

        sprintf(dbf_txt, "ccode:%d", ccode);
        CIO_TRACE_EVENT(2, dbf_txt);

        switch (ccode) {
        case 0:         /* success */
                /* Update information in scsw. */
                if (cio_update_schib(sch))
                        return -ENODEV;
                return 0;
        case 1:         /* status pending */
                return -EBUSY;
        case 2:         /* not applicable */
                return -EINVAL;
        default:        /* not oper */
                return -ENODEV;
        }
}
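
/*
 * Note on the three termination functions above: hsch (halt) ends an
 * ongoing operation but still lets the device present status; csch (clear)
 * terminates unconditionally, discards status and always signals completion
 * via an interrupt; xsch (cancel) merely withdraws a start function that
 * the channel subsystem has not yet initiated. See the z/Architecture
 * Principles of Operation for the full semantics.
 */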

static void cio_apply_config(struct subchannel *sch, struct schib *schib)
{
        schib->pmcw.intparm = sch->config.intparm;
        schib->pmcw.mbi = sch->config.mbi;
        schib->pmcw.isc = sch->config.isc;
        schib->pmcw.ena = sch->config.ena;
        schib->pmcw.mme = sch->config.mme;
        schib->pmcw.mp = sch->config.mp;
        schib->pmcw.csense = sch->config.csense;
        schib->pmcw.mbfc = sch->config.mbfc;
        if (sch->config.mbfc)
                schib->mba = sch->config.mba;
}

static int cio_check_config(struct subchannel *sch, struct schib *schib)
{
        return (schib->pmcw.intparm == sch->config.intparm) &&
                (schib->pmcw.mbi == sch->config.mbi) &&
                (schib->pmcw.isc == sch->config.isc) &&
                (schib->pmcw.ena == sch->config.ena) &&
                (schib->pmcw.mme == sch->config.mme) &&
                (schib->pmcw.mp == sch->config.mp) &&
                (schib->pmcw.csense == sch->config.csense) &&
                (schib->pmcw.mbfc == sch->config.mbfc) &&
                (!sch->config.mbfc || (schib->mba == sch->config.mba));
}

/*
 * cio_commit_config - apply configuration to the subchannel
 */
int cio_commit_config(struct subchannel *sch)
{
        struct schib schib;
        int ccode, retry, ret = 0;

        if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
                return -ENODEV;

        for (retry = 0; retry < 5; retry++) {
                /* copy desired changes to local schib */
                cio_apply_config(sch, &schib);
                ccode = msch_err(sch->schid, &schib);
                if (ccode < 0) /* -EIO if msch gets a program check. */
                        return ccode;
                switch (ccode) {
                case 0: /* successful */
                        if (stsch(sch->schid, &schib) ||
                            !css_sch_is_valid(&schib))
                                return -ENODEV;
                        if (cio_check_config(sch, &schib)) {
                                /* commit changes from local schib */
                                memcpy(&sch->schib, &schib, sizeof(schib));
                                return 0;
                        }
                        ret = -EAGAIN;
                        break;
                case 1: /* status pending */
                        return -EBUSY;
                case 2: /* busy */
                        udelay(100); /* allow for recovery */
                        ret = -EBUSY;
                        break;
                case 3: /* not operational */
                        return -ENODEV;
                }
        }
        return ret;
}
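
/*
 * Usage sketch (illustrative only): callers modify the shadow configuration
 * in sch->config and then commit it, e.g. to enable the measurement block
 * facility:
 *
 *   sch->config.mme = 1;
 *   sch->config.mbfc = 1;
 *   sch->config.mba = mba;       // hypothetical measurement block address
 *   ret = cio_commit_config(sch);
 *
 * cio_commit_config() retries msch and verifies via stsch that the hardware
 * actually accepted every field before updating sch->schib.
 */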

/**
 * cio_update_schib - Perform stsch and update schib if subchannel is valid.
 * @sch: subchannel on which to perform stsch
 * Return zero on success, -ENODEV otherwise.
 */
int cio_update_schib(struct subchannel *sch)
{
        struct schib schib;

        if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
                return -ENODEV;

        memcpy(&sch->schib, &schib, sizeof(schib));
        return 0;
}
EXPORT_SYMBOL_GPL(cio_update_schib);

/**
 * cio_enable_subchannel - enable a subchannel.
 * @sch: subchannel to be enabled
 * @intparm: interruption parameter to set
 */
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
        char dbf_txt[15];
        int retry;
        int ret;

        CIO_TRACE_EVENT(2, "ensch");
        CIO_TRACE_EVENT(2, dev_name(&sch->dev));

        if (sch_is_pseudo_sch(sch))
                return -EINVAL;
        if (cio_update_schib(sch))
                return -ENODEV;

        sch->config.ena = 1;
        sch->config.isc = sch->isc;
        sch->config.intparm = intparm;

        for (retry = 0; retry < 3; retry++) {
                ret = cio_commit_config(sch);
                if (ret == -EIO) {
                        /*
                         * Got a program check in msch. Try without
                         * the concurrent sense bit the next time.
                         */
                        sch->config.csense = 0;
                } else if (ret == -EBUSY) {
                        struct irb irb;

                        if (tsch(sch->schid, &irb) != 0)
                                break;
                } else
                        break;
        }
        sprintf(dbf_txt, "ret:%d", ret);
        CIO_TRACE_EVENT(2, dbf_txt);
        return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);

/**
 * cio_disable_subchannel - disable a subchannel.
 * @sch: subchannel to disable
 */
int cio_disable_subchannel(struct subchannel *sch)
{
        char dbf_txt[15];
        int ret;

        CIO_TRACE_EVENT(2, "dissch");
        CIO_TRACE_EVENT(2, dev_name(&sch->dev));

        if (sch_is_pseudo_sch(sch))
                return 0;
        if (cio_update_schib(sch))
                return -ENODEV;

        if (scsw_actl(&sch->schib.scsw) != 0)
                /*
                 * The disable function must not be called while there are
                 * requests pending for completion!
                 */
                return -EBUSY;

        sch->config.ena = 0;
        ret = cio_commit_config(sch);

        sprintf(dbf_txt, "ret:%d", ret);
        CIO_TRACE_EVENT(2, dbf_txt);
        return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
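
/*
 * Usage sketch (illustrative only): a subchannel driver typically enables
 * the subchannel once during device setup and disables it on teardown:
 *
 *   ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
 *   if (ret)
 *           return ret;
 *   ...
 *   cio_disable_subchannel(sch);  // fails with -EBUSY while I/O is pending
 *
 * Passing the subchannel address as intparm lets do_IRQ() map incoming
 * interrupts back to the subchannel, as done for the console below.
 */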

int cio_create_sch_lock(struct subchannel *sch)
{
        sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
        if (!sch->lock)
                return -ENOMEM;
        spin_lock_init(sch->lock);
        return 0;
}

static int cio_check_devno_blacklisted(struct subchannel *sch)
{
        if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
                /*
                 * This device must not be known to Linux. So we simply
                 * say that there is no device and return ENODEV.
                 */
                CIO_MSG_EVENT(6, "Blacklisted device detected "
                              "at devno %04X, subchannel set %x\n",
                              sch->schib.pmcw.dev, sch->schid.ssid);
                return -ENODEV;
        }
        return 0;
}

static int cio_validate_io_subchannel(struct subchannel *sch)
{
        /* Initialization for io subchannels. */
        if (!css_sch_is_valid(&sch->schib))
                return -ENODEV;

        /* Devno is valid. */
        return cio_check_devno_blacklisted(sch);
}

static int cio_validate_msg_subchannel(struct subchannel *sch)
{
        /* Initialization for message subchannels. */
        if (!css_sch_is_valid(&sch->schib))
                return -ENODEV;

        /* Devno is valid. */
        return cio_check_devno_blacklisted(sch);
}

/**
 * cio_validate_subchannel - basic validation of subchannel
 * @sch: subchannel structure to be filled out
 * @schid: subchannel id
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   0 on success
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for invalid subchannels or blacklisted devices
 *   -EIO for subchannels in an invalid subchannel set
 */
int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
{
        char dbf_txt[15];
        int ccode;
        int err;

        sprintf(dbf_txt, "valsch%x", schid.sch_no);
        CIO_TRACE_EVENT(4, dbf_txt);

        /* Nuke all fields. */
        memset(sch, 0, sizeof(struct subchannel));

        sch->schid = schid;
        if (cio_is_console(schid)) {
                sch->lock = cio_get_console_lock();
        } else {
                err = cio_create_sch_lock(sch);
                if (err)
                        goto out;
        }
        mutex_init(&sch->reg_mutex);
        /* Set a name for the subchannel. */
        if (cio_is_console(schid))
                sch->dev.init_name = cio_get_console_sch_name(schid);
        else
                dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no);

        /*
         * The first subchannel that is not-operational (ccode==3)
         * indicates that there aren't any more devices available.
         * If stsch gets an exception, it means the current subchannel set
         * is not valid.
         */
        ccode = stsch_err(schid, &sch->schib);
        if (ccode) {
                err = (ccode == 3) ? -ENXIO : ccode;
                goto out;
        }
        /* Copy subchannel type from path management control word. */
        sch->st = sch->schib.pmcw.st;

        switch (sch->st) {
        case SUBCHANNEL_TYPE_IO:
                err = cio_validate_io_subchannel(sch);
                break;
        case SUBCHANNEL_TYPE_MSG:
                err = cio_validate_msg_subchannel(sch);
                break;
        default:
                err = 0;
        }
        if (err)
                goto out;

        CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
                      sch->schid.ssid, sch->schid.sch_no, sch->st);
        return 0;
out:
        if (!cio_is_console(schid))
                kfree(sch->lock);
        sch->lock = NULL;
        return err;
}

/*
 * do_IRQ() handles all normal I/O device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void
do_IRQ(struct pt_regs *regs)
{
        struct tpi_info *tpi_info;
        struct subchannel *sch;
        struct irb *irb;
        struct pt_regs *old_regs;

        old_regs = set_irq_regs(regs);
        irq_enter();
        s390_idle_check();
        if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
                /* Serve timer interrupts first. */
                clock_comparator_work();
        /* Get interrupt information from lowcore. */
        tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
        irb = (struct irb *) __LC_IRB;
        do {
                kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
                /*
                 * Non I/O-subchannel thin interrupts are processed
                 * differently.
                 */
                if (tpi_info->adapter_IO == 1 &&
                    tpi_info->int_type == IO_INTERRUPT_TYPE) {
                        do_adapter_IO(tpi_info->isc);
                        continue;
                }
                sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
                if (!sch) {
                        /* Clear pending interrupt condition. */
                        tsch(tpi_info->schid, irb);
                        continue;
                }
                spin_lock(sch->lock);
                /* Store interrupt response block to lowcore. */
                if (tsch(tpi_info->schid, irb) == 0) {
                        /* Keep subchannel information word up to date. */
                        memcpy(&sch->schib.scsw, &irb->scsw,
                               sizeof(irb->scsw));
                        /* Call interrupt handler if there is one. */
                        if (sch->driver && sch->driver->irq)
                                sch->driver->irq(sch);
                }
                spin_unlock(sch->lock);
                /*
                 * Are more interrupts pending?
                 * If so, the tpi instruction will update the lowcore
                 * to hold the info for the next interrupt.
                 * We don't do this for VM because a tpi drops the cpu
                 * out of the sie which costs more cycles than it saves.
                 */
        } while (!MACHINE_IS_VM && tpi(NULL) != 0);
        irq_exit();
        set_irq_regs(old_regs);
}

#ifdef CONFIG_CCW_CONSOLE
static struct subchannel console_subchannel;
static char console_sch_name[10] = "0.x.xxxx";
static struct io_subchannel_private console_priv;
static int console_subchannel_in_use;

/*
 * Use tpi to get a pending interrupt, call the interrupt handler and
 * return a pointer to the subchannel structure.
 */
static int cio_tpi(void)
{
        struct tpi_info *tpi_info;
        struct subchannel *sch;
        struct irb *irb;
        int irq_context;

        tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
        if (tpi(NULL) != 1)
                return 0;
        irb = (struct irb *) __LC_IRB;
        /* Store interrupt response block to lowcore. */
        if (tsch(tpi_info->schid, irb) != 0)
                /* Not status pending or not operational. */
                return 1;
        sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
        if (!sch)
                return 1;
        irq_context = in_interrupt();
        if (!irq_context)
                local_bh_disable();
        irq_enter();
        spin_lock(sch->lock);
        memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
        if (sch->driver && sch->driver->irq)
                sch->driver->irq(sch);
        spin_unlock(sch->lock);
        irq_exit();
        if (!irq_context)
                _local_bh_enable();
        return 1;
}

void *cio_get_console_priv(void)
{
        return &console_priv;
}

/*
 * busy wait for the next interrupt on the console
 */
void wait_cons_dev(void)
        __releases(console_subchannel.lock)
        __acquires(console_subchannel.lock)
{
        unsigned long cr6 __attribute__ ((aligned (8)));
        unsigned long save_cr6 __attribute__ ((aligned (8)));

        /*
         * Before entering the spinlock we may already have
         * processed the interrupt on a different CPU...
         */
        if (!console_subchannel_in_use)
                return;

        /* disable all but the console isc */
        __ctl_store(save_cr6, 6, 6);
        cr6 = 1UL << (31 - CONSOLE_ISC);
        __ctl_load(cr6, 6, 6);

        do {
                spin_unlock(console_subchannel.lock);
                if (!cio_tpi())
                        cpu_relax();
                spin_lock(console_subchannel.lock);
        } while (console_subchannel.schib.scsw.cmd.actl != 0);
        /* restore previous isc value */
        __ctl_load(save_cr6, 6, 6);
}

static int
cio_test_for_console(struct subchannel_id schid, void *data)
{
        if (stsch_err(schid, &console_subchannel.schib) != 0)
                return -ENXIO;
        if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
            console_subchannel.schib.pmcw.dnv &&
            (console_subchannel.schib.pmcw.dev == console_devno)) {
                console_irq = schid.sch_no;
                return 1; /* found */
        }
        return 0;
}

static int
cio_get_console_sch_no(void)
{
        struct subchannel_id schid;

        init_subchannel_id(&schid);
        if (console_irq != -1) {
                /* VM provided us with the irq number of the console. */
                schid.sch_no = console_irq;
                if (stsch(schid, &console_subchannel.schib) != 0 ||
                    (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
                    !console_subchannel.schib.pmcw.dnv)
                        return -1;
                console_devno = console_subchannel.schib.pmcw.dev;
        } else if (console_devno != -1) {
                /* At least the console device number is known. */
                for_each_subchannel(cio_test_for_console, NULL);
                if (console_irq == -1)
                        return -1;
        } else {
                /*
                 * Unlike in 2.4, we cannot autoprobe here, since
                 * the channel subsystem is not fully initialized.
                 * With some luck, the HWC console can take over.
                 */
                return -1;
        }
        return console_irq;
}

struct subchannel *
cio_probe_console(void)
{
        int sch_no, ret;
        struct subchannel_id schid;

        if (xchg(&console_subchannel_in_use, 1) != 0)
                return ERR_PTR(-EBUSY);
        sch_no = cio_get_console_sch_no();
        if (sch_no == -1) {
                console_subchannel_in_use = 0;
                printk(KERN_WARNING "cio: No ccw console found!\n");
                return ERR_PTR(-ENODEV);
        }
        memset(&console_subchannel, 0, sizeof(struct subchannel));
        init_subchannel_id(&schid);
        schid.sch_no = sch_no;
        ret = cio_validate_subchannel(&console_subchannel, schid);
        if (ret) {
                console_subchannel_in_use = 0;
                return ERR_PTR(-ENODEV);
        }

        /* enable console I/O-interrupt subclass */
        isc_register(CONSOLE_ISC);
        console_subchannel.config.isc = CONSOLE_ISC;
        console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel;
        ret = cio_commit_config(&console_subchannel);
        if (ret) {
                isc_unregister(CONSOLE_ISC);
                console_subchannel_in_use = 0;
                return ERR_PTR(ret);
        }
        return &console_subchannel;
}

void
cio_release_console(void)
{
        console_subchannel.config.intparm = 0;
        cio_commit_config(&console_subchannel);
        isc_unregister(CONSOLE_ISC);
        console_subchannel_in_use = 0;
}

/* Bah... hack to catch console special sausages. */
int
cio_is_console(struct subchannel_id schid)
{
        if (!console_subchannel_in_use)
                return 0;
        return schid_equal(&schid, &console_subchannel.schid);
}

struct subchannel *
cio_get_console_subchannel(void)
{
        if (!console_subchannel_in_use)
                return NULL;
        return &console_subchannel;
}

const char *cio_get_console_sch_name(struct subchannel_id schid)
{
        snprintf(console_sch_name, 10, "0.%x.%04x", schid.ssid, schid.sch_no);
        return (const char *)console_sch_name;
}

#endif
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
        int retry, cc;

        cc = 0;
        for (retry = 0; retry < 3; retry++) {
                schib->pmcw.ena = 0;
                cc = msch(schid, schib);
                if (cc)
                        return (cc == 3) ? -ENODEV : -EBUSY;
                if (stsch(schid, schib) || !css_sch_is_valid(schib))
                        return -ENODEV;
                if (!schib->pmcw.ena)
                        return 0;
        }
        return -EBUSY; /* uhm... */
}

static int
__clear_io_subchannel_easy(struct subchannel_id schid)
{
        int retry;

        if (csch(schid))
                return -ENODEV;
        for (retry = 0; retry < 20; retry++) {
                struct tpi_info ti;

                if (tpi(&ti)) {
                        tsch(ti.schid, (struct irb *)__LC_IRB);
                        if (schid_equal(&ti.schid, &schid))
                                return 0;
                }
                udelay_simple(100);
        }
        return -EBUSY;
}

static void __clear_chsc_subchannel_easy(void)
{
        /* It seems we can only wait for a bit here :/ */
        udelay_simple(100);
}

static int pgm_check_occurred;

static void cio_reset_pgm_check_handler(void)
{
        pgm_check_occurred = 1;
}

static int stsch_reset(struct subchannel_id schid, struct schib *addr)
{
        int rc;

        pgm_check_occurred = 0;
        s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
        rc = stsch(schid, addr);
        s390_base_pgm_handler_fn = NULL;

        /* The program check handler could have changed pgm_check_occurred. */
        barrier();

        if (pgm_check_occurred)
                return -EIO;
        else
                return rc;
}

static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
        struct schib schib;

        if (stsch_reset(schid, &schib))
                return -ENXIO;
        if (!schib.pmcw.ena)
                return 0;
        switch (__disable_subchannel_easy(schid, &schib)) {
        case 0:
        case -ENODEV:
                break;
        default: /* -EBUSY */
                switch (schib.pmcw.st) {
                case SUBCHANNEL_TYPE_IO:
                        if (__clear_io_subchannel_easy(schid))
                                goto out; /* give up... */
                        break;
                case SUBCHANNEL_TYPE_CHSC:
                        __clear_chsc_subchannel_easy();
                        break;
                default:
                        /* No default clear strategy */
                        break;
                }
                stsch(schid, &schib);
                __disable_subchannel_easy(schid, &schib);
        }
out:
        return 0;
}

static atomic_t chpid_reset_count;

static void s390_reset_chpids_mcck_handler(void)
{
        struct crw crw;
        struct mci *mci;

        /* Check for pending channel report word. */
        mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
        if (!mci->cp)
                return;
        /* Process channel report words. */
        while (stcrw(&crw) == 0) {
                /* Check for responses to RCHP. */
                if (crw.slct && crw.rsc == CRW_RSC_CPATH)
                        atomic_dec(&chpid_reset_count);
        }
}

#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
static void css_reset(void)
{
        int i, ret;
        unsigned long long timeout;
        struct chp_id chpid;

        /* Reset subchannels. */
        for_each_subchannel(__shutdown_subchannel_easy, NULL);
        /* Reset channel paths. */
        s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
        /* Enable channel report machine checks. */
        __ctl_set_bit(14, 28);
        /* Temporarily reenable machine checks. */
        local_mcck_enable();
        chp_id_init(&chpid);
        for (i = 0; i <= __MAX_CHPID; i++) {
                chpid.id = i;
                ret = rchp(chpid);
                if ((ret == 0) || (ret == 2))
                        /*
                         * rchp either succeeded, or another rchp is already
                         * in progress. In either case, we'll get a crw.
                         */
                        atomic_inc(&chpid_reset_count);
        }
        /* Wait for machine check for all channel paths. */
        timeout = get_clock() + (RCHP_TIMEOUT << 12);
        while (atomic_read(&chpid_reset_count) != 0) {
                if (get_clock() > timeout)
                        break;
                cpu_relax();
        }
        /* Disable machine checks again. */
        local_mcck_disable();
        /* Disable channel report machine checks. */
        __ctl_clear_bit(14, 28);
        s390_base_mcck_handler_fn = NULL;
}

static struct reset_call css_reset_call = {
        .fn = css_reset,
};

static int __init init_css_reset_call(void)
{
        atomic_set(&chpid_reset_count, 0);
        register_reset_call(&css_reset_call);
        return 0;
}

arch_initcall(init_css_reset_call);

struct sch_match_id {
        struct subchannel_id schid;
        struct ccw_dev_id devid;
        int rc;
};

static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
{
        struct schib schib;
        struct sch_match_id *match_id = data;

        if (stsch_reset(schid, &schib))
                return -ENXIO;
        if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
            (schib.pmcw.dev == match_id->devid.devno) &&
            (schid.ssid == match_id->devid.ssid)) {
                match_id->schid = schid;
                match_id->rc = 0;
                return 1;
        }
        return 0;
}

static int reipl_find_schid(struct ccw_dev_id *devid,
                            struct subchannel_id *schid)
{
        struct sch_match_id match_id;

        match_id.devid = *devid;
        match_id.rc = -ENODEV;
        for_each_subchannel(__reipl_subchannel_match, &match_id);
        if (match_id.rc == 0)
                *schid = match_id.schid;
        return match_id.rc;
}

extern void do_reipl_asm(__u32 schid);

/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl_ccw_dev(struct ccw_dev_id *devid)
{
        struct subchannel_id schid;

        s390_reset_system();
        if (reipl_find_schid(devid, &schid) != 0)
                panic("IPL Device not found\n");
        do_reipl_asm(*((__u32 *)&schid));
}

int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
{
        struct subchannel_id schid;
        struct schib schib;

        schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
        if (!schid.one)
                return -ENODEV;
        if (stsch(schid, &schib))
                return -ENODEV;
        if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
                return -ENODEV;
        if (!schib.pmcw.dnv)
                return -ENODEV;
        iplinfo->devno = schib.pmcw.dev;
        iplinfo->is_qdio = schib.pmcw.qf;
        return 0;
}

/**
 * cio_tm_start_key - perform start function
 * @sch: subchannel on which to perform the start function
 * @tcw: transport-command word to be started
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 *
 * Start the tcw on the given subchannel. Return zero on success, non-zero
 * otherwise.
 */
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
{
        int cc;
        union orb *orb = &to_io_private(sch)->orb;

        memset(orb, 0, sizeof(union orb));
        orb->tm.intparm = (u32) (addr_t) sch;
        orb->tm.key = key >> 4;
        orb->tm.b = 1;
        orb->tm.lpm = lpm ? lpm : sch->lpm;
        orb->tm.tcw = (u32) (addr_t) tcw;
        cc = ssch(sch->schid, orb);
        switch (cc) {
        case 0:
                return 0;
        case 1:
        case 2:
                return -EBUSY;
        default:
                return cio_start_handle_notoper(sch, lpm);
        }
}
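
/*
 * Usage sketch (illustrative only): transport-mode requests are built with
 * the fcx helpers from <asm/fcx.h> before being started, roughly along
 * these lines (tcw, tccb, buffer and num_tidaws are hypothetical):
 *
 *   tcw_init(tcw, 1, 0);                 // read, no write
 *   tcw_set_tccb(tcw, tccb);
 *   tcw_set_data(tcw, buffer, 0);
 *   tcw_finalize(tcw, num_tidaws);
 *   ret = cio_tm_start_key(sch, tcw, 0, PAGE_DEFAULT_KEY);
 *
 * As with command mode, completion arrives through the subchannel's
 * interrupt handler.
 */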

/**
 * cio_tm_intrg - perform interrogate function
 * @sch: subchannel on which to perform the interrogate function
 *
 * If the specified subchannel is running in transport-mode, perform the
 * interrogate function. Return zero on success, non-zero otherwise.
 */
int cio_tm_intrg(struct subchannel *sch)
{
        int cc;

        if (!to_io_private(sch)->orb.tm.b)
                return -EINVAL;
        cc = xsch(sch->schid);
        switch (cc) {
        case 0:
        case 2:
                return 0;
        case 1:
                return -EBUSY;
        default:
                return -ENODEV;
        }
}