[S390] cio: make sense id procedure work with partial hardware response
[deliverable/linux.git] / drivers / s390 / cio / chsc.c
CommitLineData
1da177e4
LT
1/*
2 * drivers/s390/cio/chsc.c
3 * S/390 common I/O routines -- channel subsystem call
1da177e4
LT
4 *
5 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
6 * IBM Corporation
7 * Author(s): Ingo Adlung (adlung@de.ibm.com)
4ce3b30c 8 * Cornelia Huck (cornelia.huck@de.ibm.com)
1da177e4
LT
9 * Arnd Bergmann (arndb@de.ibm.com)
10 */
11
12#include <linux/module.h>
1da177e4
LT
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/device.h>
16
17#include <asm/cio.h>
e5854a58 18#include <asm/chpid.h>
1da177e4
LT
19
20#include "css.h"
21#include "cio.h"
22#include "cio_debug.h"
23#include "ioasm.h"
e6b6e10a 24#include "chp.h"
1da177e4
LT
25#include "chsc.h"
26
1da177e4
LT
27static void *sei_page;
28
7ad6a249
PO
/*
 * Request/response block for the CHSC store-subchannel-description command
 * (command code 0x0004). The layout is defined by the hardware interface,
 * hence the bit-field padding and the packed attribute -- do not reorder
 * or resize any member.
 */
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));
52
53int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
1da177e4 54{
7ad6a249
PO
55 unsigned long page;
56 struct chsc_ssd_area *ssd_area;
57 int ccode;
58 int ret;
59 int i;
60 int mask;
1da177e4 61
7ad6a249
PO
62 page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
63 if (!page)
64 return -ENOMEM;
65 ssd_area = (struct chsc_ssd_area *) page;
495a5b45
CH
66 ssd_area->request.length = 0x0010;
67 ssd_area->request.code = 0x0004;
7ad6a249
PO
68 ssd_area->ssid = schid.ssid;
69 ssd_area->f_sch = schid.sch_no;
70 ssd_area->l_sch = schid.sch_no;
1da177e4
LT
71
72 ccode = chsc(ssd_area);
7ad6a249 73 /* Check response. */
1da177e4 74 if (ccode > 0) {
7ad6a249
PO
75 ret = (ccode == 3) ? -ENODEV : -EBUSY;
76 goto out_free;
1da177e4 77 }
7ad6a249
PO
78 if (ssd_area->response.code != 0x0001) {
79 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
80 schid.ssid, schid.sch_no,
1da177e4 81 ssd_area->response.code);
7ad6a249
PO
82 ret = -EIO;
83 goto out_free;
1da177e4 84 }
7ad6a249
PO
85 if (!ssd_area->sch_valid) {
86 ret = -ENODEV;
87 goto out_free;
1da177e4 88 }
7ad6a249
PO
89 /* Copy data */
90 ret = 0;
91 memset(ssd, 0, sizeof(struct chsc_ssd_info));
b279a4f5
CH
92 if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
93 (ssd_area->st != SUBCHANNEL_TYPE_MSG))
7ad6a249
PO
94 goto out_free;
95 ssd->path_mask = ssd_area->path_mask;
96 ssd->fla_valid_mask = ssd_area->fla_valid_mask;
97 for (i = 0; i < 8; i++) {
98 mask = 0x80 >> i;
99 if (ssd_area->path_mask & mask) {
100 chp_id_init(&ssd->chpid[i]);
101 ssd->chpid[i].id = ssd_area->chpid[i];
1da177e4 102 }
7ad6a249
PO
103 if (ssd_area->fla_valid_mask & mask)
104 ssd->fla[i] = ssd_area->fla[i];
1da177e4 105 }
7ad6a249
PO
106out_free:
107 free_page(page);
1da177e4
LT
108 return ret;
109}
110
387b734f
SB
111static int check_for_io_on_path(struct subchannel *sch, int mask)
112{
113 int cc;
114
115 cc = stsch(sch->schid, &sch->schib);
116 if (cc)
117 return 0;
118 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
119 return 1;
120 return 0;
121}
122
123static void terminate_internal_io(struct subchannel *sch)
124{
125 if (cio_clear(sch)) {
126 /* Recheck device in case clear failed. */
127 sch->lpm = 0;
83b3370c
PO
128 if (device_trigger_verify(sch) != 0)
129 css_schedule_eval(sch->schid);
387b734f
SB
130 return;
131 }
132 /* Request retry of internal operation. */
133 device_set_intretry(sch);
134 /* Call handler. */
135 if (sch->driver && sch->driver->termination)
602b20f2 136 sch->driver->termination(sch);
387b734f
SB
137}
138
e82a1567 139static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
1da177e4
LT
140{
141 int j;
142 int mask;
e82a1567 143 struct chp_id *chpid = data;
1da177e4
LT
144 struct schib schib;
145
7e8ae7bf
CH
146 for (j = 0; j < 8; j++) {
147 mask = 0x80 >> j;
148 if ((sch->schib.pmcw.pim & mask) &&
e6b6e10a 149 (sch->schib.pmcw.chpid[j] == chpid->id))
1da177e4 150 break;
7e8ae7bf 151 }
1da177e4
LT
152 if (j >= 8)
153 return 0;
154
2ec22984 155 spin_lock_irq(sch->lock);
1da177e4 156
a8237fc4 157 stsch(sch->schid, &schib);
b279a4f5 158 if (!css_sch_is_valid(&schib))
1da177e4
LT
159 goto out_unreg;
160 memcpy(&sch->schib, &schib, sizeof(struct schib));
161 /* Check for single path devices. */
162 if (sch->schib.pmcw.pim == 0x80)
163 goto out_unreg;
1da177e4 164
387b734f
SB
165 if (check_for_io_on_path(sch, mask)) {
166 if (device_is_online(sch))
167 device_kill_io(sch);
168 else {
169 terminate_internal_io(sch);
170 /* Re-start path verification. */
171 if (sch->driver && sch->driver->verify)
602b20f2 172 sch->driver->verify(sch);
387b734f
SB
173 }
174 } else {
175 /* trigger path verification. */
176 if (sch->driver && sch->driver->verify)
602b20f2 177 sch->driver->verify(sch);
387b734f 178 else if (sch->lpm == mask)
1da177e4 179 goto out_unreg;
1da177e4
LT
180 }
181
2ec22984 182 spin_unlock_irq(sch->lock);
1da177e4 183 return 0;
387b734f 184
1da177e4 185out_unreg:
1da177e4 186 sch->lpm = 0;
387b734f 187 spin_unlock_irq(sch->lock);
83b3370c 188 css_schedule_eval(sch->schid);
1da177e4
LT
189 return 0;
190}
191
e6b6e10a 192void chsc_chp_offline(struct chp_id chpid)
1da177e4
LT
193{
194 char dbf_txt[15];
195
f86635fa 196 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
1da177e4
LT
197 CIO_TRACE_EVENT(2, dbf_txt);
198
e6b6e10a 199 if (chp_get_status(chpid) <= 0)
1da177e4 200 return;
e82a1567 201 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
1da177e4
LT
202}
203
e82a1567 204static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
f97a56fb
CH
205{
206 struct schib schib;
f97a56fb
CH
207 /*
208 * We don't know the device yet, but since a path
209 * may be available now to the device we'll have
210 * to do recognition again.
211 * Since we don't have any idea about which chpid
212 * that beast may be on we'll have to do a stsch
213 * on all devices, grr...
214 */
fb6958a5 215 if (stsch_err(schid, &schib))
f97a56fb 216 /* We're through */
83b3370c 217 return -ENXIO;
f97a56fb
CH
218
219 /* Put it on the slow path. */
83b3370c 220 css_schedule_eval(schid);
f97a56fb
CH
221 return 0;
222}
223
7ad6a249
PO
/* Parameters of a resource-accessibility event being processed. */
struct res_acc_data {
	struct chp_id chpid;	/* channel path that became accessible */
	u32 fla_mask;		/* which bits of fla are significant */
	u16 fla;		/* (partial) link address, if valid */
};
229
230static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
231 struct res_acc_data *data)
232{
233 int i;
234 int mask;
235
236 for (i = 0; i < 8; i++) {
237 mask = 0x80 >> i;
238 if (!(ssd->path_mask & mask))
239 continue;
240 if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
241 continue;
242 if ((ssd->fla_valid_mask & mask) &&
243 ((ssd->fla[i] & data->fla_mask) != data->fla))
244 continue;
245 return mask;
246 }
247 return 0;
248}
249
e82a1567 250static int __s390_process_res_acc(struct subchannel *sch, void *data)
1da177e4 251{
f97a56fb 252 int chp_mask, old_lpm;
e82a1567 253 struct res_acc_data *res_data = data;
f97a56fb 254
2ec22984 255 spin_lock_irq(sch->lock);
7ad6a249
PO
256 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
257 if (chp_mask == 0)
258 goto out;
259 if (stsch(sch->schid, &sch->schib))
260 goto out;
f97a56fb
CH
261 old_lpm = sch->lpm;
262 sch->lpm = ((sch->schib.pmcw.pim &
263 sch->schib.pmcw.pam &
264 sch->schib.pmcw.pom)
265 | chp_mask) & sch->opm;
266 if (!old_lpm && sch->lpm)
267 device_trigger_reprobe(sch);
268 else if (sch->driver && sch->driver->verify)
602b20f2 269 sch->driver->verify(sch);
7ad6a249 270out:
2ec22984 271 spin_unlock_irq(sch->lock);
e82a1567 272
dd9963f9 273 return 0;
f97a56fb
CH
274}
275
83b3370c 276static void s390_process_res_acc (struct res_acc_data *res_data)
f97a56fb 277{
1da177e4
LT
278 char dbf_txt[15];
279
e6b6e10a
PO
280 sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
281 res_data->chpid.id);
1da177e4 282 CIO_TRACE_EVENT( 2, dbf_txt);
f97a56fb
CH
283 if (res_data->fla != 0) {
284 sprintf(dbf_txt, "fla%x", res_data->fla);
1da177e4
LT
285 CIO_TRACE_EVENT( 2, dbf_txt);
286 }
287
288 /*
289 * I/O resources may have become accessible.
290 * Scan through all subchannels that may be concerned and
291 * do a validation on those.
292 * The more information we have (info), the less scanning
293 * will we have to do.
294 */
e82a1567
PO
295 for_each_subchannel_staged(__s390_process_res_acc,
296 s390_process_res_acc_new_sch, res_data);
1da177e4
LT
297}
298
299static int
300__get_chpid_from_lir(void *data)
301{
302 struct lir {
303 u8 iq;
304 u8 ic;
305 u16 sci;
306 /* incident-node descriptor */
307 u32 indesc[28];
308 /* attached-node descriptor */
309 u32 andesc[28];
310 /* incident-specific information */
311 u32 isinfo[28];
0f008aa3 312 } __attribute__ ((packed)) *lir;
1da177e4 313
12975aef 314 lir = data;
1da177e4
LT
315 if (!(lir->iq&0x80))
316 /* NULL link incident record */
317 return -EINVAL;
318 if (!(lir->indesc[0]&0xc0000000))
319 /* node descriptor not valid */
320 return -EINVAL;
321 if (!(lir->indesc[0]&0x10000000))
322 /* don't handle device-type nodes - FIXME */
323 return -EINVAL;
324 /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
325
326 return (u16) (lir->indesc[0]&0x000000ff);
327}
328
184357a5
PO
/*
 * Request/response block for the CHSC store-event-information command
 * (command code 0x000e). Hardware-defined layout; must stay packed and
 * exactly one page in size.
 */
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
347
83b3370c 348static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
184357a5 349{
f86635fa
PO
350 struct chp_id chpid;
351 int id;
184357a5
PO
352
353 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
354 sei_area->rs, sei_area->rsid);
355 if (sei_area->rs != 4)
83b3370c 356 return;
f86635fa
PO
357 id = __get_chpid_from_lir(sei_area->ccdf);
358 if (id < 0)
184357a5 359 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
f86635fa
PO
360 else {
361 chp_id_init(&chpid);
362 chpid.id = id;
e6b6e10a 363 chsc_chp_offline(chpid);
f86635fa 364 }
184357a5
PO
365}
366
83b3370c 367static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
1da177e4 368{
f97a56fb 369 struct res_acc_data res_data;
f86635fa 370 struct chp_id chpid;
184357a5 371 int status;
184357a5
PO
372
373 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
374 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
375 if (sei_area->rs != 4)
83b3370c 376 return;
f86635fa
PO
377 chp_id_init(&chpid);
378 chpid.id = sei_area->rsid;
184357a5 379 /* allocate a new channel path structure, if needed */
e6b6e10a 380 status = chp_get_status(chpid);
184357a5 381 if (status < 0)
e6b6e10a 382 chp_new(chpid);
184357a5 383 else if (!status)
83b3370c 384 return;
184357a5 385 memset(&res_data, 0, sizeof(struct res_acc_data));
e6b6e10a 386 res_data.chpid = chpid;
184357a5
PO
387 if ((sei_area->vf & 0xc0) != 0) {
388 res_data.fla = sei_area->fla;
389 if ((sei_area->vf & 0xc0) == 0xc0)
390 /* full link address */
391 res_data.fla_mask = 0xffff;
392 else
393 /* link address */
394 res_data.fla_mask = 0xff00;
395 }
83b3370c 396 s390_process_res_acc(&res_data);
184357a5
PO
397}
398
e5854a58
PO
/* Content-code-dependent data of a channel-path-configuration event. */
struct chp_config_data {
	u8 map[32];	/* bitmap of affected chpids */
	u8 op;		/* operation: 0=configure, 1=deconfigure, 2=cancel */
	u8 pc;
};
404
83b3370c 405static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
e5854a58
PO
406{
407 struct chp_config_data *data;
408 struct chp_id chpid;
409 int num;
410
411 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
412 if (sei_area->rs != 0)
83b3370c 413 return;
e5854a58
PO
414 data = (struct chp_config_data *) &(sei_area->ccdf);
415 chp_id_init(&chpid);
416 for (num = 0; num <= __MAX_CHPID; num++) {
417 if (!chp_test_bit(data->map, num))
418 continue;
419 chpid.id = num;
420 printk(KERN_WARNING "cio: processing configure event %d for "
421 "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
422 switch (data->op) {
423 case 0:
424 chp_cfg_schedule(chpid, 1);
425 break;
426 case 1:
427 chp_cfg_schedule(chpid, 0);
428 break;
429 case 2:
430 chp_cfg_cancel_deconfigure(chpid);
431 break;
432 }
433 }
e5854a58
PO
434}
435
83b3370c 436static void chsc_process_sei(struct chsc_sei_area *sei_area)
184357a5 437{
184357a5 438 /* Check if we might have lost some information. */
83b3370c 439 if (sei_area->flags & 0x40) {
184357a5 440 CIO_CRW_EVENT(2, "chsc: event overflow\n");
83b3370c
PO
441 css_schedule_eval_all();
442 }
184357a5 443 /* which kind of information was stored? */
184357a5
PO
444 switch (sei_area->cc) {
445 case 1: /* link incident*/
83b3370c 446 chsc_process_sei_link_incident(sei_area);
184357a5
PO
447 break;
448 case 2: /* i/o resource accessibiliy */
83b3370c 449 chsc_process_sei_res_acc(sei_area);
184357a5 450 break;
e5854a58 451 case 8: /* channel-path-configuration notification */
83b3370c 452 chsc_process_sei_chp_config(sei_area);
e5854a58 453 break;
184357a5
PO
454 default: /* other stuff */
455 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
456 sei_area->cc);
457 break;
458 }
184357a5
PO
459}
460
83b3370c 461void chsc_process_crw(void)
184357a5
PO
462{
463 struct chsc_sei_area *sei_area;
1da177e4
LT
464
465 if (!sei_page)
83b3370c 466 return;
184357a5
PO
467 /* Access to sei_page is serialized through machine check handler
468 * thread, so no need for locking. */
1da177e4
LT
469 sei_area = sei_page;
470
471 CIO_TRACE_EVENT( 2, "prcss");
1da177e4 472 do {
1da177e4 473 memset(sei_area, 0, sizeof(*sei_area));
495a5b45
CH
474 sei_area->request.length = 0x0010;
475 sei_area->request.code = 0x000e;
184357a5
PO
476 if (chsc(sei_area))
477 break;
1da177e4 478
184357a5
PO
479 if (sei_area->response.code == 0x0001) {
480 CIO_CRW_EVENT(4, "chsc: sei successful\n");
83b3370c 481 chsc_process_sei(sei_area);
184357a5
PO
482 } else {
483 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
1da177e4 484 sei_area->response.code);
1da177e4
LT
485 break;
486 }
487 } while (sei_area->flags & 0x80);
1da177e4
LT
488}
489
e82a1567 490static int __chp_add_new_sch(struct subchannel_id schid, void *data)
f97a56fb
CH
491{
492 struct schib schib;
f97a56fb 493
758976f9 494 if (stsch_err(schid, &schib))
f97a56fb 495 /* We're through */
83b3370c 496 return -ENXIO;
f97a56fb
CH
497
498 /* Put it on the slow path. */
83b3370c 499 css_schedule_eval(schid);
f97a56fb
CH
500 return 0;
501}
502
503
e82a1567 504static int __chp_add(struct subchannel *sch, void *data)
1da177e4 505{
7e8ae7bf 506 int i, mask;
e82a1567
PO
507 struct chp_id *chpid = data;
508
2ec22984 509 spin_lock_irq(sch->lock);
7e8ae7bf
CH
510 for (i=0; i<8; i++) {
511 mask = 0x80 >> i;
512 if ((sch->schib.pmcw.pim & mask) &&
e82a1567 513 (sch->schib.pmcw.chpid[i] == chpid->id))
f97a56fb 514 break;
7e8ae7bf 515 }
f97a56fb 516 if (i==8) {
2ec22984 517 spin_unlock_irq(sch->lock);
f97a56fb
CH
518 return 0;
519 }
e82a1567
PO
520 if (stsch(sch->schid, &sch->schib)) {
521 spin_unlock_irq(sch->lock);
522 css_schedule_eval(sch->schid);
523 return 0;
524 }
f97a56fb
CH
525 sch->lpm = ((sch->schib.pmcw.pim &
526 sch->schib.pmcw.pam &
527 sch->schib.pmcw.pom)
7e8ae7bf 528 | mask) & sch->opm;
f97a56fb
CH
529
530 if (sch->driver && sch->driver->verify)
602b20f2 531 sch->driver->verify(sch);
f97a56fb 532
2ec22984 533 spin_unlock_irq(sch->lock);
e82a1567 534
f97a56fb
CH
535 return 0;
536}
537
83b3370c 538void chsc_chp_online(struct chp_id chpid)
f97a56fb 539{
1da177e4
LT
540 char dbf_txt[15];
541
f86635fa 542 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
1da177e4
LT
543 CIO_TRACE_EVENT(2, dbf_txt);
544
83b3370c 545 if (chp_get_status(chpid) != 0)
e82a1567
PO
546 for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
547 &chpid);
1da177e4
LT
548}
549
f86635fa
PO
550static void __s390_subchannel_vary_chpid(struct subchannel *sch,
551 struct chp_id chpid, int on)
1da177e4
LT
552{
553 int chp, old_lpm;
7ad6a249 554 int mask;
1da177e4
LT
555 unsigned long flags;
556
2ec22984 557 spin_lock_irqsave(sch->lock, flags);
1da177e4
LT
558 old_lpm = sch->lpm;
559 for (chp = 0; chp < 8; chp++) {
7ad6a249
PO
560 mask = 0x80 >> chp;
561 if (!(sch->ssd_info.path_mask & mask))
562 continue;
563 if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
1da177e4
LT
564 continue;
565
566 if (on) {
7ad6a249
PO
567 sch->opm |= mask;
568 sch->lpm |= mask;
1da177e4
LT
569 if (!old_lpm)
570 device_trigger_reprobe(sch);
571 else if (sch->driver && sch->driver->verify)
602b20f2 572 sch->driver->verify(sch);
24cb5b48
CH
573 break;
574 }
7ad6a249
PO
575 sch->opm &= ~mask;
576 sch->lpm &= ~mask;
577 if (check_for_io_on_path(sch, mask)) {
d23861ff
CH
578 if (device_is_online(sch))
579 /* Path verification is done after killing. */
580 device_kill_io(sch);
387b734f 581 else {
d23861ff
CH
582 /* Kill and retry internal I/O. */
583 terminate_internal_io(sch);
387b734f
SB
584 /* Re-start path verification. */
585 if (sch->driver && sch->driver->verify)
602b20f2 586 sch->driver->verify(sch);
387b734f 587 }
d23861ff 588 } else if (!sch->lpm) {
83b3370c
PO
589 if (device_trigger_verify(sch) != 0)
590 css_schedule_eval(sch->schid);
24cb5b48 591 } else if (sch->driver && sch->driver->verify)
602b20f2 592 sch->driver->verify(sch);
1da177e4
LT
593 break;
594 }
2ec22984 595 spin_unlock_irqrestore(sch->lock, flags);
1da177e4
LT
596}
597
e82a1567 598static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
1da177e4 599{
e82a1567 600 struct chp_id *chpid = data;
1da177e4
LT
601
602 __s390_subchannel_vary_chpid(sch, *chpid, 0);
603 return 0;
604}
605
e82a1567 606static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
1da177e4 607{
e82a1567 608 struct chp_id *chpid = data;
1da177e4
LT
609
610 __s390_subchannel_vary_chpid(sch, *chpid, 1);
611 return 0;
612}
613
f97a56fb
CH
614static int
615__s390_vary_chpid_on(struct subchannel_id schid, void *data)
616{
617 struct schib schib;
f97a56fb 618
fb6958a5 619 if (stsch_err(schid, &schib))
f97a56fb
CH
620 /* We're through */
621 return -ENXIO;
622 /* Put it on the slow path. */
83b3370c 623 css_schedule_eval(schid);
f97a56fb
CH
624 return 0;
625}
626
e6b6e10a
PO
627/**
628 * chsc_chp_vary - propagate channel-path vary operation to subchannels
629 * @chpid: channl-path ID
630 * @on: non-zero for vary online, zero for vary offline
1da177e4 631 */
e6b6e10a 632int chsc_chp_vary(struct chp_id chpid, int on)
1da177e4 633{
1da177e4
LT
634 /*
635 * Redo PathVerification on the devices the chpid connects to
636 */
637
f97a56fb 638 if (on)
e82a1567
PO
639 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
640 __s390_vary_chpid_on, &chpid);
641 else
642 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
643 NULL, &chpid);
644
1da177e4
LT
645 return 0;
646}
647
495a5b45
CH
648static void
649chsc_remove_cmg_attr(struct channel_subsystem *css)
650{
651 int i;
652
653 for (i = 0; i <= __MAX_CHPID; i++) {
654 if (!css->chps[i])
655 continue;
e6b6e10a 656 chp_remove_cmg_attr(css->chps[i]);
495a5b45
CH
657 }
658}
659
660static int
661chsc_add_cmg_attr(struct channel_subsystem *css)
662{
663 int i, ret;
664
665 ret = 0;
666 for (i = 0; i <= __MAX_CHPID; i++) {
667 if (!css->chps[i])
668 continue;
e6b6e10a 669 ret = chp_add_cmg_attr(css->chps[i]);
495a5b45
CH
670 if (ret)
671 goto cleanup;
672 }
673 return ret;
674cleanup:
675 for (--i; i >= 0; i--) {
676 if (!css->chps[i])
677 continue;
e6b6e10a 678 chp_remove_cmg_attr(css->chps[i]);
495a5b45
CH
679 }
680 return ret;
681}
682
495a5b45
CH
683static int
684__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
685{
686 struct {
687 struct chsc_header request;
688 u32 operation_code : 2;
689 u32 : 30;
690 u32 key : 4;
691 u32 : 28;
692 u32 zeroes1;
693 u32 cub_addr1;
694 u32 zeroes2;
695 u32 cub_addr2;
696 u32 reserved[13];
697 struct chsc_header response;
698 u32 status : 8;
699 u32 : 4;
700 u32 fmt : 4;
701 u32 : 16;
0f008aa3 702 } __attribute__ ((packed)) *secm_area;
495a5b45
CH
703 int ret, ccode;
704
705 secm_area = page;
706 secm_area->request.length = 0x0050;
707 secm_area->request.code = 0x0016;
708
709 secm_area->key = PAGE_DEFAULT_KEY;
710 secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
711 secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
712
713 secm_area->operation_code = enable ? 0 : 1;
714
715 ccode = chsc(secm_area);
716 if (ccode > 0)
717 return (ccode == 3) ? -ENODEV : -EBUSY;
718
719 switch (secm_area->response.code) {
720 case 0x0001: /* Success. */
721 ret = 0;
722 break;
723 case 0x0003: /* Invalid block. */
724 case 0x0007: /* Invalid format. */
725 case 0x0008: /* Other invalid block. */
726 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
727 ret = -EINVAL;
728 break;
729 case 0x0004: /* Command not provided in model. */
730 CIO_CRW_EVENT(2, "Model does not provide secm\n");
731 ret = -EOPNOTSUPP;
732 break;
733 case 0x0102: /* cub adresses incorrect */
734 CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
735 ret = -EINVAL;
736 break;
737 case 0x0103: /* key error */
738 CIO_CRW_EVENT(2, "Access key error in secm\n");
739 ret = -EINVAL;
740 break;
741 case 0x0105: /* error while starting */
742 CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
743 ret = -EIO;
744 break;
745 default:
746 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
747 secm_area->response.code);
748 ret = -EIO;
749 }
750 return ret;
751}
752
753int
754chsc_secm(struct channel_subsystem *css, int enable)
755{
756 void *secm_area;
757 int ret;
758
759 secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
760 if (!secm_area)
761 return -ENOMEM;
762
763 mutex_lock(&css->mutex);
764 if (enable && !css->cm_enabled) {
765 css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
766 css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
767 if (!css->cub_addr1 || !css->cub_addr2) {
768 free_page((unsigned long)css->cub_addr1);
769 free_page((unsigned long)css->cub_addr2);
770 free_page((unsigned long)secm_area);
771 mutex_unlock(&css->mutex);
772 return -ENOMEM;
773 }
774 }
775 ret = __chsc_do_secm(css, enable, secm_area);
776 if (!ret) {
777 css->cm_enabled = enable;
778 if (css->cm_enabled) {
779 ret = chsc_add_cmg_attr(css);
780 if (ret) {
781 memset(secm_area, 0, PAGE_SIZE);
782 __chsc_do_secm(css, 0, secm_area);
783 css->cm_enabled = 0;
784 }
785 } else
786 chsc_remove_cmg_attr(css);
787 }
8c4941c5 788 if (!css->cm_enabled) {
495a5b45
CH
789 free_page((unsigned long)css->cub_addr1);
790 free_page((unsigned long)css->cub_addr2);
791 }
792 mutex_unlock(&css->mutex);
793 free_page((unsigned long)secm_area);
794 return ret;
795}
796
e6b6e10a
PO
797int chsc_determine_channel_path_description(struct chp_id chpid,
798 struct channel_path_desc *desc)
1da177e4
LT
799{
800 int ccode, ret;
801
802 struct {
803 struct chsc_header request;
804 u32 : 24;
805 u32 first_chpid : 8;
806 u32 : 24;
807 u32 last_chpid : 8;
808 u32 zeroes1;
809 struct chsc_header response;
810 u32 zeroes2;
811 struct channel_path_desc desc;
0f008aa3 812 } __attribute__ ((packed)) *scpd_area;
1da177e4
LT
813
814 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
815 if (!scpd_area)
816 return -ENOMEM;
817
495a5b45
CH
818 scpd_area->request.length = 0x0010;
819 scpd_area->request.code = 0x0002;
1da177e4 820
f86635fa
PO
821 scpd_area->first_chpid = chpid.id;
822 scpd_area->last_chpid = chpid.id;
1da177e4
LT
823
824 ccode = chsc(scpd_area);
825 if (ccode > 0) {
826 ret = (ccode == 3) ? -ENODEV : -EBUSY;
827 goto out;
828 }
829
830 switch (scpd_area->response.code) {
831 case 0x0001: /* Success. */
832 memcpy(desc, &scpd_area->desc,
833 sizeof(struct channel_path_desc));
834 ret = 0;
835 break;
836 case 0x0003: /* Invalid block. */
837 case 0x0007: /* Invalid format. */
838 case 0x0008: /* Other invalid block. */
839 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
840 ret = -EINVAL;
841 break;
842 case 0x0004: /* Command not provided in model. */
843 CIO_CRW_EVENT(2, "Model does not provide scpd\n");
844 ret = -EOPNOTSUPP;
845 break;
846 default:
847 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
848 scpd_area->response.code);
849 ret = -EIO;
850 }
851out:
852 free_page((unsigned long)scpd_area);
853 return ret;
854}
855
495a5b45
CH
856static void
857chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
858 struct cmg_chars *chars)
859{
860 switch (chp->cmg) {
861 case 2:
862 case 3:
863 chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
864 GFP_KERNEL);
865 if (chp->cmg_chars) {
866 int i, mask;
867 struct cmg_chars *cmg_chars;
868
869 cmg_chars = chp->cmg_chars;
870 for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
871 mask = 0x80 >> (i + 3);
872 if (cmcv & mask)
873 cmg_chars->values[i] = chars->values[i];
874 else
875 cmg_chars->values[i] = 0;
876 }
877 }
878 break;
879 default:
880 /* No cmg-dependent data. */
881 break;
882 }
883}
884
e6b6e10a 885int chsc_get_channel_measurement_chars(struct channel_path *chp)
495a5b45
CH
886{
887 int ccode, ret;
888
889 struct {
890 struct chsc_header request;
891 u32 : 24;
892 u32 first_chpid : 8;
893 u32 : 24;
894 u32 last_chpid : 8;
895 u32 zeroes1;
896 struct chsc_header response;
897 u32 zeroes2;
898 u32 not_valid : 1;
899 u32 shared : 1;
900 u32 : 22;
901 u32 chpid : 8;
902 u32 cmcv : 5;
903 u32 : 11;
904 u32 cmgq : 8;
905 u32 cmg : 8;
906 u32 zeroes3;
907 u32 data[NR_MEASUREMENT_CHARS];
0f008aa3 908 } __attribute__ ((packed)) *scmc_area;
495a5b45
CH
909
910 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
911 if (!scmc_area)
912 return -ENOMEM;
913
914 scmc_area->request.length = 0x0010;
915 scmc_area->request.code = 0x0022;
916
f86635fa
PO
917 scmc_area->first_chpid = chp->chpid.id;
918 scmc_area->last_chpid = chp->chpid.id;
495a5b45
CH
919
920 ccode = chsc(scmc_area);
921 if (ccode > 0) {
922 ret = (ccode == 3) ? -ENODEV : -EBUSY;
923 goto out;
924 }
925
926 switch (scmc_area->response.code) {
927 case 0x0001: /* Success. */
928 if (!scmc_area->not_valid) {
929 chp->cmg = scmc_area->cmg;
930 chp->shared = scmc_area->shared;
931 chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
932 (struct cmg_chars *)
933 &scmc_area->data);
934 } else {
935 chp->cmg = -1;
936 chp->shared = -1;
937 }
938 ret = 0;
939 break;
940 case 0x0003: /* Invalid block. */
941 case 0x0007: /* Invalid format. */
942 case 0x0008: /* Invalid bit combination. */
943 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
944 ret = -EINVAL;
945 break;
946 case 0x0004: /* Command not provided. */
947 CIO_CRW_EVENT(2, "Model does not provide scmc\n");
948 ret = -EOPNOTSUPP;
949 break;
950 default:
951 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
952 scmc_area->response.code);
953 ret = -EIO;
954 }
955out:
956 free_page((unsigned long)scmc_area);
957 return ret;
958}
959
4434a38c 960int __init chsc_alloc_sei_area(void)
1da177e4
LT
961{
962 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
963 if (!sei_page)
e556bbbd
CH
964 CIO_MSG_EVENT(0, "Can't allocate page for processing of "
965 "chsc machine checks!\n");
1da177e4
LT
966 return (sei_page ? 0 : -ENOMEM);
967}
968
4434a38c
CH
969void __init chsc_free_sei_area(void)
970{
971 kfree(sei_page);
972}
973
fb6958a5
CH
974int __init
975chsc_enable_facility(int operation_code)
976{
977 int ret;
978 struct {
979 struct chsc_header request;
980 u8 reserved1:4;
981 u8 format:4;
982 u8 reserved2;
983 u16 operation_code;
984 u32 reserved3;
985 u32 reserved4;
986 u32 operation_data_area[252];
987 struct chsc_header response;
988 u32 reserved5:4;
989 u32 format2:4;
990 u32 reserved6:24;
0f008aa3 991 } __attribute__ ((packed)) *sda_area;
fb6958a5
CH
992
993 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
994 if (!sda_area)
995 return -ENOMEM;
495a5b45
CH
996 sda_area->request.length = 0x0400;
997 sda_area->request.code = 0x0031;
fb6958a5
CH
998 sda_area->operation_code = operation_code;
999
1000 ret = chsc(sda_area);
1001 if (ret > 0) {
1002 ret = (ret == 3) ? -ENODEV : -EBUSY;
1003 goto out;
1004 }
1005 switch (sda_area->response.code) {
15730ddb
CH
1006 case 0x0001: /* everything ok */
1007 ret = 0;
1008 break;
fb6958a5
CH
1009 case 0x0003: /* invalid request block */
1010 case 0x0007:
1011 ret = -EINVAL;
1012 break;
1013 case 0x0004: /* command not provided */
1014 case 0x0101: /* facility not provided */
1015 ret = -EOPNOTSUPP;
1016 break;
15730ddb
CH
1017 default: /* something went wrong */
1018 ret = -EIO;
fb6958a5
CH
1019 }
1020 out:
1021 free_page((unsigned long)sda_area);
1022 return ret;
1023}
1024
1da177e4
LT
1025struct css_general_char css_general_characteristics;
1026struct css_chsc_char css_chsc_characteristics;
1027
1028int __init
1029chsc_determine_css_characteristics(void)
1030{
1031 int result;
1032 struct {
1033 struct chsc_header request;
1034 u32 reserved1;
1035 u32 reserved2;
1036 u32 reserved3;
1037 struct chsc_header response;
1038 u32 reserved4;
1039 u32 general_char[510];
1040 u32 chsc_char[518];
0f008aa3 1041 } __attribute__ ((packed)) *scsc_area;
1da177e4
LT
1042
1043 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1044 if (!scsc_area) {
ceb3dfba 1045 CIO_MSG_EVENT(0, "Was not able to determine available "
e556bbbd 1046 "CHSCs due to no memory.\n");
1da177e4
LT
1047 return -ENOMEM;
1048 }
1049
495a5b45
CH
1050 scsc_area->request.length = 0x0010;
1051 scsc_area->request.code = 0x0010;
1da177e4
LT
1052
1053 result = chsc(scsc_area);
1054 if (result) {
e556bbbd
CH
1055 CIO_MSG_EVENT(0, "Was not able to determine available CHSCs, "
1056 "cc=%i.\n", result);
1da177e4
LT
1057 result = -EIO;
1058 goto exit;
1059 }
1060
1061 if (scsc_area->response.code != 1) {
e556bbbd
CH
1062 CIO_MSG_EVENT(0, "Was not able to determine "
1063 "available CHSCs.\n");
1da177e4
LT
1064 result = -EIO;
1065 goto exit;
1066 }
1067 memcpy(&css_general_characteristics, scsc_area->general_char,
1068 sizeof(css_general_characteristics));
1069 memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
1070 sizeof(css_chsc_characteristics));
1071exit:
1072 free_page ((unsigned long) scsc_area);
1073 return result;
1074}
1075
1076EXPORT_SYMBOL_GPL(css_general_characteristics);
1077EXPORT_SYMBOL_GPL(css_chsc_characteristics);
This page took 0.515177 seconds and 5 git commands to generate.