Merge branch 'for-linus' of git://neil.brown.name/md
[deliverable/linux.git] / drivers / s390 / block / dasd_alias.c
1 /*
2 * PAV alias management for the DASD ECKD discipline
3 *
4 * Copyright IBM Corporation, 2007
5 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
6 */
7
8 #define KMSG_COMPONENT "dasd"
9
10 #include <linux/list.h>
11 #include <asm/ebcdic.h>
12 #include "dasd_int.h"
13 #include "dasd_eckd.h"
14
15 #ifdef PRINTK_HEADER
16 #undef PRINTK_HEADER
17 #endif /* PRINTK_HEADER */
18 #define PRINTK_HEADER "dasd(eckd):"
19
20
21 /*
22 * General concept of alias management:
23 * - PAV and DASD alias management is specific to the eckd discipline.
24 * - A device is connected to an lcu as long as the device exists.
25 * dasd_alias_make_device_known_to_lcu will be called when the
26 * device is checked by the eckd discipline and
27 * dasd_alias_disconnect_device_from_lcu will be called
28 * before the device is deleted.
29 * - The dasd_alias_add_device / dasd_alias_remove_device
30 * functions mark the point when a device is 'ready for service'.
31 * - A summary unit check is a rare occasion, but it is mandatory to
32 * support it. It requires some complex recovery actions before the
33 * devices can be used again (see dasd_alias_handle_summary_unit_check).
34 * - dasd_alias_get_start_dev will find an alias device that can be used
35 * instead of the base device and does some (very simple) load balancing.
36 * This is the function that gets called for each I/O, so when improving
37 * something, this function should get faster or better, the rest has just
38 * to be correct.
39 */
40
41
42 static void summary_unit_check_handling_work(struct work_struct *);
43 static void lcu_update_work(struct work_struct *);
44 static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
45
46 static struct alias_root aliastree = {
47 .serverlist = LIST_HEAD_INIT(aliastree.serverlist),
48 .lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
49 };
50
51 static struct alias_server *_find_server(struct dasd_uid *uid)
52 {
53 struct alias_server *pos;
54 list_for_each_entry(pos, &aliastree.serverlist, server) {
55 if (!strncmp(pos->uid.vendor, uid->vendor,
56 sizeof(uid->vendor))
57 && !strncmp(pos->uid.serial, uid->serial,
58 sizeof(uid->serial)))
59 return pos;
60 };
61 return NULL;
62 }
63
64 static struct alias_lcu *_find_lcu(struct alias_server *server,
65 struct dasd_uid *uid)
66 {
67 struct alias_lcu *pos;
68 list_for_each_entry(pos, &server->lculist, lcu) {
69 if (pos->uid.ssid == uid->ssid)
70 return pos;
71 };
72 return NULL;
73 }
74
75 static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
76 struct dasd_uid *uid)
77 {
78 struct alias_pav_group *pos;
79 __u8 search_unit_addr;
80
81 /* for hyper pav there is only one group */
82 if (lcu->pav == HYPER_PAV) {
83 if (list_empty(&lcu->grouplist))
84 return NULL;
85 else
86 return list_first_entry(&lcu->grouplist,
87 struct alias_pav_group, group);
88 }
89
90 /* for base pav we have to find the group that matches the base */
91 if (uid->type == UA_BASE_DEVICE)
92 search_unit_addr = uid->real_unit_addr;
93 else
94 search_unit_addr = uid->base_unit_addr;
95 list_for_each_entry(pos, &lcu->grouplist, group) {
96 if (pos->uid.base_unit_addr == search_unit_addr &&
97 !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
98 return pos;
99 };
100 return NULL;
101 }
102
103 static struct alias_server *_allocate_server(struct dasd_uid *uid)
104 {
105 struct alias_server *server;
106
107 server = kzalloc(sizeof(*server), GFP_KERNEL);
108 if (!server)
109 return ERR_PTR(-ENOMEM);
110 memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
111 memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
112 INIT_LIST_HEAD(&server->server);
113 INIT_LIST_HEAD(&server->lculist);
114 return server;
115 }
116
/* Release a server node allocated by _allocate_server. */
static void _free_server(struct alias_server *server)
{
	kfree(server);
}
121
/*
 * Allocate and initialize a new lcu structure, including the buffers
 * needed later on: the unit address configuration record and a
 * preallocated 'reset summary unit check' request (rsu_cqr), so that
 * the summary unit check handler does not need to allocate memory in
 * its recovery path. Returns ERR_PTR(-ENOMEM) if any allocation fails.
 */
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	/* GFP_DMA: buffers below are used for channel I/O */
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	/* a fresh lcu needs its unit address configuration read first */
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	return lcu;

out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}
167
/* Free an lcu and all buffers allocated by _allocate_lcu. */
static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}
176
177 /*
178 * This is the function that will allocate all the server and lcu data,
179 * so this function must be called first for a new device.
180 * If the return value is 1, the lcu was already known before, if it
181 * is 0, this is a new lcu.
182 * Negative return code indicates that something went wrong (e.g. -ENOMEM)
183 */
184 int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
185 {
186 struct dasd_eckd_private *private;
187 unsigned long flags;
188 struct alias_server *server, *newserver;
189 struct alias_lcu *lcu, *newlcu;
190 int is_lcu_known;
191 struct dasd_uid *uid;
192
193 private = (struct dasd_eckd_private *) device->private;
194 uid = &private->uid;
195 spin_lock_irqsave(&aliastree.lock, flags);
196 is_lcu_known = 1;
197 server = _find_server(uid);
198 if (!server) {
199 spin_unlock_irqrestore(&aliastree.lock, flags);
200 newserver = _allocate_server(uid);
201 if (IS_ERR(newserver))
202 return PTR_ERR(newserver);
203 spin_lock_irqsave(&aliastree.lock, flags);
204 server = _find_server(uid);
205 if (!server) {
206 list_add(&newserver->server, &aliastree.serverlist);
207 server = newserver;
208 is_lcu_known = 0;
209 } else {
210 /* someone was faster */
211 _free_server(newserver);
212 }
213 }
214
215 lcu = _find_lcu(server, uid);
216 if (!lcu) {
217 spin_unlock_irqrestore(&aliastree.lock, flags);
218 newlcu = _allocate_lcu(uid);
219 if (IS_ERR(newlcu))
220 return PTR_ERR(lcu);
221 spin_lock_irqsave(&aliastree.lock, flags);
222 lcu = _find_lcu(server, uid);
223 if (!lcu) {
224 list_add(&newlcu->lcu, &server->lculist);
225 lcu = newlcu;
226 is_lcu_known = 0;
227 } else {
228 /* someone was faster */
229 _free_lcu(newlcu);
230 }
231 is_lcu_known = 0;
232 }
233 spin_lock(&lcu->lock);
234 list_add(&device->alias_list, &lcu->inactive_devices);
235 private->lcu = lcu;
236 spin_unlock(&lcu->lock);
237 spin_unlock_irqrestore(&aliastree.lock, flags);
238
239 return is_lcu_known;
240 }
241
242 /*
243 * This function removes a device from the scope of alias management.
244 * The complicated part is to make sure that it is not in use by
245 * any of the workers. If necessary cancel the work.
246 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		/*
		 * cancel_work_sync may sleep and the worker itself takes
		 * lcu->lock, so the lock must be dropped while waiting;
		 * afterwards re-check, since the state may have changed.
		 */
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device)
			lcu->suc_data.device = NULL;
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		/* remember to reschedule the update with another device */
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device)
			lcu->ruac_data.device = NULL;
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* free lcu and server structures that have become empty */
	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&private->uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}
300
301 /*
302 * This function assumes that the unit address configuration stored
303 * in the lcu is up to date and will update the device uid before
304 * adding it to a pav group.
305 */
306 static int _add_device_to_lcu(struct alias_lcu *lcu,
307 struct dasd_device *device)
308 {
309
310 struct dasd_eckd_private *private;
311 struct alias_pav_group *group;
312 struct dasd_uid *uid;
313
314 private = (struct dasd_eckd_private *) device->private;
315 uid = &private->uid;
316 uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
317 uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
318 dasd_set_uid(device->cdev, &private->uid);
319
320 /* if we have no PAV anyway, we don't need to bother with PAV groups */
321 if (lcu->pav == NO_PAV) {
322 list_move(&device->alias_list, &lcu->active_devices);
323 return 0;
324 }
325
326 group = _find_group(lcu, uid);
327 if (!group) {
328 group = kzalloc(sizeof(*group), GFP_ATOMIC);
329 if (!group)
330 return -ENOMEM;
331 memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
332 memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
333 group->uid.ssid = uid->ssid;
334 if (uid->type == UA_BASE_DEVICE)
335 group->uid.base_unit_addr = uid->real_unit_addr;
336 else
337 group->uid.base_unit_addr = uid->base_unit_addr;
338 memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
339 INIT_LIST_HEAD(&group->group);
340 INIT_LIST_HEAD(&group->baselist);
341 INIT_LIST_HEAD(&group->aliaslist);
342 list_add(&group->group, &lcu->grouplist);
343 }
344 if (uid->type == UA_BASE_DEVICE)
345 list_move(&device->alias_list, &group->baselist);
346 else
347 list_move(&device->alias_list, &group->aliaslist);
348 private->pavgroup = group;
349 return 0;
350 };
351
352 static void _remove_device_from_lcu(struct alias_lcu *lcu,
353 struct dasd_device *device)
354 {
355 struct dasd_eckd_private *private;
356 struct alias_pav_group *group;
357
358 private = (struct dasd_eckd_private *) device->private;
359 list_move(&device->alias_list, &lcu->inactive_devices);
360 group = private->pavgroup;
361 if (!group)
362 return;
363 private->pavgroup = NULL;
364 if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
365 list_del(&group->group);
366 kfree(group);
367 return;
368 }
369 if (group->next == device)
370 group->next = NULL;
371 };
372
/*
 * Issue a 'Perform Subsystem Function / Read Subsystem Data' request
 * on the given device to read the unit address configuration of the
 * lcu into lcu->uac. Blocks until the request completes; must be
 * called without lcu->lock held. Returns 0 on success or a negative
 * error code.
 */
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	cqr = dasd_kmalloc_request("ECKD",
				   1 /* PSF */ + 1 /* RSSD */ ,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - unit address configuration */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	do {
		rc = dasd_sleep_on(cqr);
	} while (rc && (cqr->retries > 0));
	if (rc) {
		/* failed for good: remember that an update is still due */
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
	dasd_kfree_request(cqr, cqr->memdev);
	return rc;
}
434
/*
 * Re-read the unit address configuration through refdev and rebuild
 * the pav group structure of the lcu from scratch: all groups are
 * dissolved first, the configuration is read, the PAV mode of the lcu
 * is determined, and the devices on the active list are sorted back
 * into groups. Must be called without lcu->lock held, since the read
 * request blocks. Returns 0 on success or a negative error code.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	spin_lock_irqsave(&lcu->lock, flags);
	/* dissolve all groups; their devices go back to the active list */
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	/* the first alias entry found determines the lcu's PAV mode */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		/*
		 * NOTE(review): the return value of _add_device_to_lcu is
		 * ignored; a failed (-ENOMEM) group allocation leaves the
		 * device on the active list — confirm a later update
		 * recovers it.
		 */
		_add_device_to_lcu(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}
488
/*
 * Delayed-work callback that performs the actual lcu update. On
 * failure, or if a new update became necessary while we were running,
 * the work is rescheduled; otherwise the pending state is cleared.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device was added while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
	} else {
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
517
/*
 * Schedule lcu_update_work to (re)read the unit address configuration.
 * 'device' is the preferred device for issuing the read request; if it
 * is NULL or no longer connected to the lcu, another device is picked
 * (first from a pav group, then from the active list).
 * Caller must hold lcu->lock.
 * Returns 0 if an update is scheduled or already pending, -EINVAL if
 * no usable device could be found.
 */
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	/* an empty alias_list means the device is being removed */
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	lcu->ruac_data.device = usedev;
	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
	return 0;
}
558
/*
 * Mark a device as 'ready for service': if the lcu data is up to date
 * the device is sorted into its pav group right away, otherwise it is
 * parked on the active list and an lcu update is scheduled, which will
 * assign it to a group later. Returns 0 on success or the negative
 * error code of _add_device_to_lcu.
 */
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_lcu *lcu;
	unsigned long flags;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	rc = 0;
	spin_lock_irqsave(&lcu->lock, flags);
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device);
		/* on failure, fall back to a full lcu update */
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		list_move(&device->alias_list, &lcu->active_devices);
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return rc;
}
582
583 int dasd_alias_remove_device(struct dasd_device *device)
584 {
585 struct dasd_eckd_private *private;
586 struct alias_lcu *lcu;
587 unsigned long flags;
588
589 private = (struct dasd_eckd_private *) device->private;
590 lcu = private->lcu;
591 spin_lock_irqsave(&lcu->lock, flags);
592 _remove_device_from_lcu(lcu, device);
593 spin_unlock_irqrestore(&lcu->lock, flags);
594 return 0;
595 }
596
/*
 * Find an alias device that can be used instead of the given base
 * device, advancing a round-robin pointer over the pav group's alias
 * list for simple load balancing. This is called for every I/O, so it
 * is kept as short as possible. Returns NULL if no suitable alias is
 * available: no PAV, an lcu update is pending, the alias is busier
 * than the base device, or the alias is stopped.
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{

	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	struct alias_lcu *lcu;
	struct dasd_eckd_private *private, *alias_priv;
	unsigned long flags;

	private = (struct dasd_eckd_private *) base_device->private;
	group = private->pavgroup;
	lcu = private->lcu;
	/* fast-path checks done without taking the lcu lock */
	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/* advance the round-robin pointer, wrapping at the list end */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = (struct dasd_eckd_private *) alias_device->private;
	/* only use the alias if it is less loaded than the base device */
	if ((alias_priv->count < private->count) && !alias_device->stopped)
		return alias_device;
	else
		return NULL;
}
640
641 /*
642 * Summary unit check handling depends on the way alias devices
643 * are handled so it is done here rather then in dasd_eckd.c
644 */
/*
 * Build and synchronously issue a 'Reset Summary Unit Check' CCW on
 * the preallocated lcu->rsu_cqr request, passing the reason code of
 * the original summary unit check back to the storage server.
 * Returns the rc of dasd_sleep_on_immediatly.
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	/* magic is not a C string: copy exactly 4 bytes, then convert */
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = 0 ;
	ccw->count = 16;
	ccw->cda = (__u32)(addr_t) cqr->data;
	/* first data byte carries the reason code of the unit check */
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
675
676 static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
677 {
678 struct alias_pav_group *pavgroup;
679 struct dasd_device *device;
680 struct dasd_eckd_private *private;
681
682 /* active and inactive list can contain alias as well as base devices */
683 list_for_each_entry(device, &lcu->active_devices, alias_list) {
684 private = (struct dasd_eckd_private *) device->private;
685 if (private->uid.type != UA_BASE_DEVICE)
686 continue;
687 dasd_schedule_block_bh(device->block);
688 dasd_schedule_device_bh(device);
689 }
690 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
691 private = (struct dasd_eckd_private *) device->private;
692 if (private->uid.type != UA_BASE_DEVICE)
693 continue;
694 dasd_schedule_block_bh(device->block);
695 dasd_schedule_device_bh(device);
696 }
697 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
698 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
699 dasd_schedule_block_bh(device->block);
700 dasd_schedule_device_bh(device);
701 }
702 }
703 }
704
/*
 * Flush the request queues of all alias devices of the lcu and move
 * the devices to the active list (to be re-grouped by a later update).
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	int rc;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		spin_unlock_irqrestore(&lcu->lock, flags);
		rc = dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list))
			list_move(&device->alias_list, &lcu->active_devices);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
753
754 static void __stop_device_on_lcu(struct dasd_device *device,
755 struct dasd_device *pos)
756 {
757 /* If pos == device then device is already locked! */
758 if (pos == device) {
759 pos->stopped |= DASD_STOPPED_SU;
760 return;
761 }
762 spin_lock(get_ccwdev_lock(pos->cdev));
763 pos->stopped |= DASD_STOPPED_SU;
764 spin_unlock(get_ccwdev_lock(pos->cdev));
765 }
766
767 /*
768 * This function is called in interrupt context, so the
769 * cdev lock for device is already locked!
770 */
/* Set the summary unit check stop bit on every device of the lcu. */
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
				     struct dasd_device *device)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *pos;

	/* caller holds lcu->lock, so the lists are stable */
	list_for_each_entry(pos, &lcu->active_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
			__stop_device_on_lcu(device, pos);
		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
			__stop_device_on_lcu(device, pos);
	}
}
788
789 static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
790 {
791 struct alias_pav_group *pavgroup;
792 struct dasd_device *device;
793 unsigned long flags;
794
795 list_for_each_entry(device, &lcu->active_devices, alias_list) {
796 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
797 device->stopped &= ~DASD_STOPPED_SU;
798 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
799 }
800
801 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
802 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
803 device->stopped &= ~DASD_STOPPED_SU;
804 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
805 }
806
807 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
808 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
809 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
810 device->stopped &= ~DASD_STOPPED_SU;
811 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
812 flags);
813 }
814 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
815 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
816 device->stopped &= ~DASD_STOPPED_SU;
817 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
818 flags);
819 }
820 }
821 }
822
/*
 * Worker for summary unit check recovery: flush all alias devices,
 * reset the summary unit check on the storage server, restart the
 * stopped devices and finally schedule an lcu update to pick up the
 * possibly changed alias configuration.
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	/* allow a new summary unit check worker to be scheduled */
	lcu->suc_data.device = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);
}
852
853 /*
854 * note: this will be called from int handler context (cdev locked)
855 */
856 void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
857 struct irb *irb)
858 {
859 struct alias_lcu *lcu;
860 char reason;
861 struct dasd_eckd_private *private;
862 char *sense;
863
864 private = (struct dasd_eckd_private *) device->private;
865
866 sense = dasd_get_sense(irb);
867 if (sense) {
868 reason = sense[8];
869 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
870 "eckd handle summary unit check: reason", reason);
871 } else {
872 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
873 "eckd handle summary unit check:"
874 " no reason code available");
875 return;
876 }
877
878 lcu = private->lcu;
879 if (!lcu) {
880 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
881 "device not ready to handle summary"
882 " unit check (no lcu structure)");
883 return;
884 }
885 spin_lock(&lcu->lock);
886 _stop_all_devices_on_lcu(lcu, device);
887 /* prepare for lcu_update */
888 private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
889 /* If this device is about to be removed just return and wait for
890 * the next interrupt on a different device
891 */
892 if (list_empty(&device->alias_list)) {
893 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
894 "device is in offline processing,"
895 " don't do summary unit check handling");
896 spin_unlock(&lcu->lock);
897 return;
898 }
899 if (lcu->suc_data.device) {
900 /* already scheduled or running */
901 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
902 "previous instance of summary unit check worker"
903 " still pending");
904 spin_unlock(&lcu->lock);
905 return ;
906 }
907 lcu->suc_data.reason = reason;
908 lcu->suc_data.device = device;
909 spin_unlock(&lcu->lock);
910 schedule_work(&lcu->suc_data.worker);
911 };
This page took 0.049919 seconds and 5 git commands to generate.