/* viohs.c: LDOM Virtual I/O handshake helper layer.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/ldc.h>
#include <asm/vio.h>

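/* Write one message to the LDC channel, retrying for up to 1000
 * attempts (with a 1us delay between tries) while ldc_write() keeps
 * returning -EAGAIN.  The result of the final ldc_write() call is
 * returned to the caller.
 */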
int vio_ldc_send(struct vio_driver_state *vio, void *data, int len)
{
        int err, limit = 1000;

        err = -EINVAL;
        while (limit-- > 0) {
                err = ldc_write(vio->lp, data, len);
                if (!err || (err != -EAGAIN))
                        break;
                udelay(1);
        }

        return err;
}
EXPORT_SYMBOL(vio_ldc_send);

static int send_ctrl(struct vio_driver_state *vio,
                     struct vio_msg_tag *tag, int len)
{
        tag->sid = vio_send_sid(vio);
        return vio_ldc_send(vio, tag, len);
}

static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env)
{
        tag->type = type;
        tag->stype = stype;
        tag->stype_env = stype_env;
}

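/* Send a VERSION INFO control message advertising the given
 * major/minor pair.  A fresh local session ID is derived from
 * sched_clock() for this handshake attempt.
 */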
static int send_version(struct vio_driver_state *vio, u16 major, u16 minor)
{
        struct vio_ver_info pkt;

        vio->_local_sid = (u32) sched_clock();

        memset(&pkt, 0, sizeof(pkt));
        init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
        pkt.major = major;
        pkt.minor = minor;
        pkt.dev_class = vio->dev_class;

        viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n",
               major, minor, vio->dev_class);

        return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int start_handshake(struct vio_driver_state *vio)
{
        int err;

        viodbg(HS, "START HANDSHAKE\n");

        vio->hs_state = VIO_HS_INVALID;

        err = send_version(vio,
                           vio->ver_table[0].major,
                           vio->ver_table[0].minor);
        if (err < 0)
                return err;

        return 0;
}

static void flush_rx_dring(struct vio_driver_state *vio)
{
        struct vio_dring_state *dr;
        u64 ident;

        BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));

        dr = &vio->drings[VIO_DRIVER_RX_RING];
        ident = dr->ident;

        BUG_ON(!vio->desc_buf);
        kfree(vio->desc_buf);
        vio->desc_buf = NULL;

        memset(dr, 0, sizeof(*dr));
        dr->ident = ident;
}

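/* React to LDC link events.  On LDC_EVENT_UP, note which descriptor
 * rings this device class is expected to register (TX, RX, or both)
 * and start the version handshake.  On LDC_EVENT_RESET, drop all
 * handshake and dring state and disconnect the channel.
 */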
void vio_link_state_change(struct vio_driver_state *vio, int event)
{
        if (event == LDC_EVENT_UP) {
                vio->hs_state = VIO_HS_INVALID;

                switch (vio->dev_class) {
                case VDEV_NETWORK:
                case VDEV_NETWORK_SWITCH:
                        vio->dr_state = (VIO_DR_STATE_TXREQ |
                                         VIO_DR_STATE_RXREQ);
                        break;

                case VDEV_DISK:
                        vio->dr_state = VIO_DR_STATE_TXREQ;
                        break;
                case VDEV_DISK_SERVER:
                        vio->dr_state = VIO_DR_STATE_RXREQ;
                        break;
                }
                start_handshake(vio);
        } else if (event == LDC_EVENT_RESET) {
                vio->hs_state = VIO_HS_INVALID;

                if (vio->dr_state & VIO_DR_STATE_RXREG)
                        flush_rx_dring(vio);

                vio->dr_state = 0x00;
                memset(&vio->ver, 0, sizeof(vio->ver));

                ldc_disconnect(vio->lp);
        }
}
EXPORT_SYMBOL(vio_link_state_change);

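/* Common error path for the handshake state machine: forget any
 * registered rings, free the RX descriptor copy buffer, and force
 * the handshake state back to VIO_HS_INVALID.  Callers propagate the
 * -ECONNRESET return value.
 */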
static int handshake_failure(struct vio_driver_state *vio)
{
        struct vio_dring_state *dr;

        /* XXX Put policy here... Perhaps start a timer to fire
         * XXX in 100 ms, which will bring the link up and retry
         * XXX the handshake.
         */

        viodbg(HS, "HANDSHAKE FAILURE\n");

        vio->dr_state &= ~(VIO_DR_STATE_TXREG |
                           VIO_DR_STATE_RXREG);

        dr = &vio->drings[VIO_DRIVER_RX_RING];
        memset(dr, 0, sizeof(*dr));

        kfree(vio->desc_buf);
        vio->desc_buf = NULL;
        vio->desc_buf_len = 0;

        vio->hs_state = VIO_HS_INVALID;

        return -ECONNRESET;
}

static int process_unknown(struct vio_driver_state *vio, void *arg)
{
        struct vio_msg_tag *pkt = arg;

        viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n",
               pkt->type, pkt->stype, pkt->stype_env, pkt->sid);

        printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n",
               vio->vdev->channel_id);

        ldc_disconnect(vio->lp);

        return -ECONNRESET;
}

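/* Register our transmit descriptor ring with the peer.  The
 * DRING_REG INFO packet carries the ring geometry plus the LDC
 * transfer cookies that map the ring memory.
 */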
static int send_dreg(struct vio_driver_state *vio)
{
        struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
        union {
                struct vio_dring_register pkt;
                char all[sizeof(struct vio_dring_register) +
                         (sizeof(struct ldc_trans_cookie) *
                          dr->ncookies)];
        } u;
        int i;

        memset(&u, 0, sizeof(u));
        init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
        u.pkt.dring_ident = 0;
        u.pkt.num_descr = dr->num_entries;
        u.pkt.descr_size = dr->entry_size;
        u.pkt.options = VIO_TX_DRING;
        u.pkt.num_cookies = dr->ncookies;

        viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
               "ncookies[%u]\n",
               u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
               u.pkt.num_cookies);

        for (i = 0; i < dr->ncookies; i++) {
                u.pkt.cookies[i] = dr->cookies[i];

                viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
                       i,
                       (unsigned long long) u.pkt.cookies[i].cookie_addr,
                       (unsigned long long) u.pkt.cookies[i].cookie_size);
        }

        return send_ctrl(vio, &u.pkt.tag, sizeof(u));
}

static int send_rdx(struct vio_driver_state *vio)
{
        struct vio_rdx pkt;

        memset(&pkt, 0, sizeof(pkt));

        init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);

        viodbg(HS, "SEND RDX INFO\n");

        return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int send_attr(struct vio_driver_state *vio)
{
        return vio->ops->send_attr(vio);
}

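/* Scan the driver's version table and return the first entry whose
 * major number is <= the requested major, or NULL if none qualifies.
 * This assumes the table is ordered from highest to lowest version,
 * so the entry found is the closest one we support.
 */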
static struct vio_version *find_by_major(struct vio_driver_state *vio,
                                         u16 major)
{
        struct vio_version *ret = NULL;
        int i;

        for (i = 0; i < vio->ver_table_entries; i++) {
                struct vio_version *v = &vio->ver_table[i];
                if (v->major <= major) {
                        ret = v;
                        break;
                }
        }
        return ret;
}

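/* Handle a VERSION INFO packet from the peer.  Any handshake already
 * in progress is reset, the peer's session ID is recorded, and the
 * request is either NACKed (suggesting the closest major we do
 * support, if any) or ACKed with the lowest common minor version.
 */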
static int process_ver_info(struct vio_driver_state *vio,
                            struct vio_ver_info *pkt)
{
        struct vio_version *vap;
        int err;

        viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
               pkt->major, pkt->minor, pkt->dev_class);

        if (vio->hs_state != VIO_HS_INVALID) {
                /* XXX Perhaps invoke start_handshake? XXX */
                memset(&vio->ver, 0, sizeof(vio->ver));
                vio->hs_state = VIO_HS_INVALID;
        }

        vap = find_by_major(vio, pkt->major);

        vio->_peer_sid = pkt->tag.sid;

        if (!vap) {
                pkt->tag.stype = VIO_SUBTYPE_NACK;
                pkt->major = 0;
                pkt->minor = 0;
                viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
                err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
        } else if (vap->major != pkt->major) {
                pkt->tag.stype = VIO_SUBTYPE_NACK;
                pkt->major = vap->major;
                pkt->minor = vap->minor;
                viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
                       pkt->major, pkt->minor);
                err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
        } else {
                struct vio_version ver = {
                        .major = pkt->major,
                        .minor = pkt->minor,
                };
                if (ver.minor > vap->minor)
                        ver.minor = vap->minor;
                pkt->minor = ver.minor;
                pkt->tag.stype = VIO_SUBTYPE_ACK;
                viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
                       pkt->major, pkt->minor);
                err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
                if (err > 0) {
                        vio->ver = ver;
                        vio->hs_state = VIO_HS_GOTVERS;
                }
        }
        if (err < 0)
                return handshake_failure(vio);

        return 0;
}

static int process_ver_ack(struct vio_driver_state *vio,
                           struct vio_ver_info *pkt)
{
        viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
               pkt->major, pkt->minor, pkt->dev_class);

        if (vio->hs_state & VIO_HS_GOTVERS) {
                if (vio->ver.major != pkt->major ||
                    vio->ver.minor != pkt->minor) {
                        pkt->tag.stype = VIO_SUBTYPE_NACK;
                        (void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
                        return handshake_failure(vio);
                }
        } else {
                vio->ver.major = pkt->major;
                vio->ver.minor = pkt->minor;
                vio->hs_state = VIO_HS_GOTVERS;
        }

        switch (vio->dev_class) {
        case VDEV_NETWORK:
        case VDEV_DISK:
                if (send_attr(vio) < 0)
                        return handshake_failure(vio);
                break;

        default:
                break;
        }

        return 0;
}

static int process_ver_nack(struct vio_driver_state *vio,
                            struct vio_ver_info *pkt)
{
        struct vio_version *nver;

        viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
               pkt->major, pkt->minor, pkt->dev_class);

        if ((pkt->major == 0 && pkt->minor == 0) ||
            !(nver = find_by_major(vio, pkt->major)))
                return handshake_failure(vio);

        if (send_version(vio, nver->major, nver->minor) < 0)
                return handshake_failure(vio);

        return 0;
}

static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
{
        switch (pkt->tag.stype) {
        case VIO_SUBTYPE_INFO:
                return process_ver_info(vio, pkt);

        case VIO_SUBTYPE_ACK:
                return process_ver_ack(vio, pkt);

        case VIO_SUBTYPE_NACK:
                return process_ver_nack(vio, pkt);

        default:
                return handshake_failure(vio);
        }
}

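/* Attribute packets are only valid once version negotiation is
 * complete.  The device-specific handle_attr op does the real work;
 * after it succeeds we register our TX dring with the peer if this
 * device uses one and has not yet sent DRING_REG.
 */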
static int process_attr(struct vio_driver_state *vio, void *pkt)
{
        int err;

        if (!(vio->hs_state & VIO_HS_GOTVERS))
                return handshake_failure(vio);

        err = vio->ops->handle_attr(vio, pkt);
        if (err < 0) {
                return handshake_failure(vio);
        } else {
                vio->hs_state |= VIO_HS_GOT_ATTR;

                if ((vio->dr_state & VIO_DR_STATE_TXREQ) &&
                    !(vio->hs_state & VIO_HS_SENT_DREG)) {
                        if (send_dreg(vio) < 0)
                                return handshake_failure(vio);

                        vio->hs_state |= VIO_HS_SENT_DREG;
                }
        }
        return 0;
}

static int all_drings_registered(struct vio_driver_state *vio)
{
        int need_rx, need_tx;

        need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ);
        need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ);

        if (need_rx &&
            !(vio->dr_state & VIO_DR_STATE_RXREG))
                return 0;

        if (need_tx &&
            !(vio->dr_state & VIO_DR_STATE_TXREG))
                return 0;

        return 1;
}

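/* The peer is registering its transmit ring, which becomes our RX
 * ring.  Allocate a buffer big enough to hold one descriptor, record
 * the ring geometry and LDC cookies, and ACK with a newly assigned
 * dring identifier.  Any failure NACKs the registration and tears
 * down the handshake.
 */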
static int process_dreg_info(struct vio_driver_state *vio,
                             struct vio_dring_register *pkt)
{
        struct vio_dring_state *dr;
        int i, len;

        viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
               "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
               (unsigned long long) pkt->dring_ident,
               pkt->num_descr, pkt->descr_size, pkt->options,
               pkt->num_cookies);

        if (!(vio->dr_state & VIO_DR_STATE_RXREQ))
                goto send_nack;

        if (vio->dr_state & VIO_DR_STATE_RXREG)
                goto send_nack;

        BUG_ON(vio->desc_buf);

        vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
        if (!vio->desc_buf)
                goto send_nack;

        vio->desc_buf_len = pkt->descr_size;

        dr = &vio->drings[VIO_DRIVER_RX_RING];

        dr->num_entries = pkt->num_descr;
        dr->entry_size = pkt->descr_size;
        dr->ncookies = pkt->num_cookies;
        for (i = 0; i < dr->ncookies; i++) {
                dr->cookies[i] = pkt->cookies[i];

                viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
                       i,
                       (unsigned long long)
                       pkt->cookies[i].cookie_addr,
                       (unsigned long long)
                       pkt->cookies[i].cookie_size);
        }

        pkt->tag.stype = VIO_SUBTYPE_ACK;
        pkt->dring_ident = ++dr->ident;

        viodbg(HS, "SEND DRING_REG ACK ident[%llx]\n",
               (unsigned long long) pkt->dring_ident);

        len = (sizeof(*pkt) +
               (dr->ncookies * sizeof(struct ldc_trans_cookie)));
        if (send_ctrl(vio, &pkt->tag, len) < 0)
                goto send_nack;

        vio->dr_state |= VIO_DR_STATE_RXREG;

        return 0;

send_nack:
        pkt->tag.stype = VIO_SUBTYPE_NACK;
        viodbg(HS, "SEND DRING_REG NACK\n");
        (void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));

        return handshake_failure(vio);
}

static int process_dreg_ack(struct vio_driver_state *vio,
                            struct vio_dring_register *pkt)
{
        struct vio_dring_state *dr;

        viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
               "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
               (unsigned long long) pkt->dring_ident,
               pkt->num_descr, pkt->descr_size, pkt->options,
               pkt->num_cookies);

        dr = &vio->drings[VIO_DRIVER_TX_RING];

        if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
                return handshake_failure(vio);

        dr->ident = pkt->dring_ident;
        vio->dr_state |= VIO_DR_STATE_TXREG;

        if (all_drings_registered(vio)) {
                if (send_rdx(vio) < 0)
                        return handshake_failure(vio);
                vio->hs_state = VIO_HS_SENT_RDX;
        }
        return 0;
}

static int process_dreg_nack(struct vio_driver_state *vio,
                             struct vio_dring_register *pkt)
{
        viodbg(HS, "GOT DRING_REG NACK ident[%llx] "
               "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
               (unsigned long long) pkt->dring_ident,
               pkt->num_descr, pkt->descr_size, pkt->options,
               pkt->num_cookies);

        return handshake_failure(vio);
}

static int process_dreg(struct vio_driver_state *vio,
                        struct vio_dring_register *pkt)
{
        if (!(vio->hs_state & VIO_HS_GOTVERS))
                return handshake_failure(vio);

        switch (pkt->tag.stype) {
        case VIO_SUBTYPE_INFO:
                return process_dreg_info(vio, pkt);

        case VIO_SUBTYPE_ACK:
                return process_dreg_ack(vio, pkt);

        case VIO_SUBTYPE_NACK:
                return process_dreg_nack(vio, pkt);

        default:
                return handshake_failure(vio);
        }
}

static int process_dunreg(struct vio_driver_state *vio,
                          struct vio_dring_unregister *pkt)
{
        struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING];

        viodbg(HS, "GOT DRING_UNREG\n");

        if (pkt->dring_ident != dr->ident)
                return 0;

        vio->dr_state &= ~VIO_DR_STATE_RXREG;

        memset(dr, 0, sizeof(*dr));

        kfree(vio->desc_buf);
        vio->desc_buf = NULL;
        vio->desc_buf_len = 0;

        return 0;
}

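/* RDX is the final step of the handshake: an RDX INFO from the peer
 * is ACKed, an RDX ACK confirms that our own RDX was accepted, and
 * an RDX NACK aborts the handshake.
 */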
static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
        viodbg(HS, "GOT RDX INFO\n");

        pkt->tag.stype = VIO_SUBTYPE_ACK;
        viodbg(HS, "SEND RDX ACK\n");
        if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
                return handshake_failure(vio);

        vio->hs_state |= VIO_HS_SENT_RDX_ACK;
        return 0;
}

static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
        viodbg(HS, "GOT RDX ACK\n");

        if (!(vio->hs_state & VIO_HS_SENT_RDX))
                return handshake_failure(vio);

        vio->hs_state |= VIO_HS_GOT_RDX_ACK;
        return 0;
}

static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
        viodbg(HS, "GOT RDX NACK\n");

        return handshake_failure(vio);
}

static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
        if (!all_drings_registered(vio))
                return handshake_failure(vio);

        switch (pkt->tag.stype) {
        case VIO_SUBTYPE_INFO:
                return process_rdx_info(vio, pkt);

        case VIO_SUBTYPE_ACK:
                return process_rdx_ack(vio, pkt);

        case VIO_SUBTYPE_NACK:
                return process_rdx_nack(vio, pkt);

        default:
                return handshake_failure(vio);
        }
}

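/* Top-level dispatcher for control packets.  Each packet is handed to
 * the handler for its stype_env, and when a handler moves the
 * handshake into the VIO_HS_COMPLETE state the driver is notified via
 * the handshake_complete op.
 */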
int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
{
        struct vio_msg_tag *tag = pkt;
        u8 prev_state = vio->hs_state;
        int err;

        switch (tag->stype_env) {
        case VIO_VER_INFO:
                err = process_ver(vio, pkt);
                break;

        case VIO_ATTR_INFO:
                err = process_attr(vio, pkt);
                break;

        case VIO_DRING_REG:
                err = process_dreg(vio, pkt);
                break;

        case VIO_DRING_UNREG:
                err = process_dunreg(vio, pkt);
                break;

        case VIO_RDX:
                err = process_rdx(vio, pkt);
                break;

        default:
                err = process_unknown(vio, pkt);
                break;
        }
        if (!err &&
            vio->hs_state != prev_state &&
            (vio->hs_state & VIO_HS_COMPLETE))
                vio->ops->handshake_complete(vio);

        return err;
}
EXPORT_SYMBOL(vio_control_pkt_engine);

void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);

/* The issue is that the Solaris virtual disk server just mirrors the
 * SID values it gets from the client peer.  So we work around that
 * here in vio_{validate,send}_sid() so that the drivers don't need
 * to be aware of this crap.
 */
int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
{
        u32 sid;

        /* Always let VERSION+INFO packets through unchecked, they
         * define the new SID.
         */
        if (tp->type == VIO_TYPE_CTRL &&
            tp->stype == VIO_SUBTYPE_INFO &&
            tp->stype_env == VIO_VER_INFO)
                return 0;

        /* Ok, now figure out which SID to use. */
        switch (vio->dev_class) {
        case VDEV_NETWORK:
        case VDEV_NETWORK_SWITCH:
        case VDEV_DISK_SERVER:
        default:
                sid = vio->_peer_sid;
                break;

        case VDEV_DISK:
                sid = vio->_local_sid;
                break;
        }

        if (sid == tp->sid)
                return 0;
        viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
               tp->sid, vio->_peer_sid, vio->_local_sid);
        return -EINVAL;
}
EXPORT_SYMBOL(vio_validate_sid);

u32 vio_send_sid(struct vio_driver_state *vio)
{
        switch (vio->dev_class) {
        case VDEV_NETWORK:
        case VDEV_NETWORK_SWITCH:
        case VDEV_DISK:
        default:
                return vio->_local_sid;

        case VDEV_DISK_SERVER:
                return vio->_peer_sid;
        }
}
EXPORT_SYMBOL(vio_send_sid);

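/* Allocate the LDC channel for this device: take the caller's base
 * configuration, substitute the per-device TX/RX interrupts, and bind
 * the resulting channel to vio->lp.
 */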
int vio_ldc_alloc(struct vio_driver_state *vio,
                  struct ldc_channel_config *base_cfg,
                  void *event_arg)
{
        struct ldc_channel_config cfg = *base_cfg;
        struct ldc_channel *lp;

        cfg.tx_irq = vio->vdev->tx_irq;
        cfg.rx_irq = vio->vdev->rx_irq;

        lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg);
        if (IS_ERR(lp))
                return PTR_ERR(lp);

        vio->lp = lp;

        return 0;
}
EXPORT_SYMBOL(vio_ldc_alloc);

void vio_ldc_free(struct vio_driver_state *vio)
{
        ldc_free(vio->lp);
        vio->lp = NULL;

        kfree(vio->desc_buf);
        vio->desc_buf = NULL;
        vio->desc_buf_len = 0;
}
EXPORT_SYMBOL(vio_ldc_free);

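/* Bring the LDC channel up: bind it if it is still in the INIT state,
 * then attempt to connect.  If either step fails, rearm vio->timer so
 * the whole sequence is retried roughly one second later.
 */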
void vio_port_up(struct vio_driver_state *vio)
{
        unsigned long flags;
        int err, state;

        spin_lock_irqsave(&vio->lock, flags);

        state = ldc_state(vio->lp);

        err = 0;
        if (state == LDC_STATE_INIT) {
                err = ldc_bind(vio->lp, vio->name);
                if (err)
                        printk(KERN_WARNING "%s: Port %lu bind failed, "
                               "err=%d\n",
                               vio->name, vio->vdev->channel_id, err);
        }

        if (!err) {
                err = ldc_connect(vio->lp);
                if (err)
                        printk(KERN_WARNING "%s: Port %lu connect failed, "
                               "err=%d\n",
                               vio->name, vio->vdev->channel_id, err);
        }
        if (err) {
                unsigned long expires = jiffies + HZ;

                expires = round_jiffies(expires);
                mod_timer(&vio->timer, expires);
        }

        spin_unlock_irqrestore(&vio->lock, flags);
}
EXPORT_SYMBOL(vio_port_up);

static void vio_port_timer(unsigned long _arg)
{
        struct vio_driver_state *vio = (struct vio_driver_state *) _arg;

        vio_port_up(vio);
}

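/* Initialize the common VIO driver state: validate the device class,
 * the required driver ops, and the version table, then record them
 * and set up the retry timer used by vio_port_up().
 */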
int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
                    u8 dev_class, struct vio_version *ver_table,
                    int ver_table_size, struct vio_driver_ops *ops,
                    char *name)
{
        switch (dev_class) {
        case VDEV_NETWORK:
        case VDEV_NETWORK_SWITCH:
        case VDEV_DISK:
        case VDEV_DISK_SERVER:
                break;

        default:
                return -EINVAL;
        }

        if (!ops->send_attr ||
            !ops->handle_attr ||
            !ops->handshake_complete)
                return -EINVAL;

        if (!ver_table || ver_table_size < 0)
                return -EINVAL;

        if (!name)
                return -EINVAL;

        spin_lock_init(&vio->lock);

        vio->name = name;

        vio->dev_class = dev_class;
        vio->vdev = vdev;

        vio->ver_table = ver_table;
        vio->ver_table_entries = ver_table_size;

        vio->ops = ops;

        setup_timer(&vio->timer, vio_port_timer, (unsigned long) vio);

        return 0;
}
EXPORT_SYMBOL(vio_driver_init);