Commit | Line | Data |
---|---|---|
667ef3c3 DM |
1 | /* sunvdc.c: Sun LDOM Virtual Disk Client. |
2 | * | |
3 | * Copyright (C) 2007 David S. Miller <davem@davemloft.net> | |
4 | */ | |
5 | ||
6 | #include <linux/module.h> | |
7 | #include <linux/kernel.h> | |
8 | #include <linux/types.h> | |
9 | #include <linux/blkdev.h> | |
10 | #include <linux/hdreg.h> | |
11 | #include <linux/genhd.h> | |
12 | #include <linux/slab.h> | |
13 | #include <linux/spinlock.h> | |
14 | #include <linux/completion.h> | |
15 | #include <linux/delay.h> | |
16 | #include <linux/init.h> | |
17 | #include <linux/list.h> | |
18 | ||
19 | #include <asm/vio.h> | |
20 | #include <asm/ldc.h> | |
21 | ||
#define DRV_MODULE_NAME "sunvdc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "June 25, 2007"

/* Banner printed once, at the first vdc_probe(). */
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Number of TX descriptor ring entries.  Must remain a power of two:
 * producer/consumer indices are advanced with "& (VDC_TX_RING_SIZE - 1)".
 */
#define VDC_TX_RING_SIZE 256

/* Values for vio_completion.waiting_for, matched by vdc_finish().
 * WAITING_FOR_ANY (-1) completes whichever waiter is pending.
 */
#define WAITING_FOR_LINK_UP 0x01
#define WAITING_FOR_TX_SPACE 0x02
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1
40 | ||
/* Per-ring-slot bookkeeping: the block-layer request serviced by the
 * TX descriptor at the same index.  NULL marks a "special" command
 * issued by generic_request() (see vdc_end_one()).
 */
struct vdc_req_entry {
	struct request *req;
};
44 | ||
/* State for one virtual-disk port (one gendisk). */
struct vdc_port {
	struct vio_driver_state vio;	/* embedded VIO state; to_vdc_port() maps back */

	struct vdc *vp;			/* parent device state */

	struct gendisk *disk;

	struct vdc_completion *cmp;	/* NOTE(review): appears unused in this file;
					 * waiters hang off vio.cmp instead -- confirm
					 */

	u64 req_id;			/* id stamped into each TX descriptor */
	u64 seq;
	struct vdc_req_entry rq_arr[VDC_TX_RING_SIZE];	/* one entry per ring slot */

	unsigned long ring_cookies;	/* max LDC cookies per descriptor */

	u64 max_xfer_size;		/* in units of vdisk_block_size */
	u32 vdisk_block_size;

	/* The server fills these in for us in the disk attribute
	 * ACK packet.
	 */
	u64 operations;			/* bitmask of supported VD_OP_* commands */
	u32 vdisk_size;			/* capacity; recomputed from geometry in probe_disk() */
	u8 vdisk_type;			/* VD_DISK_TYPE_DISK or VD_DISK_TYPE_SLICE */
	u8 dev_no;			/* "id" property; selects disk name and minors */

	char disk_name[32];

	struct vio_disk_geom geom;	/* fetched via VD_OP_GET_DISKGEOM */
	struct vio_disk_vtoc label;	/* fetched via VD_OP_GET_VTOC */

	struct list_head list;		/* linkage on vdc::port_list */
};
78 | ||
/* Map an embedded vio_driver_state back to its containing vdc_port. */
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vdc_port, vio);
}
83 | ||
/* Parent device state; one per "block" channel-devices node. */
struct vdc {
	/* Protects port_list. */
	spinlock_t lock;

	struct vio_dev *dev;

	struct list_head port_list;	/* all child vdc_ports */
};
92 | ||
/* Protocol versions offered during the VIO handshake.
 * Ordered from largest major to lowest.
 */
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 0 },
};
97 | ||
#define VDCBLK_NAME "vdisk"
/* Dynamically assigned block major (register_blkdev() in vdc_init()). */
static int vdc_major;
/* Each disk spans 1 << PARTITION_SHIFT minor numbers. */
#define PARTITION_SHIFT 3
101 | ||
/* Number of free slots in the TX descriptor ring. */
static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}
106 | ||
107 | static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |
108 | { | |
109 | struct gendisk *disk = bdev->bd_disk; | |
110 | struct vdc_port *port = disk->private_data; | |
111 | ||
112 | geo->heads = (u8) port->geom.num_hd; | |
113 | geo->sectors = (u8) port->geom.num_sec; | |
114 | geo->cylinders = port->geom.num_cyl; | |
115 | ||
116 | return 0; | |
117 | } | |
118 | ||
/* Disk operations; only getgeo is implemented. */
static struct block_device_operations vdc_fops = {
	.owner = THIS_MODULE,
	.getgeo = vdc_getgeo,
};
123 | ||
124 | static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for) | |
125 | { | |
126 | if (vio->cmp && | |
127 | (waiting_for == -1 || | |
128 | vio->cmp->waiting_for == waiting_for)) { | |
129 | vio->cmp->err = err; | |
130 | complete(&vio->cmp->com); | |
131 | vio->cmp = NULL; | |
132 | } | |
133 | } | |
134 | ||
/* VIO op: handshake done -- release the LINK_UP waiter in probe_disk(). */
static void vdc_handshake_complete(struct vio_driver_state *vio)
{
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
}
139 | ||
140 | static int vdc_handle_unknown(struct vdc_port *port, void *arg) | |
141 | { | |
142 | struct vio_msg_tag *pkt = arg; | |
143 | ||
144 | printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n", | |
145 | pkt->type, pkt->stype, pkt->stype_env, pkt->sid); | |
146 | printk(KERN_ERR PFX "Resetting connection.\n"); | |
147 | ||
148 | ldc_disconnect(port->vio.lp); | |
149 | ||
150 | return -ECONNRESET; | |
151 | } | |
152 | ||
153 | static int vdc_send_attr(struct vio_driver_state *vio) | |
154 | { | |
155 | struct vdc_port *port = to_vdc_port(vio); | |
156 | struct vio_disk_attr_info pkt; | |
157 | ||
158 | memset(&pkt, 0, sizeof(pkt)); | |
159 | ||
160 | pkt.tag.type = VIO_TYPE_CTRL; | |
161 | pkt.tag.stype = VIO_SUBTYPE_INFO; | |
162 | pkt.tag.stype_env = VIO_ATTR_INFO; | |
163 | pkt.tag.sid = vio_send_sid(vio); | |
164 | ||
165 | pkt.xfer_mode = VIO_DRING_MODE; | |
166 | pkt.vdisk_block_size = port->vdisk_block_size; | |
167 | pkt.max_xfer_size = port->max_xfer_size; | |
168 | ||
169 | viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n", | |
170 | pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size); | |
171 | ||
172 | return vio_ldc_send(&port->vio, &pkt, sizeof(pkt)); | |
173 | } | |
174 | ||
175 | static int vdc_handle_attr(struct vio_driver_state *vio, void *arg) | |
176 | { | |
177 | struct vdc_port *port = to_vdc_port(vio); | |
178 | struct vio_disk_attr_info *pkt = arg; | |
179 | ||
180 | viodbg(HS, "GOT ATTR stype[0x%x] ops[%lx] disk_size[%lu] disk_type[%x] " | |
181 | "xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n", | |
182 | pkt->tag.stype, pkt->operations, | |
183 | pkt->vdisk_size, pkt->vdisk_type, | |
184 | pkt->xfer_mode, pkt->vdisk_block_size, | |
185 | pkt->max_xfer_size); | |
186 | ||
187 | if (pkt->tag.stype == VIO_SUBTYPE_ACK) { | |
188 | switch (pkt->vdisk_type) { | |
189 | case VD_DISK_TYPE_DISK: | |
190 | case VD_DISK_TYPE_SLICE: | |
191 | break; | |
192 | ||
193 | default: | |
194 | printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n", | |
195 | vio->name, pkt->vdisk_type); | |
196 | return -ECONNRESET; | |
197 | } | |
198 | ||
199 | if (pkt->vdisk_block_size > port->vdisk_block_size) { | |
200 | printk(KERN_ERR PFX "%s: BLOCK size increased " | |
201 | "%u --> %u\n", | |
202 | vio->name, | |
203 | port->vdisk_block_size, pkt->vdisk_block_size); | |
204 | return -ECONNRESET; | |
205 | } | |
206 | ||
207 | port->operations = pkt->operations; | |
208 | port->vdisk_size = pkt->vdisk_size; | |
209 | port->vdisk_type = pkt->vdisk_type; | |
210 | if (pkt->max_xfer_size < port->max_xfer_size) | |
211 | port->max_xfer_size = pkt->max_xfer_size; | |
212 | port->vdisk_block_size = pkt->vdisk_block_size; | |
213 | return 0; | |
214 | } else { | |
215 | printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name); | |
216 | ||
217 | return -ECONNRESET; | |
218 | } | |
219 | } | |
220 | ||
/* Complete a "special" (generic_request) command.  The descriptor's
 * status is the server's error code; it is negated for the waiter.
 */
static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}
227 | ||
/* Finish a block request via the legacy end_that_request_* interface.
 * A non-zero return from end_that_request_first() means the request
 * still has sectors outstanding, so final completion is deferred.
 */
static void vdc_end_request(struct request *req, int uptodate, int num_sectors)
{
	if (end_that_request_first(req, uptodate, num_sectors))
		return;
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req, uptodate);
}
235 | ||
/* Retire one TX descriptor: unmap its LDC cookies, free the slot, and
 * complete the associated request -- or the pending special command
 * when rqe->req is NULL.  Restarts the queue if __send_request() had
 * stopped it for lack of ring space.
 */
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
{
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];
	struct request *req;

	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
		return;

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);

	req = rqe->req;
	if (req == NULL) {
		vdc_end_special(port, desc);
		return;
	}

	rqe->req = NULL;

	/* desc->size is in bytes; convert to 512-byte sectors. */
	vdc_end_request(req, !desc->status, desc->size >> 9);

	if (blk_queue_stopped(port->disk->queue))
		blk_start_queue(port->disk->queue);
}
263 | ||
264 | static int vdc_ack(struct vdc_port *port, void *msgbuf) | |
265 | { | |
266 | struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | |
267 | struct vio_dring_data *pkt = msgbuf; | |
268 | ||
269 | if (unlikely(pkt->dring_ident != dr->ident || | |
270 | pkt->start_idx != pkt->end_idx || | |
271 | pkt->start_idx >= VDC_TX_RING_SIZE)) | |
272 | return 0; | |
273 | ||
274 | vdc_end_one(port, dr, pkt->start_idx); | |
275 | ||
276 | return 0; | |
277 | } | |
278 | ||
/* DATA/NACK from the server -- no recovery is implemented yet. */
static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
	/* XXX Implement me XXX */
	return 0;
}
284 | ||
/* LDC event callback, invoked with interrupts to serialize against.
 * Link state changes (RESET/UP) are forwarded to the VIO core.  On
 * DATA_READY every queued message is drained: data ACK/NACKs are
 * handled locally, control packets go to the VIO handshake engine,
 * anything else resets the connection.  A final negative err releases
 * whoever is blocked on vio->cmp.
 */
static void vdc_event(void *arg, int event)
{
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)	/* no more messages queued */
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
			else
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vdc_handle_unknown(port, &msgbuf);
		}
		if (err < 0)
			break;
	}
	if (err < 0)
		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
	spin_unlock_irqrestore(&vio->lock, flags);
}
350 | ||
/* Notify the server that the descriptor at dr->prod is ready, retrying
 * with exponential backoff (1..128 us) while the LDC send path reports
 * -EAGAIN.  Returns vio_ldc_send()'s result: positive on success,
 * negative errno on failure.  Callers hold vio->lock.
 */
static int __vdc_tx_trigger(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = dr->prod,
		.end_idx = dr->prod,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}
382 | ||
/* Build and submit one TX descriptor for a block request.  Called from
 * do_vdc_request() with the queue lock (== vio.lock) held.  Returns a
 * negative errno on failure, or __vdc_tx_trigger()'s positive result.
 */
static int __send_request(struct request *req)
{
	struct vdc_port *port = req->rq_disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	/* NOTE(review): on-stack VLA sized by ring_cookies (fixed at probe
	 * time from max_xfer_size) -- confirm the worst case fits on the
	 * kernel stack.
	 */
	struct scatterlist sg[port->ring_cookies];
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	int nsg, err, i;
	u64 len;
	u8 op;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	/* A READ means the server writes into our buffer, and vice versa. */
	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;
		op = VD_OP_BREAD;
	} else {
		map_perm |= LDC_MAP_R;
		op = VD_OP_BWRITE;
	}

	nsg = blk_rq_map_sg(req->q, req, sg);

	/* Total transfer length, in bytes. */
	len = 0;
	for (i = 0; i < nsg; i++)
		len += sg[i].length;

	/* Ring full: stop the queue; vdc_end_one() restarts it once a
	 * descriptor is retired.
	 */
	if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
		blk_stop_queue(port->disk->queue);
		err = -ENOMEM;
		goto out;
	}

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,
			 map_perm);
	if (err < 0) {
		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
		return err;
	}

	rqe = &port->rq_arr[dr->prod];
	rqe->req = req;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
		desc->slice = 2;	/* Sun label convention: slice 2 spans the whole disk */
	} else {
		desc->slice = 0;
	}
	desc->status = ~0;
	/* req->sector is in 512-byte units; convert to vdisk blocks. */
	desc->offset = (req->sector << 9) / port->vdisk_block_size;
	desc->size = len;
	desc->ncookies = err;	/* ldc_map_sg() returned the cookie count */

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err < 0) {
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
	} else {
		port->req_id++;
		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
	}
out:

	return err;
}
460 | ||
/* Request queue callback (runs with the queue lock, i.e. vio.lock,
 * held): drain the queue, handing each request to __send_request().
 * On failure the whole request is errored out.
 */
static void do_vdc_request(request_queue_t *q)
{
	while (1) {
		struct request *req = elv_next_request(q);

		if (!req)
			break;

		blkdev_dequeue_request(req);
		if (__send_request(req) < 0)
			vdc_end_request(req, 0, req->hard_nr_sectors);
	}
}
474 | ||
475 | static int generic_request(struct vdc_port *port, u8 op, void *buf, int len) | |
476 | { | |
477 | struct vio_dring_state *dr; | |
478 | struct vio_completion comp; | |
479 | struct vio_disk_desc *desc; | |
480 | unsigned int map_perm; | |
481 | unsigned long flags; | |
482 | int op_len, err; | |
483 | void *req_buf; | |
484 | ||
485 | if (!(((u64)1 << ((u64)op - 1)) & port->operations)) | |
486 | return -EOPNOTSUPP; | |
487 | ||
488 | switch (op) { | |
489 | case VD_OP_BREAD: | |
490 | case VD_OP_BWRITE: | |
491 | default: | |
492 | return -EINVAL; | |
493 | ||
494 | case VD_OP_FLUSH: | |
495 | op_len = 0; | |
496 | map_perm = 0; | |
497 | break; | |
498 | ||
499 | case VD_OP_GET_WCE: | |
500 | op_len = sizeof(u32); | |
501 | map_perm = LDC_MAP_W; | |
502 | break; | |
503 | ||
504 | case VD_OP_SET_WCE: | |
505 | op_len = sizeof(u32); | |
506 | map_perm = LDC_MAP_R; | |
507 | break; | |
508 | ||
509 | case VD_OP_GET_VTOC: | |
510 | op_len = sizeof(struct vio_disk_vtoc); | |
511 | map_perm = LDC_MAP_W; | |
512 | break; | |
513 | ||
514 | case VD_OP_SET_VTOC: | |
515 | op_len = sizeof(struct vio_disk_vtoc); | |
516 | map_perm = LDC_MAP_R; | |
517 | break; | |
518 | ||
519 | case VD_OP_GET_DISKGEOM: | |
520 | op_len = sizeof(struct vio_disk_geom); | |
521 | map_perm = LDC_MAP_W; | |
522 | break; | |
523 | ||
524 | case VD_OP_SET_DISKGEOM: | |
525 | op_len = sizeof(struct vio_disk_geom); | |
526 | map_perm = LDC_MAP_R; | |
527 | break; | |
528 | ||
529 | case VD_OP_SCSICMD: | |
530 | op_len = 16; | |
531 | map_perm = LDC_MAP_RW; | |
532 | break; | |
533 | ||
534 | case VD_OP_GET_DEVID: | |
535 | op_len = sizeof(struct vio_disk_devid); | |
536 | map_perm = LDC_MAP_W; | |
537 | break; | |
538 | ||
539 | case VD_OP_GET_EFI: | |
540 | case VD_OP_SET_EFI: | |
541 | return -EOPNOTSUPP; | |
542 | break; | |
543 | }; | |
544 | ||
545 | map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO; | |
546 | ||
547 | op_len = (op_len + 7) & ~7; | |
548 | req_buf = kzalloc(op_len, GFP_KERNEL); | |
549 | if (!req_buf) | |
550 | return -ENOMEM; | |
551 | ||
552 | if (len > op_len) | |
553 | len = op_len; | |
554 | ||
555 | if (map_perm & LDC_MAP_R) | |
556 | memcpy(req_buf, buf, len); | |
557 | ||
558 | spin_lock_irqsave(&port->vio.lock, flags); | |
559 | ||
560 | dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | |
561 | ||
562 | /* XXX If we want to use this code generically we have to | |
563 | * XXX handle TX ring exhaustion etc. | |
564 | */ | |
565 | desc = vio_dring_cur(dr); | |
566 | ||
567 | err = ldc_map_single(port->vio.lp, req_buf, op_len, | |
568 | desc->cookies, port->ring_cookies, | |
569 | map_perm); | |
570 | if (err < 0) { | |
571 | spin_unlock_irqrestore(&port->vio.lock, flags); | |
572 | kfree(req_buf); | |
573 | return err; | |
574 | } | |
575 | ||
576 | init_completion(&comp.com); | |
577 | comp.waiting_for = WAITING_FOR_GEN_CMD; | |
578 | port->vio.cmp = ∁ | |
579 | ||
580 | desc->hdr.ack = VIO_ACK_ENABLE; | |
581 | desc->req_id = port->req_id; | |
582 | desc->operation = op; | |
583 | desc->slice = 0; | |
584 | desc->status = ~0; | |
585 | desc->offset = 0; | |
586 | desc->size = op_len; | |
587 | desc->ncookies = err; | |
588 | ||
589 | /* This has to be a non-SMP write barrier because we are writing | |
590 | * to memory which is shared with the peer LDOM. | |
591 | */ | |
592 | wmb(); | |
593 | desc->hdr.state = VIO_DESC_READY; | |
594 | ||
595 | err = __vdc_tx_trigger(port); | |
596 | if (err >= 0) { | |
597 | port->req_id++; | |
598 | dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1); | |
599 | spin_unlock_irqrestore(&port->vio.lock, flags); | |
600 | ||
601 | wait_for_completion(&comp.com); | |
602 | err = comp.err; | |
603 | } else { | |
604 | port->vio.cmp = NULL; | |
605 | spin_unlock_irqrestore(&port->vio.lock, flags); | |
606 | } | |
607 | ||
608 | if (map_perm & LDC_MAP_W) | |
609 | memcpy(buf, req_buf, len); | |
610 | ||
611 | kfree(req_buf); | |
612 | ||
613 | return err; | |
614 | } | |
615 | ||
/* Allocate and export the TX descriptor ring to the server.  Each
 * entry carries room for ring_cookies trans cookies inline, so the
 * entry size is computed rather than taken from sizeof() alone.
 */
static int __devinit vdc_alloc_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	unsigned long len, entry_size;
	int ncookies;
	void *dring;

	entry_size = sizeof(struct vio_disk_desc) +
		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
	len = (VDC_TX_RING_SIZE * entry_size);

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring))
		return PTR_ERR(dring);

	dr->base = dring;
	dr->entry_size = entry_size;
	dr->num_entries = VDC_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VDC_TX_RING_SIZE;
	dr->ncookies = ncookies;	/* actual cookie count used */

	return 0;
}
645 | ||
646 | static void vdc_free_tx_ring(struct vdc_port *port) | |
647 | { | |
648 | struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | |
649 | ||
650 | if (dr->base) { | |
651 | ldc_free_exp_dring(port->vio.lp, dr->base, | |
652 | (dr->entry_size * dr->num_entries), | |
653 | dr->cookies, dr->ncookies); | |
654 | dr->base = NULL; | |
655 | dr->entry_size = 0; | |
656 | dr->num_entries = 0; | |
657 | dr->pending = 0; | |
658 | dr->ncookies = 0; | |
659 | } | |
660 | } | |
661 | ||
/* Bring the port up and register the disk: wait for the VIO handshake
 * to finish, fetch the VTOC and geometry from the server, then create
 * and add the request queue and gendisk.
 */
static int probe_disk(struct vdc_port *port)
{
	struct vio_completion comp;
	struct request_queue *q;
	struct gendisk *g;
	int err;

	init_completion(&comp.com);
	comp.err = 0;
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);

	/* Released by vdc_handshake_complete() or an error in vdc_event(). */
	wait_for_completion(&comp.com);
	if (comp.err)
		return comp.err;

	err = generic_request(port, VD_OP_GET_VTOC,
			      &port->label, sizeof(port->label));
	if (err < 0) {
		printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
		return err;
	}

	err = generic_request(port, VD_OP_GET_DISKGEOM,
			      &port->geom, sizeof(port->geom));
	if (err < 0) {
		printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
		       "error %d\n", err);
		return err;
	}

	/* Derive the capacity (in sectors) from the reported geometry. */
	port->vdisk_size = ((u64)port->geom.num_cyl *
			    (u64)port->geom.num_hd *
			    (u64)port->geom.num_sec);

	/* The queue shares vio.lock, so do_vdc_request() runs under it. */
	q = blk_init_queue(do_vdc_request, &port->vio.lock);
	if (!q) {
		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
		       port->vio.name);
		return -ENOMEM;
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (!g) {
		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
		       port->vio.name);
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	port->disk = g;

	/* Bound each request by what one TX descriptor can map. */
	blk_queue_max_hw_segments(q, port->ring_cookies);
	blk_queue_max_phys_segments(q, port->ring_cookies);
	blk_queue_max_sectors(q, port->max_xfer_size);
	g->major = vdc_major;
	g->first_minor = port->dev_no << PARTITION_SHIFT;
	strcpy(g->disk_name, port->disk_name);

	g->fops = &vdc_fops;
	g->queue = q;
	g->private_data = port;
	g->driverfs_dev = &port->vio.vdev->dev;

	set_capacity(g, port->vdisk_size);

	printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
	       g->disk_name,
	       port->vdisk_size, (port->vdisk_size >> (20 - 9)));

	add_disk(g);

	return 0;
}
737 | ||
/* LDC channel configuration shared by every port; events land in
 * vdc_event().
 */
static struct ldc_channel_config vdc_ldc_cfg = {
	.event = vdc_event,
	.mtu = 64,
	.mode = LDC_MODE_UNRELIABLE,
};
743 | ||
/* Hooks through which the generic VIO handshake engine calls back
 * into this driver.
 */
static struct vio_driver_ops vdc_vio_ops = {
	.send_attr = vdc_send_attr,
	.handle_attr = vdc_handle_attr,
	.handshake_complete = vdc_handshake_complete,
};
749 | ||
750 | static int __devinit vdc_port_probe(struct vio_dev *vdev, | |
751 | const struct vio_device_id *id) | |
752 | { | |
43fdf274 | 753 | struct mdesc_handle *hp; |
667ef3c3 DM |
754 | struct vdc_port *port; |
755 | unsigned long flags; | |
756 | struct vdc *vp; | |
757 | const u64 *port_id; | |
758 | int err; | |
759 | ||
760 | vp = dev_get_drvdata(vdev->dev.parent); | |
761 | if (!vp) { | |
762 | printk(KERN_ERR PFX "Cannot find port parent vdc.\n"); | |
763 | return -ENODEV; | |
764 | } | |
765 | ||
43fdf274 | 766 | hp = mdesc_grab(); |
667ef3c3 | 767 | |
43fdf274 DM |
768 | port_id = mdesc_get_property(hp, vdev->mp, "id", NULL); |
769 | err = -ENODEV; | |
667ef3c3 DM |
770 | if (!port_id) { |
771 | printk(KERN_ERR PFX "Port lacks id property.\n"); | |
43fdf274 | 772 | goto err_out_release_mdesc; |
667ef3c3 DM |
773 | } |
774 | if ((*port_id << PARTITION_SHIFT) & ~(u64)MINORMASK) { | |
775 | printk(KERN_ERR PFX "Port id [%lu] too large.\n", *port_id); | |
43fdf274 | 776 | goto err_out_release_mdesc; |
667ef3c3 DM |
777 | } |
778 | ||
779 | port = kzalloc(sizeof(*port), GFP_KERNEL); | |
43fdf274 | 780 | err = -ENOMEM; |
667ef3c3 DM |
781 | if (!port) { |
782 | printk(KERN_ERR PFX "Cannot allocate vdc_port.\n"); | |
43fdf274 | 783 | goto err_out_release_mdesc; |
667ef3c3 DM |
784 | } |
785 | ||
786 | port->vp = vp; | |
787 | port->dev_no = *port_id; | |
788 | ||
789 | if (port->dev_no >= 26) | |
790 | snprintf(port->disk_name, sizeof(port->disk_name), | |
791 | VDCBLK_NAME "%c%c", | |
792 | 'a' + (port->dev_no / 26) - 1, | |
793 | 'a' + (port->dev_no % 26)); | |
794 | else | |
795 | snprintf(port->disk_name, sizeof(port->disk_name), | |
796 | VDCBLK_NAME "%c", 'a' + (port->dev_no % 26)); | |
797 | ||
43fdf274 | 798 | err = vio_driver_init(&port->vio, vdev, VDEV_DISK, |
667ef3c3 DM |
799 | vdc_versions, ARRAY_SIZE(vdc_versions), |
800 | &vdc_vio_ops, port->disk_name); | |
801 | if (err) | |
802 | goto err_out_free_port; | |
803 | ||
804 | port->vdisk_block_size = 512; | |
805 | port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size); | |
806 | port->ring_cookies = ((port->max_xfer_size * | |
807 | port->vdisk_block_size) / PAGE_SIZE) + 2; | |
808 | ||
809 | err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port); | |
810 | if (err) | |
811 | goto err_out_free_port; | |
812 | ||
813 | err = vdc_alloc_tx_ring(port); | |
814 | if (err) | |
815 | goto err_out_free_ldc; | |
816 | ||
817 | err = probe_disk(port); | |
818 | if (err) | |
819 | goto err_out_free_tx_ring; | |
820 | ||
821 | INIT_LIST_HEAD(&port->list); | |
822 | ||
823 | spin_lock_irqsave(&vp->lock, flags); | |
824 | list_add(&port->list, &vp->port_list); | |
825 | spin_unlock_irqrestore(&vp->lock, flags); | |
826 | ||
827 | dev_set_drvdata(&vdev->dev, port); | |
828 | ||
43fdf274 DM |
829 | mdesc_release(hp); |
830 | ||
667ef3c3 DM |
831 | return 0; |
832 | ||
833 | err_out_free_tx_ring: | |
834 | vdc_free_tx_ring(port); | |
835 | ||
836 | err_out_free_ldc: | |
837 | vio_ldc_free(&port->vio); | |
838 | ||
839 | err_out_free_port: | |
840 | kfree(port); | |
841 | ||
43fdf274 DM |
842 | err_out_release_mdesc: |
843 | mdesc_release(hp); | |
667ef3c3 DM |
844 | return err; |
845 | } | |
846 | ||
/* Remove one virtual-disk port: stop the VIO timer and release the
 * TX ring and LDC channel.
 *
 * NOTE(review): the gendisk/queue created by probe_disk() are not torn
 * down here (no del_gendisk/put_disk/blk_cleanup_queue), and the port
 * is never unlinked from vp->port_list -- confirm whether runtime
 * removal is actually possible on this platform.
 */
static int vdc_port_remove(struct vio_dev *vdev)
{
	struct vdc_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		del_timer_sync(&port->vio.timer);

		vdc_free_tx_ring(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}
863 | ||
864 | static struct vio_device_id vdc_port_match[] = { | |
865 | { | |
866 | .type = "vdc-port", | |
867 | }, | |
868 | {}, | |
869 | }; | |
870 | MODULE_DEVICE_TABLE(vio, vdc_match); | |
871 | ||
/* Driver bound to each "vdc-port" child device. */
static struct vio_driver vdc_port_driver = {
	.id_table = vdc_port_match,
	.probe = vdc_port_probe,
	.remove = vdc_port_remove,
	.driver = {
		.name = "vdc_port",
		.owner = THIS_MODULE,
	}
};
881 | ||
882 | static int __devinit vdc_probe(struct vio_dev *vdev, | |
883 | const struct vio_device_id *id) | |
884 | { | |
885 | static int vdc_version_printed; | |
886 | struct vdc *vp; | |
887 | ||
888 | if (vdc_version_printed++ == 0) | |
889 | printk(KERN_INFO "%s", version); | |
890 | ||
891 | vp = kzalloc(sizeof(struct vdc), GFP_KERNEL); | |
892 | if (!vp) | |
893 | return -ENOMEM; | |
894 | ||
895 | spin_lock_init(&vp->lock); | |
896 | vp->dev = vdev; | |
897 | INIT_LIST_HEAD(&vp->port_list); | |
898 | ||
899 | dev_set_drvdata(&vdev->dev, vp); | |
900 | ||
901 | return 0; | |
902 | } | |
903 | ||
904 | static int vdc_remove(struct vio_dev *vdev) | |
905 | { | |
906 | ||
907 | struct vdc *vp = dev_get_drvdata(&vdev->dev); | |
908 | ||
909 | if (vp) { | |
910 | kfree(vp); | |
911 | dev_set_drvdata(&vdev->dev, NULL); | |
912 | } | |
913 | return 0; | |
914 | } | |
915 | ||
/* Parent match: the "block" channel-devices node. */
static struct vio_device_id vdc_match[] = {
	{
		.type = "block",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vdc_match);
923 | ||
/* Driver bound to the parent "block" device. */
static struct vio_driver vdc_driver = {
	.id_table = vdc_match,
	.probe = vdc_probe,
	.remove = vdc_remove,
	.driver = {
		.name = "vdc",
		.owner = THIS_MODULE,
	}
};
933 | ||
/* Module init: grab a dynamic block major, then register the parent
 * ("block") driver and the per-port driver, unwinding on failure.
 */
static int __init vdc_init(void)
{
	int err;

	err = register_blkdev(0, VDCBLK_NAME);
	if (err < 0)
		goto out_err;

	vdc_major = err;	/* 0 requested a dynamic major */
	err = vio_register_driver(&vdc_driver);
	if (err)
		goto out_unregister_blkdev;

	err = vio_register_driver(&vdc_port_driver);
	if (err)
		goto out_unregister_vdc;

	return 0;

out_unregister_vdc:
	vio_unregister_driver(&vdc_driver);

out_unregister_blkdev:
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	vdc_major = 0;

out_err:
	return err;
}
963 | ||
/* Module exit: unregister in the reverse order of vdc_init(). */
static void __exit vdc_exit(void)
{
	vio_unregister_driver(&vdc_port_driver);
	vio_unregister_driver(&vdc_driver);
	unregister_blkdev(vdc_major, VDCBLK_NAME);
}

module_init(vdc_init);
module_exit(vdc_exit);