/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom make_request_fn function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
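/*
 * Illustrative example (added for clarity, not part of the original
 * comment): with 32 kB fixed packets, an application write of a single
 * 2 kB block into the middle of a packet makes the worker thread read
 * the remaining 30 kB of that zone from the media ("read gathering"),
 * assemble the full 32 kB packet, and only then issue one properly
 * aligned packet-sized write to the drive.
 */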
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>

#include <asm/uaccess.h>
#define DRIVER_NAME	"pktcdvd"

#define pkt_err(pd, fmt, ...)						\
	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_notice(pd, fmt, ...)					\
	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_dbg(level, pd, fmt, ...)					\
do {									\
	if (level == 2 && PACKET_DEBUG >= 2)				\
		pr_notice("%s: %s():" fmt,				\
			  pd->name, __func__, ##__VA_ARGS__);		\
	else if (level == 1 && PACKET_DEBUG >= 1)			\
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
} while (0)

#define MAX_SPEED 0xffff
static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t *pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);
static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
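/*
 * Worked example (illustrative, not from the original source): with
 * pd->settings.size == 64 (a 32 kB packet expressed in 512-byte
 * sectors) and pd->offset == 0, the mask clears the low six bits, so
 * sectors 0..63 map to zone 0, sectors 64..127 to zone 64, and so on.
 * A write to sector 100 therefore belongs to the packet whose zone
 * starts at sector 64.
 */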
/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj *pkt_kobj_create(struct pktcdvd_device *pd,
					const char *name,
					struct kobject *parent,
					struct kobj_type *ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}
/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}

/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}

/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006  Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/

#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/
DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on",  0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};
static ssize_t kobj_pkt_show(struct kobject *kobj,
			struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;

	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	return n;
}
static void init_write_congestion_marks(int *lo, int *hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
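/*
 * Illustrative example (not from the original source): with the marks
 * at (off=400, on=500), writing 10000 to congestion_off clamps it back
 * to on - 100 = 400, while writing 0 to congestion_on disables write
 * congestion control entirely (both marks become -1).
 */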
static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}
static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}
static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}

/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/
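/*
 * Example usage (illustrative, not part of the original source): a
 * writer is typically mapped and unmapped through this sysfs class
 * interface, e.g. from a shell:
 *
 *	# echo "11:0" > /sys/class/pktcdvd/add
 *	# cat /sys/class/pktcdvd/device_map
 *	# echo "253:0" > /sys/class/pktcdvd/remove
 *
 * The major:minor pair is parsed by class_pktcdvd_store_add() below
 * with sscanf(buf, "%u:%u", ...); "add" takes the CD/DVD device's
 * numbers, "remove" takes the packet device's. The device numbers
 * shown here are assumptions for the example.
 */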
static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}
static ssize_t class_pktcdvd_show_map(struct class *c,
					struct class_attribute *attr,
					char *data)
{
	int n = 0;
	int idx;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}
static ssize_t class_pktcdvd_store_add(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}
static ssize_t class_pktcdvd_store_remove(struct class *c,
					  struct class_attribute *attr,
					  const char *buf,
					  size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}
static struct class_attribute class_pktcdvd_attrs[] = {
	__ATTR(add,		0200, NULL, class_pktcdvd_store_add),
	__ATTR(remove,		0200, NULL, class_pktcdvd_store_remove),
	__ATTR(device_map,	0444, class_pktcdvd_show_map, NULL),
	__ATTR_NULL
};
static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_attrs = class_pktcdvd_attrs;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		pr_err("failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}
static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/
static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (IS_ERR(pd->dfs_d_root)) {
		pd->dfs_d_root = NULL;
		return;
	}
	pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
				pd->dfs_d_root, pd, &debug_fops);
	if (IS_ERR(pd->dfs_f_info)) {
		pd->dfs_f_info = NULL;
		return;
	}
}
static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	if (pd->dfs_f_info)
		debugfs_remove(pd->dfs_f_info);
	pd->dfs_f_info = NULL;
	if (pd->dfs_d_root)
		debugfs_remove(pd->dfs_d_root);
	pd->dfs_d_root = NULL;
}
static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
	if (IS_ERR(pkt_debugfs_root)) {
		pkt_debugfs_root = NULL;
		return;
	}
}
static void pkt_debugfs_cleanup(void)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}
/* ----------------------------------------------------------*/

static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, pd, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}
/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
		if (!bio)
			goto no_rd_bio;

		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}
/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}
static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}
static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}
static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}
/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_sector);
	return tmp;
}
/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}
/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     WRITE : READ, __GFP_WAIT);
	if (!rq)
		return -ENODEV;

	if (cgc->buflen) {
		if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				    __GFP_WAIT)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	if (cgc->quiet)
		rq->cmd_flags |= REQ_QUIET;

	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
	if (rq->errors)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}
static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}
/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct packet_command *cgc)
{
	struct request_sense *sense = cgc->sense;

	if (sense)
		pr_err("%*ph - sense %02x.%02x.%02x (%s)\n",
		       CDROM_PACKET_SIZE, cgc->cmd,
		       sense->sense_key, sense->asc, sense->ascq,
		       sense_key_string(sense->sense_key));
	else
		pr_err("%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}
/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(&cgc);

	return ret;
}
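/*
 * Note (added for clarity): despite the "normal factor" wording above,
 * the values actually sent in the SET SPEED command are kB/s, as the
 * callers in pkt_open_write() show (1x CD is ~176 kB/s, rounded up to
 * 177 there, so 16x is requested as 16 * 177 kB/s); MAX_SPEED (0xffff)
 * asks the drive for its maximum.
 */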
/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}
/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_sector == pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads += bio->bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		pkt_err(pd, "cdrom max_phys_segments too small\n");
		return -EIO;
	}
}
/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
{
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		if (bvec[f].bv_page != pkt->pages[p]) {
			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom);
			bvec[f].bv_page = pkt->pages[p];
			bvec[f].bv_offset = offs;
		} else {
			BUG_ON(bvec[f].bv_offset != offs);
		}
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			offs = 0;
			p++;
		}
	}
}
static void pkt_end_io_read(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_sector, err);

	if (err)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}
static void pkt_end_io_packet_write(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}
/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, pd, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_reset(bio);
		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}
static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}
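/*
 * Note (added for clarity): together with pkt_get_packet_data() above,
 * which falls back to the tail of pkt_free_list when no zone matches,
 * this head/tail placement acts as a simple LRU: packets whose pages
 * still hold valid cached data are inserted at the head and therefore
 * recycled last, while invalidated packets are reused first.
 */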
/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 */
static int pkt_start_recovery(struct packet_data *pkt)
{
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 */
	return 0;
#if 0
	struct request *rq = pkt->rq;
	struct pktcdvd_device *pd = rq->rq_disk->private_data;
	struct block_device *pkt_bdev;
	struct super_block *sb = NULL;
	unsigned long old_block, new_block;
	sector_t new_sector;

	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
	if (pkt_bdev) {
		sb = get_super(pkt_bdev);
		bdput(pkt_bdev);
	}

	if (!sb)
		return 0;

	if (!sb->s_op->relocate_blocks)
		goto out;

	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
		goto out;

	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	bio_reset(pkt->bio);
	pkt->bio->bi_bdev = pd->bdev;
	pkt->bio->bi_rw = REQ_WRITE;
	pkt->bio->bi_sector = new_sector;
	pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
	pkt->bio->bi_vcnt = pkt->frames;

	pkt->bio->bi_end_io = pkt_end_io_packet_write;
	pkt->bio->bi_private = pkt;

	drop_super(sb);
	return 1;

out:
	drop_super(sb);
	return 0;
#endif
}
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	struct pktcdvd_device *pd = pkt->pd;
	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		pkt_dbg(2, pd, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		pkt_dbg(2, pd, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		pkt_dbg(2, pd, "found zone=%llx\n",
			(unsigned long long)get_zone(bio->bi_sector, pd));
		if (get_zone(bio->bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	   below, wake up any waiters */
	wakeup = (pd->write_congestion_on > 0
			&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup) {
		clear_bdi_congested(&pd->disk->queue->backing_dev_info,
					BLK_RW_ASYNC);
	}

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;
	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	/* XXX: locking? */
	for (f = 0; f < pkt->frames; f++) {
		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
			BUG();
	}
	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
		pkt_make_local_copy(pkt, bvec);
		pkt->cache_valid = 1;
	} else {
		pkt->cache_valid = 0;
	}

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	pkt->w_bio->bi_rw = WRITE;
	pkt_queue_bio(pd, pkt->w_bio);
}
static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
{
	struct bio *bio;

	if (!uptodate)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios)))
		bio_endio(bio, uptodate ? 0 : -EIO);
}
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int uptodate;

	pkt_dbg(2, pd, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			if (pkt_start_recovery(pkt)) {
				pkt_start_write(pd, pkt);
			} else {
				pkt_dbg(2, pd, "No recovery possible\n");
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			}
			break;

		case PACKET_FINISHED_STATE:
			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
			pkt_finish_packet(pkt, uptodate);
			return;

		default:
			BUG();
			break;
		}
	}
}
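/*
 * Added summary of the state flow implemented above: a packet normally
 * moves WAITING -> READ_WAIT -> WRITE_WAIT -> FINISHED; read or write
 * errors divert it to RECOVERY, which either re-queues the write or
 * gives up and marks the packet FINISHED with BIO_UPTODATE unset.
 */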
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, -20);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2],
					states[3], states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			pkt_dbg(2, pd, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			pkt_dbg(2, pd, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
static void pkt_print_settings(struct pktcdvd_device *pd)
{
	pr_info("%s packets, %u blocks, Mode-%c disc\n",
		pd->settings.fp ? "Fixed" : "Variable",
		pd->settings.size >> 2,
		pd->settings.block_mode == 8 ? '1' : '2');
}
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}
static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret;

	if ((ret = pkt_get_disc_info(pd, &di)))
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}
/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}
/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}
/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		case 0xffff: /* MMC3 not supported */
			break;
		case 0x1a: /* DVD+RW */
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
			return 1;
		default:
			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
				pd->mmc3_profile);
			return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		pkt_notice(pd, "unknown disc - no track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		pkt_notice(pd, "disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		pkt_err(pd, "can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		pkt_err(pd, "failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		pkt_err(pd, "failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		pkt_err(pd, "can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		pkt_notice(pd, "detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		pkt_err(pd, "packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			pkt_err(pd, "unknown data mode\n");
			return -EROFS;
	}
	return 0;
}
/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
						int set)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
		return ret;

	buf[pd->mode_offset + 10] |= (!!set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_err(pd, "write caching control failed\n");
		pkt_dump_sense(&cgc);
	} else if (!ret && set)
		pkt_notice(pd, "enabled write caching\n");
	return ret;
}
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
}
/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sense = &sense;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(&cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}
/* These tables from cdrecord - I don't have orange book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};
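/*
 * Illustrative example (not from the original source): an ultra high
 * speed CD-RW reporting disc sub-type 2 and ATIP A1 speed nibble 9
 * maps through us_clv_to_speed[9] to 32, i.e. a 32x maximum media
 * write speed.
 */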
/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
						unsigned *speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4; /* READ ATIP */
	cgc.cmd[8] = 2;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(&cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4;
	cgc.cmd[8] = size;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		pkt_notice(pd, "disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
		case 0: /* standard speed */
			*speed = clv_to_speed[sp];
			break;
		case 1: /* high speed */
			*speed = hs_clv_to_speed[sp];
			break;
		case 2: /* ultra high speed */
			*speed = us_clv_to_speed[sp];
			break;
		default:
			pkt_notice(pd, "unknown disc sub-type %d\n", st);
			return 1;
	}
	if (*speed) {
		pr_info("maximum media speed: %d\n", *speed);
		return 0;
	} else {
		pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
		return 1;
	}
}
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	pkt_dbg(2, pd, "Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.timeout = 60*HZ;
	cgc.cmd[0] = GPCMD_SEND_OPC;
	cgc.cmd[1] = 1;
	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(&cgc);
	return ret;
}
static int pkt_open_write(struct pktcdvd_device *pd)
{
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	if ((ret = pkt_probe_settings(pd))) {
		pkt_dbg(2, pd, "failed probe\n");
		return ret;
	}

	if ((ret = pkt_set_write_settings(pd))) {
		pkt_dbg(1, pd, "failed saving write settings\n");
		return -EIO;
	}

	pkt_write_caching(pd, USE_WCACHING);

	if ((ret = pkt_get_max_speed(pd, &write_speed)))
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
			break;
		default:
			if ((ret = pkt_media_speed(pd, &media_write_speed)))
				media_write_speed = 16;
			write_speed = min(write_speed, media_write_speed * 177);
			pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
			break;
	}
	read_speed = write_speed;

	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
		pkt_dbg(1, pd, "couldn't set write speed\n");
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	if ((ret = pkt_perform_opc(pd))) {
		pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
	}

	return 0;
}
/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
	int ret;
	long lba;
	struct request_queue *q;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so bdget() can't fail.
	 */
	bdget(pd->bdev->bd_dev);
	if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
		goto out;

	if ((ret = pkt_get_last_written(pd, &lba))) {
		pkt_err(pd, "pkt_get_last_written failed\n");
		goto out_putdev;
	}

	set_capacity(pd->disk, lba << 2);
	set_capacity(pd->bdev->bd_disk, lba << 2);
	bd_set_size(pd->bdev, (loff_t)lba << 11);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		if ((ret = pkt_open_write(pd)))
			goto out_putdev;
		/*
		 * Some CDRW drives can not handle writes larger than one packet,
		 * even if the size is a multiple of the packet size.
		 */
		spin_lock_irq(q->queue_lock);
		blk_queue_max_hw_sectors(q, pd->settings.size);
		spin_unlock_irq(q->queue_lock);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	if ((ret = pkt_set_segment_merging(pd, q)))
		goto out_putdev;

	if (write) {
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			pkt_err(pd, "not enough memory for buffers\n");
			ret = -ENOMEM;
			goto out_putdev;
		}
		pr_info("%lukB available on disc\n", lba << 1);
	}

	return 0;

out_putdev:
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
out:
	return ret;
}
/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	if (flush && pkt_flush_cache(pd))
		pkt_dbg(1, pd, "not flushing cache\n");

	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

	pkt_shrink_pktlist(pd);
}
static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
	if (dev_minor >= MAX_WRITERS)
		return NULL;
	return pkt_devs[dev_minor];
}
static int pkt_open(struct block_device *bdev, fmode_t mode)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return ret;
}
static void pkt_close(struct gendisk *disk, fmode_t mode)
{
	struct pktcdvd_device *pd = disk->private_data;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
}
static void pkt_end_io_read_cloned(struct bio *bio, int err)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	bio_put(bio);
	bio_endio(psd->bio, err);
	mempool_free(psd, psd_pool);
	pkt_bio_finished(pd);
}
2349 static void pkt_make_request(struct request_queue
*q
, struct bio
*bio
)
2351 struct pktcdvd_device
*pd
;
2352 char b
[BDEVNAME_SIZE
];
2354 struct packet_data
*pkt
;
2355 int was_empty
, blocked_bio
;
2356 struct pkt_rb_node
*node
;
2360 pkt_err(pd
, "%s incorrect request queue\n",
2361 bdevname(bio
->bi_bdev
, b
));
2366 * Clone READ bios so we can have our own bi_end_io callback.
2368 if (bio_data_dir(bio
) == READ
) {
2369 struct bio
*cloned_bio
= bio_clone(bio
, GFP_NOIO
);
2370 struct packet_stacked_data
*psd
= mempool_alloc(psd_pool
, GFP_NOIO
);
2374 cloned_bio
->bi_bdev
= pd
->bdev
;
2375 cloned_bio
->bi_private
= psd
;
2376 cloned_bio
->bi_end_io
= pkt_end_io_read_cloned
;
2377 pd
->stats
.secs_r
+= bio_sectors(bio
);
2378 pkt_queue_bio(pd
, cloned_bio
);
2382 if (!test_bit(PACKET_WRITABLE
, &pd
->flags
)) {
2383 pkt_notice(pd
, "WRITE for ro device (%llu)\n",
2384 (unsigned long long)bio
->bi_sector
);
2388 if (!bio
->bi_size
|| (bio
->bi_size
% CD_FRAMESIZE
)) {
2389 pkt_err(pd
, "wrong bio size\n");
2393 blk_queue_bounce(q
, &bio
);
	zone = get_zone(bio->bi_sector, pd);
	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_sector,
		(unsigned long long)bio_end_sector(bio));

	/* Check if we have to split the bio */
	{
		struct bio_pair *bp;
		sector_t last_zone;
		int first_sectors;

		last_zone = get_zone(bio_end_sector(bio) - 1, pd);
		if (last_zone != zone) {
			BUG_ON(last_zone != zone + pd->settings.size);
			first_sectors = last_zone - bio->bi_sector;
			bp = bio_split(bio, first_sectors);
			BUG_ON(!bp);
			pkt_make_request(q, &bp->bio1);
			pkt_make_request(q, &bp->bio2);
			bio_pair_release(bp);
			return;
		}
	}
	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	blocked_bio = 0;
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				bio_list_add(&pkt->orig_bios, bio);
				pkt->write_size += bio->bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				}
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
				return;
			} else {
				blocked_bio = 1;
			}
			spin_unlock(&pkt->lock);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
	/*
	 * Test if there is enough room left in the bio work queue
	 * (queue size >= congestion on mark).
	 * If not, wait till the work queue size is below the congestion off mark.
	 */
	spin_lock(&pd->lock);
	if (pd->write_congestion_on > 0
	    && pd->bio_queue_size >= pd->write_congestion_on) {
		set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
		do {
			spin_unlock(&pd->lock);
			congestion_wait(BLK_RW_ASYNC, HZ);
			spin_lock(&pd->lock);
		} while(pd->bio_queue_size > pd->write_congestion_off);
	}
	spin_unlock(&pd->lock);
	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);
	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
	return;
end_io:
	bio_io_error(bio);
}
static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone = get_zone(bmd->bi_sector, pd);
	int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
	int remaining = (pd->settings.size << 9) - used;
	int remaining2;

	/*
	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
	 * boundary, pkt_make_request() will split the bio.
	 */
	remaining2 = PAGE_SIZE - bmd->bi_size;
	remaining = max(remaining, remaining2);

	BUG_ON(remaining < 0);
	return remaining;
}
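/*
 * Set up the request queue of the pktcdvd gendisk: install the custom
 * make_request/merge_bvec callbacks and the 2kB logical block size.
 */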
static void pkt_init_queue(struct pktcdvd_device *pd)
{
	struct request_queue *q = pd->disk->queue;

	blk_queue_make_request(q, pkt_make_request);
	blk_queue_logical_block_size(q, CD_FRAMESIZE);
	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
	blk_queue_merge_bvec(q, pkt_merge_bvec);
	q->queuedata = pd;
}
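/*
 * /proc/driver/pktcdvd/<name>: dump the writer's settings, statistics
 * and queue state in human-readable form.
 */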
static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	char bdev_buf[BDEVNAME_SIZE];
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
		   bdevname(pd->bdev, bdev_buf));

	seq_printf(m, "\nSettings:\n");
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
			pd->write_congestion_off,
			pd->write_congestion_on);
	return 0;
}
static int pkt_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_seq_show, PDE_DATA(inode));
}

static const struct file_operations pkt_proc_fops = {
	.open		= pkt_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release
};
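/*
 * Attach the CD/DVD device 'dev' to this writer: take a long-lived
 * O_NDELAY reference on it, start the per-device worker thread and
 * create the /proc entry.
 */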
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
	int i;
	int ret = 0;
	char b[BDEVNAME_SIZE];
	struct block_device *bdev;

	if (pd->pkt_dev == dev) {
		pkt_err(pd, "recursive setup not allowed\n");
		return -EBUSY;
	}
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];
		if (!pd2)
			continue;
		if (pd2->bdev->bd_dev == dev) {
			pkt_err(pd, "%s already setup\n",
				bdevname(pd2->bdev, b));
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			pkt_err(pd, "can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev = bdget(dev);
	if (!bdev)
		return -ENOMEM;
	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
	if (ret)
		return ret;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	pd->bdev = bdev;
	set_blocksize(bdev, CD_FRAMESIZE);

	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
	if (IS_ERR(pd->cdrw.thread)) {
		pkt_err(pd, "can't start kernel thread\n");
		ret = -ENOMEM;
		goto out_mem;
	}

	proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
	pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
	return 0;

out_mem:
	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return ret;
}
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	int ret;

	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	mutex_lock(&pktcdvd_mutex);
	switch (cmd) {
	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		/* fallthru */
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
		break;

	default:
		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
		ret = -ENOTTY;
	}
	mutex_unlock(&pktcdvd_mutex);

	return ret;
}
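/*
 * Media change polling is delegated to the attached CD/DVD drive.
 */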
static unsigned int pkt_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	if (!pd)
		return 0;
	if (!pd->bdev)
		return 0;
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk || !attached_disk->fops->check_events)
		return 0;
	return attached_disk->fops->check_events(attached_disk, clearing);
}
static const struct block_device_operations pktcdvd_ops = {
	.owner =		THIS_MODULE,
	.open =			pkt_open,
	.release =		pkt_close,
	.ioctl =		pkt_ioctl,
	.check_events =		pkt_check_events,
};
static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
}
/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
{
	int idx;
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	struct gendisk *disk;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++)
		if (!pkt_devs[idx])
			break;
	if (idx == MAX_WRITERS) {
		pr_err("max %d writers supported\n", MAX_WRITERS);
		ret = -EBUSY;
		goto out_mutex;
	}

	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
		goto out_mutex;

	pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
						  sizeof(struct pkt_rb_node));
	if (!pd->rb_pool)
		goto out_mem;

	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	bio_list_init(&pd->iosched.read_queue);
	bio_list_init(&pd->iosched.write_queue);
	sprintf(pd->name, DRIVER_NAME"%d", idx);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on  = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;

	disk = alloc_disk(1);
	if (!disk)
		goto out_mem;
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE;
	strcpy(disk->disk_name, pd->name);
	disk->devnode = pktcdvd_devnode;
	disk->private_data = pd;
	disk->queue = blk_alloc_queue(GFP_KERNEL);
	if (!disk->queue)
		goto out_mem2;

	pd->pkt_dev = MKDEV(pktdev_major, idx);
	ret = pkt_new_dev(pd, dev);
	if (ret)
		goto out_new_dev;

	/* inherit events of the host device */
	disk->events = pd->bdev->bd_disk->events;
	disk->async_events = pd->bdev->bd_disk->async_events;

	add_disk(disk);

	pkt_sysfs_dev_new(pd);
	pkt_debugfs_dev_new(pd);

	pkt_devs[idx] = pd;
	if (pkt_dev)
		*pkt_dev = pd->pkt_dev;

	mutex_unlock(&ctl_mutex);
	return 0;

out_new_dev:
	blk_cleanup_queue(disk->queue);
out_mem2:
	put_disk(disk);
out_mem:
	if (pd->rb_pool)
		mempool_destroy(pd->rb_pool);
	kfree(pd);
out_mutex:
	mutex_unlock(&ctl_mutex);
	pr_err("setup of pktcdvd device failed\n");
	return ret;
}
/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
	struct pktcdvd_device *pd;
	int idx;
	int ret = 0;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++) {
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
			break;
	}
	if (idx == MAX_WRITERS) {
		pkt_dbg(1, pd, "dev not setup\n");
		ret = -ENXIO;
		goto out;
	}

	if (pd->refcnt > 0) {
		ret = -EBUSY;
		goto out;
	}
	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);

	pkt_devs[idx] = NULL;

	pkt_debugfs_dev_remove(pd);
	pkt_sysfs_dev_remove(pd);

	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);

	remove_proc_entry(pd->name, pkt_proc);
	pkt_dbg(1, pd, "writer unmapped\n");

	del_gendisk(pd->disk);
	blk_cleanup_queue(pd->disk->queue);
	put_disk(pd->disk);

	mempool_destroy(pd->rb_pool);
	kfree(pd);

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

out:
	mutex_unlock(&ctl_mutex);
	return ret;
}
static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}
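/*
 * Control-device ioctl: userspace (e.g. pktsetup) drives setup,
 * teardown and status queries through /dev/pktcdvd/control. A minimal,
 * hypothetical userspace sketch of a SETUP call, assuming the uapi
 * definitions from <linux/pktcdvd.h>; error handling omitted:
 *
 *	int fd = open("/dev/pktcdvd/control", O_RDONLY);
 *	struct pkt_ctrl_command c = { .command = PKT_CTRL_CMD_SETUP };
 *
 *	c.dev = dev;	// CD/DVD device number, new_encode_dev() format
 *	ioctl(fd, PACKET_CTRL_CMD, &c);
 *	// on success, c.pkt_dev holds the new pktcdvd device number
 */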
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}
#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations pkt_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= pkt_ctl_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
};

static struct miscdevice pkt_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DRIVER_NAME,
	.nodename	= "pktcdvd/control",
	.fops		= &pkt_ctl_fops
};
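/*
 * Module init: create the mempool for stacked read bios, register the
 * block major, the sysfs/debugfs hooks, the control device and the
 * /proc directory. Teardown on failure happens in reverse order.
 */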
static int __init pkt_init(void)
{
	int ret;

	mutex_init(&ctl_mutex);

	psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
					sizeof(struct packet_stacked_data));
	if (!psd_pool)
		return -ENOMEM;

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		pr_err("unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = pkt_sysfs_init();
	if (ret)
		goto out;

	pkt_debugfs_init();

	ret = misc_register(&pkt_misc);
	if (ret) {
		pr_err("unable to register misc device\n");
		goto out_misc;
	}

	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

	return 0;

out_misc:
	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();
out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_destroy(psd_pool);
	return ret;
}
static void __exit pkt_exit(void)
{
	remove_proc_entry("driver/"DRIVER_NAME, NULL);
	misc_deregister(&pkt_misc);

	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();

	unregister_blkdev(pktdev_major, DRIVER_NAME);
	mempool_destroy(psd_pool);
}
MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);