/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * Copyright (c) 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_file.h"
#if 0
#define DEBUG_FD_CACHE(x...) printk(x)
#else
#define DEBUG_FD_CACHE(x...)
#endif

#if 0
#define DEBUG_FD_FUA(x...) printk(x)
#else
#define DEBUG_FD_FUA(x...)
#endif
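/*
 * Forward declaration: the callbacks below are wired into this template,
 * which is defined at the bottom of this file and registered with TCM
 * Core at module load time.
 */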
static struct se_subsystem_api fileio_template;
/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!(fd_host)) {
		printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
		" MaxSectors: %u\n",
		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

	return 0;
}
static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}
static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!(fd_dev)) {
		printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name);

	return fd_dev;
}
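/*
 * Note: setup is split across two se_subsystem_api callbacks.
 * fd_allocate_virtdevice() above only allocates the fd_dev container
 * when the configfs device directory is created; fd_create_virtdevice()
 * below opens the backing file once the administrator has written the
 * fd_dev_name=/fd_dev_size= parameters and enables the device.
 */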
/*	fd_create_virtdevice(): (Part of se_subsystem_api_t template)
 *
 *
 */
static struct se_device *fd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	char *dev_p = NULL;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct queue_limits *limits;
	struct fd_dev *fd_dev = (struct fd_dev *) p;
	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
	mm_segment_t old_fs;
	struct file *file;
	struct inode *inode = NULL;
	int dev_flags = 0, flags, ret = -EINVAL;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	old_fs = get_fs();
	set_fs(get_ds());
	dev_p = getname(fd_dev->fd_dev_name);
	set_fs(old_fs);

	if (IS_ERR(dev_p)) {
		printk(KERN_ERR "getname(%s) failed: %lu\n",
			fd_dev->fd_dev_name, IS_ERR(dev_p));
		ret = PTR_ERR(dev_p);
		goto fail;
	}
#if 0
	if (di->no_create_file)
		flags = O_RDWR | O_LARGEFILE;
	else
		flags = O_RDWR | O_CREAT | O_LARGEFILE;
#else
	flags = O_RDWR | O_CREAT | O_LARGEFILE;
#endif
/*	flags |= O_DIRECT; */
	/*
	 * If fd_buffered_io=1 has not been set explicitly (the default),
	 * use O_SYNC to force FILEIO writes to disk.
	 */
	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
		flags |= O_SYNC;

	file = filp_open(dev_p, flags, 0600);
	if (IS_ERR(file)) {
		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
		ret = PTR_ERR(file);
		goto fail;
	}
	if (!file || !file->f_dentry) {
		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_size= from configfs
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q;
		/*
		 * Setup the local scope queue_limits from struct request_queue->limits
		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
		 */
		q = bdev_get_queue(inode->i_bdev);
		limits = &dev_limits.limits;
		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
		limits->max_hw_sectors = queue_max_hw_sectors(q);
		limits->max_sectors = queue_max_sectors(q);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from underlying struct block_device
		 */
		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			fd_dev->fd_dev_size,
			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			printk(KERN_ERR "FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		limits = &dev_limits.limits;
		limits->logical_block_size = FD_BLOCKSIZE;
		limits->max_hw_sectors = FD_MAX_SECTORS;
		limits->max_sectors = FD_MAX_SECTORS;
		fd_dev->fd_block_size = FD_BLOCKSIZE;
	}

	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba, &fileio_template,
				se_dev, dev_flags, fd_dev,
				&dev_limits, "FILEIO", FD_VERSION);
	if (!(dev))
		goto fail;

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	putname(dev_p);
	return dev;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	putname(dev_p);
	return ERR_PTR(ret);
}
/*	fd_free_device(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void fd_free_device(void *p)
{
	struct fd_dev *fd_dev = (struct fd_dev *) p;

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}

	kfree(fd_dev);
}
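/*
 * struct fd_request embeds its struct se_task as the fd_task member, so
 * container_of() can recover the enclosing request from the se_task
 * pointer that TCM Core hands back to the plugin callbacks below.
 */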
static inline struct fd_request *FILE_REQ(struct se_task *task)
{
	return container_of(task, struct fd_request, fd_task);
}
static struct se_task *
fd_alloc_task(struct se_cmd *cmd)
{
	struct fd_request *fd_req;

	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
	if (!(fd_req)) {
		printk(KERN_ERR "Unable to allocate struct fd_request\n");
		return NULL;
	}

	fd_req->fd_dev = cmd->se_dev->dev_ptr;

	return &fd_req->fd_task;
}
static int fd_do_readv(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct file *fd = req->fd_dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		      task->se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
	if (!(iov)) {
		printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
		return -ENOMEM;
	}

	for (i = 0; i < task->task_sg_num; i++) {
		iov[i].iov_len = sg[i].length;
		iov[i].iov_base = sg_virt(&sg[i]);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
	set_fs(old_fs);

	kfree(iov);
	/*
	 * Return zeros and GOOD status even if the READ did not return
	 * the expected virt_size for struct file w/o a backing struct
	 * block_device.
	 */
	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
		if (ret < 0 || ret != task->task_size) {
			printk(KERN_ERR "vfs_readv() returned %d,"
				" expecting %d for S_ISBLK\n", ret,
				(int)task->task_size);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		if (ret < 0) {
			printk(KERN_ERR "vfs_readv() returned %d for non"
				" S_ISBLK\n", ret);
			return ret;
		}
	}

	return 1;
}
static int fd_do_writev(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct file *fd = req->fd_dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		      task->se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret, i = 0;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
	if (!(iov)) {
		printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
		return -ENOMEM;
	}

	for (i = 0; i < task->task_sg_num; i++) {
		iov[i].iov_len = sg[i].length;
		iov[i].iov_base = sg_virt(&sg[i]);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
	set_fs(old_fs);

	kfree(iov);

	if (ret < 0 || ret != task->task_size) {
		printk(KERN_ERR "vfs_writev() returned %d\n", ret);
		return (ret < 0 ? ret : -EINVAL);
	}

	return 1;
}
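/*
 * Both fd_do_readv() and fd_do_writev() above temporarily lift the
 * address limit with set_fs(get_ds()) because vfs_readv()/vfs_writev()
 * expect user-space iovec pointers, while the iovecs built here from
 * sg_virt() point into kernel memory.
 */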
static void fd_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (!immed)
		transport_complete_sync_cache(cmd, ret == 0);
}
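/*
 * With the Immediate bit set, GOOD status is queued above before the
 * flush runs, so a later vfs_fsync_range() failure can no longer be
 * reported to the initiator; only the non-immediate path feeds the
 * flush result into transport_complete_sync_cache().
 */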
/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int fd_emulated_write_cache(struct se_device *dev)
{
	return 1;
}

static int fd_emulated_dpo(struct se_device *dev)
{
	return 0;
}
/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK.
 */
static int fd_emulated_fua_write(struct se_device *dev)
{
	return 1;
}

static int fd_emulated_fua_read(struct se_device *dev)
{
	return 0;
}
/*
 * WRITE Force Unit Access (FUA) emulation on a per struct se_task
 * LBA + Range basis..
 */
static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
	loff_t end = start + task->task_size;
	int ret;

	DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
		task->task_lba, task->task_size);

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
}
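/*
 * Note that vfs_fsync_range() is called with a datasync argument of 1,
 * so the sync is restricted to the byte range just written and skips
 * flushing non-essential file metadata, rather than syncing the whole
 * backing file.
 */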
static int fd_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	int ret = 0;

	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (task->task_data_direction == DMA_FROM_DEVICE) {
		ret = fd_do_readv(task);
	} else {
		ret = fd_do_writev(task);

		if (ret > 0 &&
		    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		    cmd->t_tasks_fua) {
			/*
			 * We might need to be a bit smarter here
			 * and return some sense data to let the initiator
			 * know the FUA WRITE cache sync failed..?
			 */
			fd_emulate_write_fua(cmd, task);
		}
	}

	if (ret < 0)
		return ret;
	if (ret) {
		task->task_scsi_status = GOOD;
		transport_complete_task(task, 1);
	}
	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/*	fd_free_task(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void fd_free_task(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);

	kfree(req);
}
enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};
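/*
 * Illustrative usage (not part of the driver): these tokens parse the
 * comma-separated string an administrator writes to the device's
 * configfs control file, for example from a shell (the path and names
 * here are only an example setup):
 *
 *   echo "fd_dev_name=/srv/fileio0,fd_dev_size=4194304" > \
 *	/sys/kernel/config/target/core/fileio_0/fileio0/control
 *
 * fd_dev_name= is mandatory (see fd_check_configfs_dev_params() below),
 * fd_dev_size= is required only when the path is not a block device,
 * and fd_buffered_io=1 optionally disables the default O_SYNC mode.
 */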
static ssize_t fd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
					"%s", arg_p);
			kfree(arg_p);
			printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				printk(KERN_ERR "strict_strtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			printk(KERN_INFO "FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			match_int(args, &arg);
			if (arg != 1) {
				printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			printk(KERN_INFO "FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t fd_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		printk(KERN_ERR "Missing fd_dev_name=\n");
		return -EINVAL;
	}

	return 0;
}
static ssize_t fd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
		"Buffered" : "Synchronous");
	return bl;
}
/*	fd_get_cdb(): (Part of se_subsystem_api_t template)
 *
 *
 */
static unsigned char *fd_get_cdb(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);

	return req->fd_scsi_cdb;
}
/*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
 *
 *
 */
static u32 fd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}
/*	fd_get_device_type(): (Part of se_subsystem_api_t template)
 *
 *
 */
static u32 fd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = dev->dev_ptr;
	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
			dev->se_sub_dev->se_dev_attrib.block_size);

	return blocks_long;
}
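/*
 * Worked example (values are illustrative): a 1 GiB backing file with
 * the default 512-byte block_size reports 1073741824 / 512 = 2097152
 * logical blocks to TCM Core through this callback.
 */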
static struct se_subsystem_api fileio_template = {
	.name			= "fileio",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.allocate_virtdevice	= fd_allocate_virtdevice,
	.create_virtdevice	= fd_create_virtdevice,
	.free_device		= fd_free_device,
	.dpo_emulated		= fd_emulated_dpo,
	.fua_write_emulated	= fd_emulated_fua_write,
	.fua_read_emulated	= fd_emulated_fua_read,
	.write_cache_emulated	= fd_emulated_write_cache,
	.alloc_task		= fd_alloc_task,
	.do_task		= fd_do_task,
	.do_sync_cache		= fd_emulate_sync_cache,
	.free_task		= fd_free_task,
	.check_configfs_dev_params = fd_check_configfs_dev_params,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_cdb		= fd_get_cdb,
	.get_device_rev		= fd_get_device_rev,
	.get_device_type	= fd_get_device_type,
	.get_blocks		= fd_get_blocks,
};
static int __init fileio_module_init(void)
{
	return transport_subsystem_register(&fileio_template);
}

static void fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}
MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);