/*******************************************************************************
 * Filename: target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * Copyright (c) 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_file.h"

static struct se_subsystem_api fileio_template;

/* fd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
		" MaxSectors: %u\n",
		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

	return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);

	return fd_dev;
}

/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
 *
 *
 */
static struct se_device *fd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	char *dev_p = NULL;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct queue_limits *limits;
	struct fd_dev *fd_dev = p;
	struct fd_host *fd_host = hba->hba_ptr;
	mm_segment_t old_fs;
	struct file *file;
	struct inode *inode = NULL;
	int dev_flags = 0, flags, ret = -EINVAL;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	old_fs = get_fs();
	set_fs(get_ds());
	dev_p = getname(fd_dev->fd_dev_name);
	set_fs(old_fs);
	if (IS_ERR(dev_p)) {
		pr_err("getname(%s) failed: %ld\n",
			fd_dev->fd_dev_name, PTR_ERR(dev_p));
		ret = PTR_ERR(dev_p);
		goto fail;
	}
	/*
	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
	 * of pure timestamp updates.
	 */
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

	file = filp_open(dev_p, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed\n", dev_p);
		ret = PTR_ERR(file);
		goto fail;
	}
	if (!file || !file->f_dentry) {
		pr_err("filp_open(%s) failed\n", dev_p);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_size= from configfs
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q;
		unsigned long long dev_size;
		/*
		 * Setup the local scope queue_limits from struct request_queue->limits
		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
		 */
		q = bdev_get_queue(inode->i_bdev);
		limits = &dev_limits.limits;
		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
		limits->max_hw_sectors = queue_max_hw_sectors(q);
		limits->max_sectors = queue_max_sectors(q);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from underlying struct block_device
		 */
		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		dev_size = (i_size_read(file->f_mapping->host) -
				fd_dev->fd_block_size);

		pr_debug("FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		limits = &dev_limits.limits;
		limits->logical_block_size = FD_BLOCKSIZE;
		limits->max_hw_sectors = FD_MAX_SECTORS;
		limits->max_sectors = FD_MAX_SECTORS;
		fd_dev->fd_block_size = FD_BLOCKSIZE;
	}

	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba, &fileio_template,
				se_dev, dev_flags, fd_dev,
				&dev_limits, "FILEIO", FD_VERSION);
	if (!dev)
		goto fail;

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
		fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	putname(dev_p);
	return dev;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	putname(dev_p);
	return ERR_PTR(ret);
}

/* fd_free_device(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void fd_free_device(void *p)
{
	struct fd_dev *fd_dev = p;

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}

	kfree(fd_dev);
}

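/*
 * Map the READ payload scatterlist entries into a temporary iovec[] and
 * issue a single vectored read with vfs_readv() at the byte offset
 * derived from the command LBA, under a temporary KERNEL_DS limit.
 */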
static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_nents)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *dev = se_dev->dev_ptr;
	struct file *fd = dev->fd_file;
	struct scatterlist *sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (cmd->t_task_lba *
		      se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_readv iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = sg_virt(sg);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
	set_fs(old_fs);

	kfree(iov);
	/*
	 * Return zeros and GOOD status even if the READ did not return
	 * the expected virt_size for struct file w/o a backing struct
	 * block_device.
	 */
	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
		if (ret < 0 || ret != cmd->data_length) {
			pr_err("vfs_readv() returned %d,"
				" expecting %d for S_ISBLK\n", ret,
				(int)cmd->data_length);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		if (ret < 0) {
			pr_err("vfs_readv() returned %d for non"
				" S_ISBLK\n", ret);
			return ret;
		}
	}

	return 1;
}

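/*
 * WRITE counterpart of fd_do_readv(): build an iovec[] from the
 * scatterlist and submit the payload with a single vfs_writev() call
 * at the LBA-derived byte offset.
 */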
static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_nents)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *dev = se_dev->dev_ptr;
	struct file *fd = dev->fd_file;
	struct scatterlist *sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (cmd->t_task_lba *
		      se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret, i = 0;

	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_writev iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = sg_virt(sg);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
	set_fs(old_fs);

	kfree(iov);

	if (ret < 0 || ret != cmd->data_length) {
		pr_err("vfs_writev() returned %d\n", ret);
		return (ret < 0 ? ret : -EINVAL);
	}

	return 1;
}

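/*
 * Emulate SYNCHRONIZE_CACHE by flushing the backing struct file with
 * vfs_fsync_range().  With the IMMED bit set, GOOD status is queued
 * before the flush; otherwise the flush result selects the completion
 * status below.
 */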
static void fd_emulate_sync_cache(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (immed)
		return;

	if (ret) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
	} else {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	}
}

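/*
 * Dispatch the command payload to the vectored READ/WRITE helpers above,
 * and issue an explicit vfs_fsync_range() after a successful WRITE when
 * FUA write emulation is enabled and the command carries SCF_FUA.
 */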
static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_nents, enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	int ret = 0;

	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (data_direction == DMA_FROM_DEVICE) {
		ret = fd_do_readv(cmd, sgl, sgl_nents);
	} else {
		ret = fd_do_writev(cmd, sgl, sgl_nents);
		/*
		 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
		 * Allow this to happen independent of WCE=0 setting.
		 */
		if (ret > 0 &&
		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		    (cmd->se_cmd_flags & SCF_FUA)) {
			struct fd_dev *fd_dev = dev->dev_ptr;
			loff_t start = cmd->t_task_lba *
				dev->se_sub_dev->se_dev_attrib.block_size;
			loff_t end = start + cmd->data_length;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}
	}

	if (ret < 0) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return ret;
	}
	if (ret)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_err, NULL}
};

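/*
 * Parse the comma-separated parameters written to this device's configfs
 * control attribute.  A typical control string (the path below is only an
 * illustrative example) looks like:
 *
 *   fd_dev_name=/srv/fileio_backend,fd_dev_size=4294967296
 */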
static ssize_t fd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t fd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size);
	return bl;
}

/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
 *
 *
 */
static u32 fd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

/* fd_get_device_type(): (Part of se_subsystem_api_t template)
 *
 *
 */
static u32 fd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

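/*
 * Report the backing store capacity in logical blocks to the target core
 * (e.g. for READ CAPACITY emulation).
 */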
static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = dev->dev_ptr;
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct block_device,
	 * ensure dev_size is always based on the current inode size in order
	 * to handle underlying block_device resize operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = (i_size_read(i) - fd_dev->fd_block_size);
	else
		dev_size = fd_dev->fd_dev_size;

	return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
}

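/*
 * se_subsystem_api template that wires the FILEIO callbacks into the
 * generic target core; write cache (WCE) and FUA WRITE emulation are
 * advertised as supported here.
 */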
static struct se_subsystem_api fileio_template = {
	.name = "fileio",
	.owner = THIS_MODULE,
	.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
	.write_cache_emulated = 1,
	.fua_write_emulated = 1,
	.attach_hba = fd_attach_hba,
	.detach_hba = fd_detach_hba,
	.allocate_virtdevice = fd_allocate_virtdevice,
	.create_virtdevice = fd_create_virtdevice,
	.free_device = fd_free_device,
	.execute_cmd = fd_execute_cmd,
	.do_sync_cache = fd_emulate_sync_cache,
	.check_configfs_dev_params = fd_check_configfs_dev_params,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_rev = fd_get_device_rev,
	.get_device_type = fd_get_device_type,
	.get_blocks = fd_get_blocks,
};

static int __init fileio_module_init(void)
{
	return transport_subsystem_register(&fileio_template);
}

static void fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);